def main():
    config = vars(parse_args())

    if config['name'] is None:
        config['name'] = '%s_%s' % (config['arch'],
                                    datetime.now().strftime('%m%d%H'))
    config['num_filters'] = [int(n) for n in config['num_filters'].split(',')]

    if not os.path.exists('models/detection/%s' % config['name']):
        os.makedirs('models/detection/%s' % config['name'])

    # When resuming, the saved config takes precedence over the CLI arguments.
    if config['resume']:
        with open('models/detection/%s/config.yml' % config['name'], 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        config['resume'] = True

    with open('models/detection/%s/config.yml' % config['name'], 'w') as f:
        yaml.dump(config, f)

    print('-' * 20)
    for key in config.keys():
        print('- %s: %s' % (key, str(config[key])))
    print('-' * 20)

    cudnn.benchmark = True

    df = pd.read_csv('inputs/train.csv')
    img_paths = np.array('inputs/train_images/' + df['ImageId'].values + '.jpg')
    mask_paths = np.array('inputs/train_masks/' + df['ImageId'].values + '.jpg')
    labels = np.array(
        [convert_str_to_labels(s) for s in df['PredictionString']])

    # Optional pseudo-labeling: extend the training set with test images whose
    # labels come either from raw model outputs (.pth) or a submission (.csv).
    test_img_paths = None
    test_mask_paths = None
    test_outputs = None
    if config['pseudo_label'] is not None:
        test_df = pd.read_csv('inputs/sample_submission.csv')
        test_img_paths = np.array('inputs/test_images/'
                                  + test_df['ImageId'].values + '.jpg')
        test_mask_paths = np.array('inputs/test_masks/'
                                   + test_df['ImageId'].values + '.jpg')
        ext = os.path.splitext(config['pseudo_label'])[1]
        if ext == '.pth':
            test_outputs = torch.load('outputs/raw/test/%s' %
                                      config['pseudo_label'])
        elif ext == '.csv':
            test_labels = pd.read_csv('outputs/submissions/test/%s' %
                                      config['pseudo_label'])
            # Drop test images without predictions before parsing the labels.
            null_idx = test_labels.isnull().any(axis=1)
            test_img_paths = test_img_paths[~null_idx]
            test_mask_paths = test_mask_paths[~null_idx]
            test_labels = test_labels.dropna()
            test_labels = np.array([
                convert_str_to_labels(
                    s, names=['pitch', 'yaw', 'roll', 'x', 'y', 'z', 'score'])
                for s in test_labels['PredictionString']
            ])
            print(test_labels)
        else:
            raise NotImplementedError

    if config['resume']:
        checkpoint = torch.load('models/detection/%s/checkpoint.pth.tar' %
                                config['name'])

    # Output heads and their channel counts; the rotation head depends on the
    # chosen parameterization, and 'wh' (width/height) is optional.
    heads = OrderedDict([
        ('hm', 1),
        ('reg', 2),
        ('depth', 1),
    ])
    if config['rot'] == 'eular':
        heads['eular'] = 3
    elif config['rot'] == 'trig':
        heads['trig'] = 6
    elif config['rot'] == 'quat':
        heads['quat'] = 4
    else:
        raise NotImplementedError
    if config['wh']:
        heads['wh'] = 2

    # Each head gets its own loss, looked up by name from the losses module.
    criterion = OrderedDict()
    for head in heads.keys():
        criterion[head] = losses.__dict__[config[head + '_loss']]().cuda()

    train_transform = Compose([
        transforms.ShiftScaleRotate(
            shift_limit=config['shift_limit'],
            scale_limit=0,
            rotate_limit=0,
            border_mode=cv2.BORDER_CONSTANT,
            value=0,
            p=config['shift_p']) if config['shift'] else NoOp(),
        OneOf([
            transforms.HueSaturationValue(
                hue_shift_limit=config['hue_limit'],
                sat_shift_limit=config['sat_limit'],
                val_shift_limit=config['val_limit'],
                p=config['hsv_p']) if config['hsv'] else NoOp(),
            transforms.RandomBrightness(
                limit=config['brightness_limit'],
                p=config['brightness_p']) if config['brightness'] else NoOp(),
            transforms.RandomContrast(
                limit=config['contrast_limit'],
                p=config['contrast_p']) if config['contrast'] else NoOp(),
        ], p=1),
        transforms.ISONoise(p=config['iso_noise_p']) if config['iso_noise'] else NoOp(),
        transforms.CLAHE(p=config['clahe_p']) if config['clahe'] else NoOp(),
    ], keypoint_params=KeypointParams(format='xy', remove_invisible=False))
    val_transform = None

    folds = []
    best_losses = []
    # best_scores = []

    kf = KFold(n_splits=config['n_splits'], shuffle=True, random_state=41)
    for fold, (train_idx, val_idx) in enumerate(kf.split(img_paths)):
        print('Fold [%d/%d]' % (fold + 1, config['n_splits']))
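        # Resume/skip logic: when resuming, folds that already completed are
        # skipped; likewise, in a fresh run, a fold whose trained weights are
        # already on disk is not retrained. In both cases the fold's best
        # val_loss is recovered from its CSV log so the final summary stays
        # complete.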
        if (config['resume'] and fold < checkpoint['fold'] - 1) or (
                not config['resume'] and os.path.exists(
                    'models/detection/%s/model_%d.pth' % (config['name'], fold + 1))):
            log = pd.read_csv('models/detection/%s/log_%d.csv' %
                              (config['name'], fold + 1))
            best_loss = log.loc[log['val_loss'].values.argmin(), 'val_loss']
            # best_loss, best_score = log.loc[log['val_loss'].values.argmin(),
            #                                 ['val_loss', 'val_score']].values
            folds.append(str(fold + 1))
            best_losses.append(best_loss)
            # best_scores.append(best_score)
            continue

        train_img_paths, val_img_paths = img_paths[train_idx], img_paths[val_idx]
        train_mask_paths, val_mask_paths = mask_paths[train_idx], mask_paths[val_idx]
        train_labels, val_labels = labels[train_idx], labels[val_idx]

        if config['pseudo_label'] is not None:
            train_img_paths = np.hstack((train_img_paths, test_img_paths))
            train_mask_paths = np.hstack((train_mask_paths, test_mask_paths))
            train_labels = np.hstack((train_labels, test_labels))

        # train
        train_set = Dataset(
            train_img_paths,
            train_mask_paths,
            train_labels,
            input_w=config['input_w'],
            input_h=config['input_h'],
            transform=train_transform,
            lhalf=config['lhalf'],
            hflip=config['hflip_p'] if config['hflip'] else 0,
            scale=config['scale_p'] if config['scale'] else 0,
            scale_limit=config['scale_limit'],
            # test_img_paths=test_img_paths,
            # test_mask_paths=test_mask_paths,
            # test_outputs=test_outputs,
        )
        train_loader = torch.utils.data.DataLoader(
            train_set,
            batch_size=config['batch_size'],
            shuffle=True,
            num_workers=config['num_workers'],
            # pin_memory=True,
        )

        val_set = Dataset(
            val_img_paths,
            val_mask_paths,
            val_labels,
            input_w=config['input_w'],
            input_h=config['input_h'],
            transform=val_transform,
            lhalf=config['lhalf'])
        val_loader = torch.utils.data.DataLoader(
            val_set,
            batch_size=config['batch_size'],
            shuffle=False,
            num_workers=config['num_workers'],
            # pin_memory=True,
        )

        # create model
        model = get_model(config['arch'], heads=heads,
                          head_conv=config['head_conv'],
                          num_filters=config['num_filters'],
                          dcn=config['dcn'],
                          gn=config['gn'], ws=config['ws'],
                          freeze_bn=config['freeze_bn'])
        model = model.cuda()

        if config['load_model'] is not None:
            model.load_state_dict(
                torch.load('models/detection/%s/model_%d.pth' %
                           (config['load_model'], fold + 1)))

        params = filter(lambda p: p.requires_grad, model.parameters())
        if config['optimizer'] == 'Adam':
            optimizer = optim.Adam(params, lr=config['lr'],
                                   weight_decay=config['weight_decay'])
        elif config['optimizer'] == 'AdamW':
            optimizer = optim.AdamW(params, lr=config['lr'],
                                    weight_decay=config['weight_decay'])
        elif config['optimizer'] == 'RAdam':
            optimizer = RAdam(params, lr=config['lr'],
                              weight_decay=config['weight_decay'])
        elif config['optimizer'] == 'SGD':
            optimizer = optim.SGD(params, lr=config['lr'],
                                  momentum=config['momentum'],
                                  nesterov=config['nesterov'],
                                  weight_decay=config['weight_decay'])
        else:
            raise NotImplementedError

        if config['apex']:
            # amp.initialize returns the patched model and optimizer; the
            # return values must be kept for mixed precision to take effect.
            model, optimizer = amp.initialize(model, optimizer, opt_level='O1')

        if config['scheduler'] == 'CosineAnnealingLR':
            scheduler = lr_scheduler.CosineAnnealingLR(
                optimizer, T_max=config['epochs'], eta_min=config['min_lr'])
        elif config['scheduler'] == 'ReduceLROnPlateau':
            scheduler = lr_scheduler.ReduceLROnPlateau(
                optimizer, factor=config['factor'], patience=config['patience'],
                verbose=True, min_lr=config['min_lr'])
        elif config['scheduler'] == 'MultiStepLR':
            scheduler = lr_scheduler.MultiStepLR(
                optimizer,
                milestones=[int(e) for e in config['milestones'].split(',')],
                gamma=config['gamma'])
        else:
            raise NotImplementedError

        log = {
            'epoch': [],
            'loss': [],
            # 'score': [],
            'val_loss': [],
            # 'val_score': [],
        }
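        # Per-fold bookkeeping: the epoch loop below appends to this log,
        # saves model_<fold>.pth whenever val_loss improves, and writes a
        # rolling checkpoint.pth.tar with enough state (model, optimizer,
        # scheduler, epoch, fold) to resume mid-fold.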
        best_loss = float('inf')
        # best_score = float('inf')
        start_epoch = 0

        if config['resume'] and fold == checkpoint['fold'] - 1:
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            start_epoch = checkpoint['epoch']
            log = pd.read_csv('models/detection/%s/log_%d.csv' %
                              (config['name'], fold + 1)).to_dict(orient='list')
            best_loss = checkpoint['best_loss']

        for epoch in range(start_epoch, config['epochs']):
            print('Epoch [%d/%d]' % (epoch + 1, config['epochs']))

            # train for one epoch
            train_loss = train(config, heads, train_loader, model, criterion,
                               optimizer, epoch)
            # evaluate on validation set
            val_loss = validate(config, heads, val_loader, model, criterion)

            if config['scheduler'] == 'CosineAnnealingLR':
                scheduler.step()
            elif config['scheduler'] == 'ReduceLROnPlateau':
                scheduler.step(val_loss)
            elif config['scheduler'] == 'MultiStepLR':
                # MultiStepLR also needs a per-epoch step, otherwise the
                # milestone decays never fire.
                scheduler.step()

            print('loss %.4f - val_loss %.4f' % (train_loss, val_loss))
            # print('loss %.4f - score %.4f - val_loss %.4f - val_score %.4f'
            #       % (train_loss, train_score, val_loss, val_score))

            log['epoch'].append(epoch)
            log['loss'].append(train_loss)
            # log['score'].append(train_score)
            log['val_loss'].append(val_loss)
            # log['val_score'].append(val_score)

            pd.DataFrame(log).to_csv('models/detection/%s/log_%d.csv' %
                                     (config['name'], fold + 1), index=False)

            if val_loss < best_loss:
                torch.save(model.state_dict(),
                           'models/detection/%s/model_%d.pth' %
                           (config['name'], fold + 1))
                best_loss = val_loss
                # best_score = val_score
                print("=> saved best model")

            state = {
                'fold': fold + 1,
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_loss': best_loss,
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict(),
            }
            torch.save(state,
                       'models/detection/%s/checkpoint.pth.tar' % config['name'])

        print('val_loss: %f' % best_loss)
        # print('val_score: %f' % best_score)

        folds.append(str(fold + 1))
        best_losses.append(best_loss)
        # best_scores.append(best_score)

        results = pd.DataFrame({
            'fold': folds + ['mean'],
            'best_loss': best_losses + [np.mean(best_losses)],
            # 'best_score': best_scores + [np.mean(best_scores)],
        })
        print(results)
        results.to_csv('models/detection/%s/results.csv' % config['name'],
                       index=False)

        # Free GPU memory before the next fold.
        del model
        torch.cuda.empty_cache()
        del train_set, train_loader
        del val_set, val_loader
        gc.collect()

        if not config['cv']:
            break
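
# Conventional entry-point guard. The guard and the example invocations are
# assumptions rather than part of the excerpt above: flag names are inferred
# from the config keys, and parse_args() is defined elsewhere in the repo.
#
#   python train.py --rot trig --scheduler CosineAnnealingLR
#   python train.py --name <run_name> --resume
#
if __name__ == '__main__':
    main()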
    image2 = np.copy(image)
    data = aug(image=image)
    assert "replay" in data
    data2 = ReplayCompose.replay(data["replay"], image=image2)
    assert np.array_equal(data["image"], data2["image"])


@pytest.mark.parametrize(
    ["targets", "bbox_params", "keypoint_params", "expected"],
    [
        [
            {"keypoints": [[10, 10], [70, 70], [10, 70], [70, 10]]},
            None,
            KeypointParams("xy", check_each_transform=False),
            {"keypoints": np.array([[10, 10], [70, 70], [10, 70], [70, 10]]) + 25},
        ],
        [
            {"keypoints": [[10, 10], [70, 70], [10, 70], [70, 10]]},
            None,
            KeypointParams("xy", check_each_transform=True),
            {"keypoints": np.array([[10, 10]]) + 25},
        ],