Example 1
def test_compose_with_bbox_noop(bboxes, bbox_format, labels):
    image = np.ones((100, 100, 3))
    if labels is not None:
        aug = Compose([NoOp(p=1.)], bbox_params={'format': bbox_format, 'label_fields': ['labels']})
        transformed = aug(image=image, bboxes=bboxes, labels=labels)
    else:
        aug = Compose([NoOp(p=1.)], bbox_params={'format': bbox_format})
        transformed = aug(image=image, bboxes=bboxes)
    assert np.array_equal(transformed['image'], image)
    assert transformed['bboxes'] == bboxes
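
These test snippets assume pytest parametrization that supplies the bboxes, bbox_format, and labels arguments. A minimal sketch of the assumed imports and parameters (the concrete values here are illustrative, not taken from the original suite):

import numpy as np
import pytest
from albumentations import Compose, NoOp

@pytest.mark.parametrize(
    ['bboxes', 'bbox_format', 'labels'],
    [
        ([(20, 30, 40, 50)], 'coco', [1]),
        ([(20, 30, 40, 50)], 'coco', None),
    ],
)
def test_compose_with_bbox_noop(bboxes, bbox_format, labels):
    ...  # body as in Example 1 above
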
Example 2
def test_compose_with_keypoint_noop(keypoints, keypoint_format, labels):
    image = np.ones((100, 100, 3))
    if labels is not None:
        aug = Compose([NoOp(p=1.)], keypoint_params={'format': keypoint_format, 'label_fields': ['labels']})
        transformed = aug(image=image, keypoints=keypoints, labels=labels)
    else:
        aug = Compose([NoOp(p=1.)], keypoint_params={'format': keypoint_format})
        transformed = aug(image=image, keypoints=keypoints)
    assert np.array_equal(transformed['image'], image)
    assert transformed['keypoints'] == keypoints
Example 3
def test_compose_with_bbox_noop(bboxes, bbox_format, labels):
    image = np.ones((100, 100, 3))
    if labels is not None:
        aug = Compose([NoOp(p=1.0)], bbox_params={"format": bbox_format, "label_fields": ["labels"]})
        transformed = aug(image=image, bboxes=bboxes, labels=labels)
    else:
        aug = Compose([NoOp(p=1.0)], bbox_params={"format": bbox_format})
        transformed = aug(image=image, bboxes=bboxes)
    assert np.array_equal(transformed["image"], image)
    assert np.all(np.isclose(transformed["bboxes"], bboxes))
Example 4
def test_compose_with_keypoint_noop(keypoints, keypoint_format, labels):
    image = np.ones((100, 100, 3))
    if labels is not None:
        aug = Compose([NoOp(p=1.0)], keypoint_params={"format": keypoint_format, "label_fields": ["labels"]})
        transformed = aug(image=image, keypoints=keypoints, labels=labels)
    else:
        aug = Compose([NoOp(p=1.0)], keypoint_params={"format": keypoint_format})
        transformed = aug(image=image, keypoints=keypoints)
    assert np.array_equal(transformed["image"], image)
    assert transformed["keypoints"] == keypoints
Example 5
def test_compose_with_bbox_noop_different_name(bboxes, bbox_format, labels):
    image = np.ones((100, 100, 3))
    if labels is not None:
        aug = Compose([NoOp(p=1.)], bbox_params={'format': bbox_format, 'label_fields': ['labels']},
                      additional_targets={'bboxes0': 'bboxes', 'image_left': 'image'})
        transformed = aug(image_left=image, bboxes0=bboxes, labels=labels)
    else:
        aug = Compose([NoOp(p=1.)], bbox_params={'format': bbox_format},
                      additional_targets={'bboxes0': 'bboxes', 'image_left': 'image'})
        transformed = aug(image_left=image, bboxes0=bboxes)
    assert np.array_equal(transformed['image_left'], image)
    assert transformed['bboxes0'] == bboxes
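
In Example 5, additional_targets maps extra call-time keyword names onto built-in target types ('image', 'bboxes', ...), so one pipeline transforms several inputs identically. A self-contained sketch of the mechanism (array values are placeholders):

import numpy as np
from albumentations import Compose, HorizontalFlip

aug = Compose(
    [HorizontalFlip(p=1.0)],
    additional_targets={'image_left': 'image', 'image_right': 'image'},
)
out = aug(
    image=np.zeros((4, 4, 3), dtype=np.uint8),
    image_left=np.ones((4, 4, 3), dtype=np.uint8),
    image_right=np.ones((4, 4, 3), dtype=np.uint8),
)
# out['image_left'] and out['image_right'] receive the same flip as out['image']
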
Example 6
def test_compose_with_bbox_noop_label_outside(bboxes, bbox_format, labels):
    image = np.ones((100, 100, 3))
    aug = Compose([NoOp(p=1.)], bbox_params={'format': bbox_format, 'label_fields': list(labels.keys())})
    transformed = aug(image=image, bboxes=bboxes, **labels)
    assert np.array_equal(transformed['image'], image)
    assert transformed['bboxes'] == bboxes
    for k, v in labels.items():
        assert transformed[k] == v
Example 7
def test_compose_with_keypoint_noop_label_outside(keypoints, keypoint_format, labels):
    image = np.ones((100, 100, 3))
    aug = Compose([NoOp(p=1.)], keypoint_params={'format': keypoint_format, 'label_fields': list(labels.keys())})
    transformed = aug(image=image, keypoints=keypoints, **labels)
    assert np.array_equal(transformed['image'], image)
    assert transformed['keypoints'] == keypoints
    for k, v in labels.items():
        assert transformed[k] == v
Example 8
def test_compose_with_keypoint_noop_error_label_fields(keypoints,
                                                       keypoint_format):
    image = np.ones((100, 100, 3))
    aug = Compose([NoOp(p=1.0)],
                  keypoint_params={
                      "format": keypoint_format,
                      "label_fields": "class_id"
                  })
    with pytest.raises(Exception):
        aug(image=image, keypoints=keypoints, cls_id=[0])
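
The failure above is deliberate: label_fields must be a sequence of field names, and each name must match a keyword passed at call time (the params say 'class_id' but the call passes cls_id). A hedged sketch of the corrected configuration, reusing the names in scope in the test:

aug = Compose([NoOp(p=1.0)],
              keypoint_params={'format': keypoint_format,
                               'label_fields': ['class_id']})
aug(image=image, keypoints=keypoints, class_id=[0])
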
Example 9
def test_compose_with_keypoint_noop_label_outside(keypoints, keypoint_format,
                                                  labels):
    image = np.ones((100, 100, 3))
    aug = Compose(
        [NoOp(p=1.0)],
        keypoint_params={
            "format": keypoint_format,
            "label_fields": list(labels.keys()),
        },
    )
    transformed = aug(image=image, keypoints=keypoints, **labels)
    assert np.array_equal(transformed["image"], image)
    assert transformed["keypoints"] == keypoints
    for k, v in labels.items():
        assert transformed[k] == v
Example 10
def get_transforms(size: int, scope: str = 'geometric', crop='random'):
    augs = {'strong': albu.Compose([albu.HorizontalFlip(),
                                    albu.ShiftScaleRotate(shift_limit=0.0, scale_limit=0.2, rotate_limit=20, p=.4),
                                    albu.ElasticTransform(),
                                    albu.OpticalDistortion(),
                                    albu.OneOf([
                                        albu.CLAHE(clip_limit=2),
                                        albu.IAASharpen(),
                                        albu.IAAEmboss(),
                                        albu.RandomBrightnessContrast(),
                                        albu.RandomGamma()
                                    ], p=0.5),
                                    albu.OneOf([
                                        albu.RGBShift(),
                                        albu.HueSaturationValue(),
                                    ], p=0.5),
                                    ]),
            'weak': albu.Compose([albu.HorizontalFlip(),
                                  ]),
            'geometric': albu.OneOf([albu.HorizontalFlip(always_apply=True),
                                     albu.ShiftScaleRotate(always_apply=True, scale_limit=.5, rotate_limit=30),
                                     albu.Transpose(always_apply=True),
                                     albu.OpticalDistortion(always_apply=True, distort_limit=0.1, shift_limit=0.1),
                                     albu.ElasticTransform(always_apply=True),
                                     ]),
            'empty': NoOp(),
            }

    aug_fn = augs[scope]
    crop_fn = {'random': albu.RandomCrop(size, size, always_apply=True),
               'center': albu.CenterCrop(size, size, always_apply=True)}[crop]
    pad = albu.PadIfNeeded(size, size)

    pipeline = albu.Compose([aug_fn, crop_fn, pad])

    def process(a):
        r = pipeline(image=a)
        return r['image']

    return process
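
A usage sketch for the factory above, assuming get_transforms and its albumentations imports are in scope; the input image is a random placeholder:

import numpy as np

process = get_transforms(size=256, scope='weak', crop='center')
img = (np.random.rand(300, 400, 3) * 255).astype(np.uint8)
out = process(img)  # augmented, then cropped/padded to 256x256
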
Example 11
def test_compose_with_bbox_noop_error_label_fields(bboxes, bbox_format):
    image = np.ones((100, 100, 3))
    aug = Compose([NoOp(p=1.)], bbox_params={'format': bbox_format})
    with pytest.raises(Exception):
        aug(image=image, bboxes=bboxes)
Example 12
def main():
    args = parse_args()

    if args.name is None:
        args.name = '%s_%s' % (args.arch, datetime.now().strftime('%m%d%H'))

    if not os.path.exists('models/%s' % args.name):
        os.makedirs('models/%s' % args.name)

    if args.resume:
        args = joblib.load('models/%s/args.pkl' % args.name)
        args.resume = True

    print('Config -----')
    for arg in vars(args):
        print('- %s: %s' % (arg, getattr(args, arg)))
    print('------------')

    with open('models/%s/args.txt' % args.name, 'w') as f:
        for arg in vars(args):
            print('- %s: %s' % (arg, getattr(args, arg)), file=f)

    joblib.dump(args, 'models/%s/args.pkl' % args.name)

    if args.seed is not None and not args.resume:
        print('set random seed')
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)

    if args.loss == 'BCEWithLogitsLoss':
        criterion = BCEWithLogitsLoss().cuda()
    elif args.loss == 'WeightedBCEWithLogitsLoss':
        criterion = BCEWithLogitsLoss(weight=torch.Tensor([1., 1., 1., 1., 1., 2.]),
                                      smooth=args.label_smooth).cuda()
    elif args.loss == 'FocalLoss':
        criterion = FocalLoss().cuda()
    elif args.loss == 'WeightedFocalLoss':
        criterion = FocalLoss(weight=torch.Tensor([1., 1., 1., 1., 1., 2.])).cuda()
    else:
        raise NotImplementedError

    if args.pred_type == 'all':
        num_outputs = 6
    elif args.pred_type == 'except_any':
        num_outputs = 5
    else:
        raise NotImplementedError

    cudnn.benchmark = True

    # create model
    model = get_model(model_name=args.arch,
                      num_outputs=num_outputs,
                      freeze_bn=args.freeze_bn,
                      dropout_p=args.dropout_p,
                      pooling=args.pooling,
                      lp_p=args.lp_p)
    model = model.cuda()

    train_transform = Compose([
        transforms.Resize(args.img_size, args.img_size),
        transforms.HorizontalFlip() if args.hflip else NoOp(),
        transforms.VerticalFlip() if args.vflip else NoOp(),
        transforms.ShiftScaleRotate(
            shift_limit=args.shift_limit,
            scale_limit=args.scale_limit,
            rotate_limit=args.rotate_limit,
            border_mode=cv2.BORDER_CONSTANT,
            value=0,
            p=args.shift_scale_rotate_p
        ) if args.shift_scale_rotate else NoOp(),
        transforms.RandomContrast(
            limit=args.contrast_limit,
            p=args.contrast_p
        ) if args.contrast else NoOp(),
        RandomErase() if args.random_erase else NoOp(),
        transforms.CenterCrop(args.crop_size, args.crop_size) if args.center_crop else NoOp(),
        ForegroundCenterCrop(args.crop_size) if args.foreground_center_crop else NoOp(),
        transforms.RandomCrop(args.crop_size, args.crop_size) if args.random_crop else NoOp(),
        transforms.Normalize(mean=model.mean, std=model.std),
        ToTensor(),
    ])

    if args.img_type:
        stage_1_train_dir = 'processed/stage_1_train_%s' % args.img_type
    else:
        stage_1_train_dir = 'processed/stage_1_train'

    df = pd.read_csv('inputs/stage_1_train.csv')
    img_paths = np.array([stage_1_train_dir + '/' + '_'.join(s.split('_')[:-1]) + '.png' for s in df['ID']][::6])
    labels = np.array([df.loc[c::6, 'Label'].values for c in range(6)]).T.astype('float32')

    df = df[::6]
    df['img_path'] = img_paths
    for c in range(6):
        df['label_%d' % c] = labels[:, c]
    df['ID'] = df['ID'].apply(lambda s: '_'.join(s.split('_')[:-1]))

    meta_df = pd.read_csv('processed/stage_1_train_meta.csv')
    meta_df['ID'] = meta_df['SOPInstanceUID']
    test_meta_df = pd.read_csv('processed/stage_1_test_meta.csv')
    df = pd.merge(df, meta_df, how='left')

    patient_ids = meta_df['PatientID'].unique()
    test_patient_ids = test_meta_df['PatientID'].unique()
    if args.remove_test_patient_ids:
        patient_ids = np.array([s for s in patient_ids if s not in test_patient_ids])

    train_img_paths = np.hstack(df[['img_path', 'PatientID']].groupby(['PatientID'])['img_path'].apply(np.array).loc[patient_ids].to_list()).astype('str')
    train_labels = []
    for c in range(6):
        train_labels.append(np.hstack(df[['label_%d' % c, 'PatientID']].groupby(['PatientID'])['label_%d' % c].apply(np.array).loc[patient_ids].to_list()))
    train_labels = np.array(train_labels).T

    if args.resume:
        checkpoint = torch.load('models/%s/checkpoint.pth.tar' % args.name)

    # train
    train_set = Dataset(
        train_img_paths,
        train_labels,
        transform=train_transform)
    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        # pin_memory=True,
    )

    if args.optimizer == 'Adam':
        optimizer = optim.Adam(
            filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'AdamW':
        optimizer = optim.AdamW(
            filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'RAdam':
        optimizer = RAdam(
            filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'SGD':
        optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr,
                              momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov)
    else:
        raise NotImplementedError

    if args.apex:
        # amp.initialize returns the patched model and optimizer; reassign them
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1')

    if args.scheduler == 'CosineAnnealingLR':
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=args.epochs, eta_min=args.min_lr)
    elif args.scheduler == 'MultiStepLR':
        scheduler = lr_scheduler.MultiStepLR(optimizer,
            milestones=[int(e) for e in args.milestones.split(',')], gamma=args.gamma)
    else:
        raise NotImplementedError

    log = {
        'epoch': [],
        'loss': [],
    }

    start_epoch = 0

    if args.resume:
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        start_epoch = checkpoint['epoch']
        log = pd.read_csv('models/%s/log.csv' % args.name).to_dict(orient='list')

    for epoch in range(start_epoch, args.epochs):
        print('Epoch [%d/%d]' % (epoch + 1, args.epochs))

        # train for one epoch
        train_loss = train(args, train_loader, model, criterion, optimizer, epoch)

        if args.scheduler in ('CosineAnnealingLR', 'MultiStepLR'):
            scheduler.step()

        print('loss %.4f' % (train_loss))

        log['epoch'].append(epoch)
        log['loss'].append(train_loss)

        pd.DataFrame(log).to_csv('models/%s/log.csv' % args.name, index=False)

        torch.save(model.state_dict(), 'models/%s/model.pth' % args.name)
        print("=> saved model")

        state = {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict(),
        }
        torch.save(state, 'models/%s/checkpoint.pth.tar' % args.name)
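
A minimal sketch of reloading the final weights this script saves, e.g. for inference; the get_model arguments must match those used at training time:

import torch

model = get_model(model_name=args.arch,
                  num_outputs=num_outputs,
                  freeze_bn=args.freeze_bn,
                  dropout_p=args.dropout_p,
                  pooling=args.pooling,
                  lp_p=args.lp_p)
model.load_state_dict(torch.load('models/%s/model.pth' % args.name))
model = model.cuda().eval()
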
Example 13
def main():
    config = vars(parse_args())

    if config['name'] is None:
        config['name'] = '%s_%s' % (config['arch'],
                                    datetime.now().strftime('%m%d%H'))

    config['num_filters'] = [int(n) for n in config['num_filters'].split(',')]

    if not os.path.exists('models/detection/%s' % config['name']):
        os.makedirs('models/detection/%s' % config['name'])

    if config['resume']:
        with open('models/detection/%s/config.yml' % config['name'], 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        config['resume'] = True

    with open('models/detection/%s/config.yml' % config['name'], 'w') as f:
        yaml.dump(config, f)

    print('-' * 20)
    for key in config.keys():
        print('- %s: %s' % (key, str(config[key])))
    print('-' * 20)

    cudnn.benchmark = True

    df = pd.read_csv('inputs/train.csv')
    img_paths = np.array('inputs/train_images/' + df['ImageId'].values +
                         '.jpg')
    mask_paths = np.array('inputs/train_masks/' + df['ImageId'].values +
                          '.jpg')
    labels = np.array(
        [convert_str_to_labels(s) for s in df['PredictionString']])

    test_img_paths = None
    test_mask_paths = None
    test_outputs = None
    if config['pseudo_label'] is not None:
        test_df = pd.read_csv('inputs/sample_submission.csv')
        test_img_paths = np.array('inputs/test_images/' +
                                  test_df['ImageId'].values + '.jpg')
        test_mask_paths = np.array('inputs/test_masks/' +
                                   test_df['ImageId'].values + '.jpg')
        ext = os.path.splitext(config['pseudo_label'])[1]
        if ext == '.pth':
            test_outputs = torch.load('outputs/raw/test/%s' %
                                      config['pseudo_label'])
        elif ext == '.csv':
            test_labels = pd.read_csv('outputs/submissions/test/%s' %
                                      config['pseudo_label'])
            null_idx = test_labels.isnull().any(axis=1)
            test_img_paths = test_img_paths[~null_idx]
            test_mask_paths = test_mask_paths[~null_idx]
            test_labels = test_labels.dropna()
            test_labels = np.array([
                convert_str_to_labels(
                    s, names=['pitch', 'yaw', 'roll', 'x', 'y', 'z', 'score'])
                for s in test_labels['PredictionString']
            ])
            print(test_labels)
        else:
            raise NotImplementedError

    if config['resume']:
        checkpoint = torch.load('models/detection/%s/checkpoint.pth.tar' %
                                config['name'])

    heads = OrderedDict([
        ('hm', 1),
        ('reg', 2),
        ('depth', 1),
    ])

    if config['rot'] == 'eular':
        heads['eular'] = 3
    elif config['rot'] == 'trig':
        heads['trig'] = 6
    elif config['rot'] == 'quat':
        heads['quat'] = 4
    else:
        raise NotImplementedError

    if config['wh']:
        heads['wh'] = 2

    criterion = OrderedDict()
    for head in heads.keys():
        criterion[head] = losses.__dict__[config[head + '_loss']]().cuda()

    train_transform = Compose([
        transforms.ShiftScaleRotate(shift_limit=config['shift_limit'],
                                    scale_limit=0,
                                    rotate_limit=0,
                                    border_mode=cv2.BORDER_CONSTANT,
                                    value=0,
                                    p=config['shift_p'])
        if config['shift'] else NoOp(),
        OneOf([
            transforms.HueSaturationValue(hue_shift_limit=config['hue_limit'],
                                          sat_shift_limit=config['sat_limit'],
                                          val_shift_limit=config['val_limit'],
                                          p=config['hsv_p'])
            if config['hsv'] else NoOp(),
            transforms.RandomBrightness(
                limit=config['brightness_limit'],
                p=config['brightness_p'],
            ) if config['brightness'] else NoOp(),
            transforms.RandomContrast(
                limit=config['contrast_limit'],
                p=config['contrast_p'],
            ) if config['contrast'] else NoOp(),
        ],
              p=1),
        transforms.ISONoise(p=config['iso_noise_p'])
        if config['iso_noise'] else NoOp(),
        transforms.CLAHE(p=config['clahe_p']) if config['clahe'] else NoOp(),
    ],
                              keypoint_params=KeypointParams(
                                  format='xy', remove_invisible=False))

    val_transform = None

    folds = []
    best_losses = []
    # best_scores = []

    kf = KFold(n_splits=config['n_splits'], shuffle=True, random_state=41)
    for fold, (train_idx, val_idx) in enumerate(kf.split(img_paths)):
        print('Fold [%d/%d]' % (fold + 1, config['n_splits']))

        if (config['resume'] and fold < checkpoint['fold'] - 1) or (
                not config['resume']
                and os.path.exists('models/detection/%s/model_%d.pth' %
                                   (config['name'], fold + 1))):
            log = pd.read_csv('models/detection/%s/log_%d.csv' %
                              (config['name'], fold + 1))
            best_loss = log.loc[log['val_loss'].values.argmin(), 'val_loss']
            # best_loss, best_score = log.loc[log['val_loss'].values.argmin(), ['val_loss', 'val_score']].values
            folds.append(str(fold + 1))
            best_losses.append(best_loss)
            # best_scores.append(best_score)
            continue

        train_img_paths, val_img_paths = img_paths[train_idx], img_paths[val_idx]
        train_mask_paths, val_mask_paths = mask_paths[train_idx], mask_paths[val_idx]
        train_labels, val_labels = labels[train_idx], labels[val_idx]

        if config['pseudo_label'] is not None:
            train_img_paths = np.hstack((train_img_paths, test_img_paths))
            train_mask_paths = np.hstack((train_mask_paths, test_mask_paths))
            train_labels = np.hstack((train_labels, test_labels))

        # train
        train_set = Dataset(
            train_img_paths,
            train_mask_paths,
            train_labels,
            input_w=config['input_w'],
            input_h=config['input_h'],
            transform=train_transform,
            lhalf=config['lhalf'],
            hflip=config['hflip_p'] if config['hflip'] else 0,
            scale=config['scale_p'] if config['scale'] else 0,
            scale_limit=config['scale_limit'],
            # test_img_paths=test_img_paths,
            # test_mask_paths=test_mask_paths,
            # test_outputs=test_outputs,
        )
        train_loader = torch.utils.data.DataLoader(
            train_set,
            batch_size=config['batch_size'],
            shuffle=True,
            num_workers=config['num_workers'],
            # pin_memory=True,
        )

        val_set = Dataset(val_img_paths,
                          val_mask_paths,
                          val_labels,
                          input_w=config['input_w'],
                          input_h=config['input_h'],
                          transform=val_transform,
                          lhalf=config['lhalf'])
        val_loader = torch.utils.data.DataLoader(
            val_set,
            batch_size=config['batch_size'],
            shuffle=False,
            num_workers=config['num_workers'],
            # pin_memory=True,
        )

        # create model
        model = get_model(config['arch'],
                          heads=heads,
                          head_conv=config['head_conv'],
                          num_filters=config['num_filters'],
                          dcn=config['dcn'],
                          gn=config['gn'],
                          ws=config['ws'],
                          freeze_bn=config['freeze_bn'])
        model = model.cuda()

        if config['load_model'] is not None:
            model.load_state_dict(
                torch.load('models/detection/%s/model_%d.pth' %
                           (config['load_model'], fold + 1)))

        params = filter(lambda p: p.requires_grad, model.parameters())
        if config['optimizer'] == 'Adam':
            optimizer = optim.Adam(params,
                                   lr=config['lr'],
                                   weight_decay=config['weight_decay'])
        elif config['optimizer'] == 'AdamW':
            optimizer = optim.AdamW(params,
                                    lr=config['lr'],
                                    weight_decay=config['weight_decay'])
        elif config['optimizer'] == 'RAdam':
            optimizer = RAdam(params,
                              lr=config['lr'],
                              weight_decay=config['weight_decay'])
        elif config['optimizer'] == 'SGD':
            optimizer = optim.SGD(params,
                                  lr=config['lr'],
                                  momentum=config['momentum'],
                                  nesterov=config['nesterov'],
                                  weight_decay=config['weight_decay'])
        else:
            raise NotImplementedError

        if config['apex']:
            # amp.initialize returns the patched model and optimizer; reassign them
            model, optimizer = amp.initialize(model, optimizer, opt_level='O1')

        if config['scheduler'] == 'CosineAnnealingLR':
            scheduler = lr_scheduler.CosineAnnealingLR(
                optimizer, T_max=config['epochs'], eta_min=config['min_lr'])
        elif config['scheduler'] == 'ReduceLROnPlateau':
            scheduler = lr_scheduler.ReduceLROnPlateau(
                optimizer,
                factor=config['factor'],
                patience=config['patience'],
                verbose=1,
                min_lr=config['min_lr'])
        elif config['scheduler'] == 'MultiStepLR':
            scheduler = lr_scheduler.MultiStepLR(
                optimizer,
                milestones=[int(e) for e in config['milestones'].split(',')],
                gamma=config['gamma'])
        else:
            raise NotImplementedError

        log = {
            'epoch': [],
            'loss': [],
            # 'score': [],
            'val_loss': [],
            # 'val_score': [],
        }

        best_loss = float('inf')
        # best_score = float('inf')

        start_epoch = 0

        if config['resume'] and fold == checkpoint['fold'] - 1:
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            start_epoch = checkpoint['epoch']
            log = pd.read_csv(
                'models/detection/%s/log_%d.csv' %
                (config['name'], fold + 1)).to_dict(orient='list')
            best_loss = checkpoint['best_loss']

        for epoch in range(start_epoch, config['epochs']):
            print('Epoch [%d/%d]' % (epoch + 1, config['epochs']))

            # train for one epoch
            train_loss = train(config, heads, train_loader, model, criterion,
                               optimizer, epoch)
            # evaluate on validation set
            val_loss = validate(config, heads, val_loader, model, criterion)

            if config['scheduler'] in ('CosineAnnealingLR', 'MultiStepLR'):
                scheduler.step()
            elif config['scheduler'] == 'ReduceLROnPlateau':
                scheduler.step(val_loss)

            print('loss %.4f - val_loss %.4f' % (train_loss, val_loss))
            # print('loss %.4f - score %.4f - val_loss %.4f - val_score %.4f'
            #       % (train_loss, train_score, val_loss, val_score))

            log['epoch'].append(epoch)
            log['loss'].append(train_loss)
            # log['score'].append(train_score)
            log['val_loss'].append(val_loss)
            # log['val_score'].append(val_score)

            pd.DataFrame(log).to_csv('models/detection/%s/log_%d.csv' %
                                     (config['name'], fold + 1),
                                     index=False)

            if val_loss < best_loss:
                torch.save(
                    model.state_dict(), 'models/detection/%s/model_%d.pth' %
                    (config['name'], fold + 1))
                best_loss = val_loss
                # best_score = val_score
                print("=> saved best model")

            state = {
                'fold': fold + 1,
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_loss': best_loss,
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict(),
            }
            torch.save(
                state,
                'models/detection/%s/checkpoint.pth.tar' % config['name'])

        print('val_loss:  %f' % best_loss)
        # print('val_score: %f' % best_score)

        folds.append(str(fold + 1))
        best_losses.append(best_loss)
        # best_scores.append(best_score)

        results = pd.DataFrame({
            'fold': folds + ['mean'],
            'best_loss': best_losses + [np.mean(best_losses)],
            # 'best_score': best_scores + [np.mean(best_scores)],
        })

        print(results)
        results.to_csv('models/detection/%s/results.csv' % config['name'],
                       index=False)

        del model
        torch.cuda.empty_cache()

        del train_set, train_loader
        del val_set, val_loader
        gc.collect()

        if not config['cv']:
            break
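
The per-head criteria built above (one loss module per entry in heads) are consumed inside train()/validate(), which this excerpt does not show. A hedged sketch of the usual combination, with outputs and targets as illustrative names for the model's and dataset's per-head dicts:

loss = 0
for head in heads:
    loss = loss + criterion[head](outputs[head], targets[head])
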
Example 14
def main():
    config = vars(parse_args())

    if config['name'] is None:
        config['name'] = '%s_%s' % (config['arch'], datetime.now().strftime('%m%d%H'))

    if not os.path.exists('models/pose/%s' % config['name']):
        os.makedirs('models/pose/%s' % config['name'])

    if config['resume']:
        with open('models/pose/%s/config.yml' % config['name'], 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        config['resume'] = True

    with open('models/pose/%s/config.yml' % config['name'], 'w') as f:
        yaml.dump(config, f)

    print('-'*20)
    for key in config.keys():
        print('- %s: %s' % (key, str(config[key])))
    print('-'*20)

    cudnn.benchmark = True

    df = pd.read_csv('inputs/train.csv')
    img_ids = df['ImageId'].values
    pose_df = pd.read_csv('processed/pose_train.csv')
    pose_df['img_path'] = 'processed/pose_images/train/' + pose_df['img_path']

    if config['resume']:
        checkpoint = torch.load('models/pose/%s/checkpoint.pth.tar' % config['name'])

    if config['rot'] == 'eular':
        num_outputs = 3
    elif config['rot'] == 'trig':
        num_outputs = 6
    elif config['rot'] == 'quat':
        num_outputs = 4
    else:
        raise NotImplementedError

    if config['loss'] == 'L1Loss':
        criterion = nn.L1Loss().cuda()
    elif config['loss'] == 'MSELoss':
        criterion = nn.MSELoss().cuda()
    else:
        raise NotImplementedError

    train_transform = Compose([
        transforms.ShiftScaleRotate(
            shift_limit=config['shift_limit'],
            scale_limit=0,
            rotate_limit=0,
            border_mode=cv2.BORDER_CONSTANT,
            value=0,
            p=config['shift_p']
        ) if config['shift'] else NoOp(),
        OneOf([
            transforms.HueSaturationValue(
                hue_shift_limit=config['hue_limit'],
                sat_shift_limit=config['sat_limit'],
                val_shift_limit=config['val_limit'],
                p=config['hsv_p']
            ) if config['hsv'] else NoOp(),
            transforms.RandomBrightness(
                limit=config['brightness_limit'],
                p=config['brightness_p'],
            ) if config['brightness'] else NoOp(),
            transforms.RandomContrast(
                limit=config['contrast_limit'],
                p=config['contrast_p'],
            ) if config['contrast'] else NoOp(),
        ], p=1),
        transforms.ISONoise(
            p=config['iso_noise_p'],
        ) if config['iso_noise'] else NoOp(),
        transforms.CLAHE(
            p=config['clahe_p'],
        ) if config['clahe'] else NoOp(),
        transforms.Resize(config['input_w'], config['input_h']),
        transforms.Normalize(),
        ToTensor(),
    ])

    val_transform = Compose([
        transforms.Resize(config['input_w'], config['input_h']),
        transforms.Normalize(),
        ToTensor(),
    ])

    folds = []
    best_losses = []

    kf = KFold(n_splits=config['n_splits'], shuffle=True, random_state=41)
    for fold, (train_idx, val_idx) in enumerate(kf.split(img_ids)):
        print('Fold [%d/%d]' % (fold + 1, config['n_splits']))

        if (config['resume'] and fold < checkpoint['fold'] - 1) or (not config['resume'] and os.path.exists('models/pose/%s/model_%d.pth' % (config['name'], fold + 1))):
            log = pd.read_csv('models/pose/%s/log_%d.csv' % (config['name'], fold + 1))
            best_loss = log.loc[log['val_loss'].values.argmin(), 'val_loss']
            # best_loss, best_score = log.loc[log['val_loss'].values.argmin(), ['val_loss', 'val_score']].values
            folds.append(str(fold + 1))
            best_losses.append(best_loss)
            # best_scores.append(best_score)
            continue

        train_img_ids, val_img_ids = img_ids[train_idx], img_ids[val_idx]

        train_img_paths = []
        train_labels = []
        for img_id in train_img_ids:
            tmp = pose_df.loc[pose_df.ImageId == img_id]

            img_path = tmp['img_path'].values
            train_img_paths.append(img_path)

            yaw = tmp['yaw'].values
            pitch = tmp['pitch'].values
            roll = tmp['roll'].values
            roll = rotate(roll, np.pi)

            if config['rot'] == 'eular':
                label = np.array([
                    yaw,
                    pitch,
                    roll
                ]).T
            elif config['rot'] == 'trig':
                label = np.array([
                    np.cos(yaw),
                    np.sin(yaw),
                    np.cos(pitch),
                    np.sin(pitch),
                    np.cos(roll),
                    np.sin(roll),
                ]).T
            elif config['rot'] == 'quat':
                raise NotImplementedError
            else:
                raise NotImplementedError

            train_labels.append(label)
        train_img_paths = np.hstack(train_img_paths)
        train_labels = np.vstack(train_labels)

        val_img_paths = []
        val_labels = []
        for img_id in val_img_ids:
            tmp = pose_df.loc[pose_df.ImageId == img_id]

            img_path = tmp['img_path'].values
            val_img_paths.append(img_path)

            yaw = tmp['yaw'].values
            pitch = tmp['pitch'].values
            roll = tmp['roll'].values
            roll = rotate(roll, np.pi)

            if config['rot'] == 'eular':
                label = np.array([
                    yaw,
                    pitch,
                    roll
                ]).T
            elif config['rot'] == 'trig':
                label = np.array([
                    np.cos(yaw),
                    np.sin(yaw),
                    np.cos(pitch),
                    np.sin(pitch),
                    np.cos(roll),
                    np.sin(roll),
                ]).T
            elif config['rot'] == 'quat':
                raise NotImplementedError
            else:
                raise NotImplementedError

            val_labels.append(label)
        val_img_paths = np.hstack(val_img_paths)
        val_labels = np.vstack(val_labels)

        # train
        train_set = PoseDataset(
            train_img_paths,
            train_labels,
            transform=train_transform,
        )
        train_loader = torch.utils.data.DataLoader(
            train_set,
            batch_size=config['batch_size'],
            shuffle=True,
            num_workers=config['num_workers'],
            # pin_memory=True,
        )

        val_set = PoseDataset(
            val_img_paths,
            val_labels,
            transform=val_transform,
        )
        val_loader = torch.utils.data.DataLoader(
            val_set,
            batch_size=config['batch_size'],
            shuffle=False,
            num_workers=config['num_workers'],
            # pin_memory=True,
        )

        # create model
        model = get_pose_model(config['arch'],
                               num_outputs=num_outputs,
                               freeze_bn=config['freeze_bn'])
        model = model.cuda()

        params = filter(lambda p: p.requires_grad, model.parameters())
        if config['optimizer'] == 'Adam':
            optimizer = optim.Adam(params, lr=config['lr'], weight_decay=config['weight_decay'])
        elif config['optimizer'] == 'AdamW':
            optimizer = optim.AdamW(params, lr=config['lr'], weight_decay=config['weight_decay'])
        elif config['optimizer'] == 'RAdam':
            optimizer = RAdam(params, lr=config['lr'], weight_decay=config['weight_decay'])
        elif config['optimizer'] == 'SGD':
            optimizer = optim.SGD(params, lr=config['lr'], momentum=config['momentum'],
                                  nesterov=config['nesterov'], weight_decay=config['weight_decay'])
        else:
            raise NotImplementedError

        if config['scheduler'] == 'CosineAnnealingLR':
            scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=config['epochs'], eta_min=config['min_lr'])
        elif config['scheduler'] == 'ReduceLROnPlateau':
            scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=config['factor'], patience=config['patience'],
                                                       verbose=1, min_lr=config['min_lr'])
        elif config['scheduler'] == 'MultiStepLR':
            scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[int(e) for e in config['milestones'].split(',')], gamma=config['gamma'])
        else:
            raise NotImplementedError

        log = {
            'epoch': [],
            'loss': [],
            # 'score': [],
            'val_loss': [],
            # 'val_score': [],
        }

        best_loss = float('inf')
        # best_score = float('inf')

        start_epoch = 0

        if config['resume'] and fold == checkpoint['fold'] - 1:
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            start_epoch = checkpoint['epoch']
            log = pd.read_csv('models/pose/%s/log_%d.csv' % (config['name'], fold+1)).to_dict(orient='list')
            best_loss = checkpoint['best_loss']

        for epoch in range(start_epoch, config['epochs']):
            print('Epoch [%d/%d]' % (epoch + 1, config['epochs']))

            # train for one epoch
            train_loss = train(config, train_loader, model, criterion, optimizer, epoch)
            # evaluate on validation set
            val_loss = validate(config, val_loader, model, criterion)

            if config['scheduler'] in ('CosineAnnealingLR', 'MultiStepLR'):
                scheduler.step()
            elif config['scheduler'] == 'ReduceLROnPlateau':
                scheduler.step(val_loss)

            print('loss %.4f - val_loss %.4f' % (train_loss, val_loss))
            # print('loss %.4f - score %.4f - val_loss %.4f - val_score %.4f'
            #       % (train_loss, train_score, val_loss, val_score))

            log['epoch'].append(epoch)
            log['loss'].append(train_loss)
            # log['score'].append(train_score)
            log['val_loss'].append(val_loss)
            # log['val_score'].append(val_score)

            pd.DataFrame(log).to_csv('models/pose/%s/log_%d.csv' % (config['name'], fold+1), index=False)

            if val_loss < best_loss:
                torch.save(model.state_dict(), 'models/pose/%s/model_%d.pth' % (config['name'], fold+1))
                best_loss = val_loss
                # best_score = val_score
                print("=> saved best model")

            state = {
                'fold': fold + 1,
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_loss': best_loss,
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict(),
            }
            torch.save(state, 'models/pose/%s/checkpoint.pth.tar' % config['name'])

        print('val_loss:  %f' % best_loss)
        # print('val_score: %f' % best_score)

        folds.append(str(fold + 1))
        best_losses.append(best_loss)
        # best_scores.append(best_score)

        results = pd.DataFrame({
            'fold': folds + ['mean'],
            'best_loss': best_losses + [np.mean(best_losses)],
            # 'best_score': best_scores + [np.mean(best_scores)],
        })

        print(results)
        results.to_csv('models/pose/%s/results.csv' % config['name'], index=False)

        del model
        torch.cuda.empty_cache()

        del train_set, train_loader
        del val_set, val_loader
        gc.collect()

        if not config['cv']:
            break
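
The 'trig' branch above encodes each angle as a (cos, sin) pair, which avoids wrap-around discontinuities in the regression target. Decoding a prediction back to angles is a plain arctan2; a sketch, with pred as an illustrative (N, 6) output array (note the script also applies rotate(roll, np.pi) before encoding, which a full decoder would have to undo):

import numpy as np

def decode_trig(pred):
    # columns: cos(yaw), sin(yaw), cos(pitch), sin(pitch), cos(roll), sin(roll)
    yaw = np.arctan2(pred[:, 1], pred[:, 0])
    pitch = np.arctan2(pred[:, 3], pred[:, 2])
    roll = np.arctan2(pred[:, 5], pred[:, 4])
    return yaw, pitch, roll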