Example #1
import torch

def test_Resnet50Encoder():
    # Resnet50Encoder and count_parameters come from the surrounding project.
    net = Resnet50Encoder().eval()
    print(count_parameters(net))
    x = torch.rand((4, 3, 512, 512))  # a batch of four 3x512x512 images
    y = net(x)  # the encoder returns one feature map per stage
    for yi in y:
        print(yi.size())
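count_parameters is used throughout these examples but never defined in the excerpts. A minimal sketch of a typical implementation (an assumption, not necessarily this project's exact helper):

import torch

def count_parameters(model: torch.nn.Module) -> int:
    # Sum element counts over all trainable parameters.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)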
Example #2
import numpy as np
import torch

def test_retinanet34_shared():
    # RRetinaNetShared, Resnet34Encoder, the loss classes and
    # count_parameters come from the surrounding project.
    model = RRetinaNetShared(encoder=Resnet34Encoder(),
                             num_classes=1,
                             image_size=(768, 768))

    model.set_encoder_training_enabled(False)  # freeze the encoder weights
    model.eval()
    model.cuda()

    print(count_parameters(model))

    # Two ground-truth boxes (five values each; the fifth looks like a
    # rotation angle) and their class labels
    boxes = np.array([[40, 50, 100, 20, 15], [200, 200, 16, 4, -33]],
                     dtype=np.float32)
    labels = np.array([0, 0], dtype=int)
    true_loc, true_cls = model.box_coder.encode(boxes, labels)

    image = torch.randn(1, 3, model.box_coder.image_height,
                        model.box_coder.image_width)
    pred_bboxes, pred_labels = model(image.cuda())
    print(pred_bboxes.size())
    print(pred_labels.size())

    cls_loss = SSDBinaryLabelLoss()
    loc_loss = RSSDLocationLoss()

    # Add a batch dimension so the targets match the batched predictions
    true_loc = torch.unsqueeze(torch.from_numpy(true_loc), 0)
    true_cls = torch.unsqueeze(torch.from_numpy(true_cls).long(), 0)

    floss = cls_loss(pred_bboxes, pred_labels, true_loc.cuda(),
                     true_cls.cuda())
    lloss = loc_loss(pred_bboxes, pred_labels, true_loc.cuda(),
                     true_cls.cuda())
    print('Random input', floss, lloss)
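The unsqueeze calls above batch a single encoded target. A hedged sketch of the same pattern generalized to several samples (collate_targets is a hypothetical helper, not part of the project):

import numpy as np
import torch

def collate_targets(encoded_samples):
    # encoded_samples: list of (loc, cls) numpy pairs from box_coder.encode
    locs, clss = zip(*encoded_samples)
    return (torch.from_numpy(np.stack(locs)),
            torch.from_numpy(np.stack(clss)).long())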
Example #3
import torch

def test_se_resnext_unet():
    # UNetModel and its building blocks come from the surrounding project.
    net = UNetModel(num_classes=1,
                    encoder=SEResNeXt50Encoder,
                    decoder_block=UnetDecoderBlockSE,
                    central_block=UnetCentralBlock)
    net.eval()
    net.set_encoder_training_enabled(False)  # freeze the encoder
    print(count_parameters(net))
    x = torch.rand((4, 3, 512, 512))

    mask = net(x)  # segmentation output, one channel per class
    print(mask.size())
    print(net)
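set_encoder_training_enabled is project-specific. A plausible implementation simply toggles requires_grad on the encoder's parameters; this standalone sketch is an assumption, not the project's code:

import torch.nn as nn

def set_encoder_training_enabled(model: nn.Module, enabled: bool) -> None:
    # Freeze (or unfreeze) every parameter of the model's encoder.
    for p in model.encoder.parameters():
        p.requires_grad = enabled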
Example #4
import torch

def test():
    # FPNSSD512 and count_parameters come from the surrounding project.
    model = FPNSSD512(1)
    image = torch.randn(4, 3, 512, 512)
    output = model(image)  # CPU forward pass as a quick smoke test
    print(count_parameters(model))

    model.cuda()
    image = image.cuda()

    # torch.cuda.profiler.profile() yields no result object to print; the
    # NVTX ranges emitted below are collected by an external profiler such
    # as nvprof or Nsight Systems.
    with torch.cuda.profiler.profile():
        model(image)  # warm up the CUDA memory allocator and profiler
        with torch.autograd.profiler.emit_nvtx():
            model(image)
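If printable timings are wanted in-process rather than in nvprof, the legacy autograd profiler is an alternative. A minimal sketch, assuming FPNSSD512 is importable and a CUDA device is available:

import torch

model = FPNSSD512(1).cuda().eval()
image = torch.randn(4, 3, 512, 512).cuda()
with torch.no_grad():
    with torch.autograd.profiler.profile(use_cuda=True) as prof:
        model(image)
# key_averages() aggregates per-op timings; sort by total CUDA time
print(prof.key_averages().table(sort_by='cuda_time_total'))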
Example #5
    def get_encoder_features(self, image):
        # Run the image through each encoder stage, keeping every
        # intermediate feature map for the decoder's skip connections.
        features = []
        x = image
        for mod in self.encoders:
            x = mod(x)
            features.append(x)
        return features

    def forward(self, image):
        features = self.get_encoder_features(image)

        # Bottleneck: pool the deepest feature map and apply the central block
        x = self.center(F.max_pool2d(features[-1], kernel_size=2, stride=2))

        # Walk the decoder, fusing encoder features from deepest to shallowest
        for encoder_features, decoder_block in zip(reversed(features),
                                                   self.decoder):
            x = decoder_block(x, encoder_features)

        # Upsample by 4x back toward the input resolution
        x = F.interpolate(x,
                          scale_factor=4,
                          mode='bilinear',
                          align_corners=True)
        x = self.final(x)
        return x


if __name__ == '__main__':
    net = SenetUnet(num_classes=2).eval()
    print(count_parameters(net))

    mask = net(torch.rand((4, 3, 256, 256)))
    print(mask.size())
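Each decoder block is called as block(x, skip). A hedged sketch of a minimal block satisfying that interface (SimpleDecoderBlock is hypothetical, not the project's UnetDecoderBlockSE):

import torch
import torch.nn as nn
import torch.nn.functional as F

class SimpleDecoderBlock(nn.Module):
    def __init__(self, in_channels, skip_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels + skip_channels, out_channels,
                              kernel_size=3, padding=1)

    def forward(self, x, skip):
        # Upsample to the skip connection's spatial size, concatenate, convolve
        x = F.interpolate(x, size=skip.shape[2:], mode='nearest')
        x = torch.cat([x, skip], dim=1)
        return F.relu(self.conv(x))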
Example #6
import os
from collections import OrderedDict

import torch

# ResNeXt, ABN and count_parameters come from the surrounding project.

def resnext50(**kwargs):
    return ResNeXt([3, 4, 6, 3], **kwargs)


def resnext101(pretrained=True, input_3x3=True, abn_block=ABN, **kwargs):
    model = ResNeXt([3, 4, 23, 3],
                    input_3x3=input_3x3,
                    abn_block=abn_block,
                    classes=1000,
                    **kwargs)
    if pretrained and input_3x3:  # the checkpoint covers only the 3x3-stem variant
        checkpoint = torch.load(
            os.path.join('pretrain', 'resnext101_ipabn_lr_512.pth.tar'))
        state_dict = checkpoint['state_dict']
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:]  # strip the 'module.' prefix added by nn.DataParallel
            new_state_dict[name] = v
        model.load_state_dict(new_state_dict)
    return model


def resnext152(**kwargs):
    return ResNeXt([3, 8, 36, 3], **kwargs)


if __name__ == '__main__':
    print(count_parameters(resnext50()))
    print(count_parameters(resnext101()))
    print(count_parameters(resnext152()))
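The key-renaming loop in resnext101 is a common checkpoint-loading pattern. A reusable sketch (strip_module_prefix is a hypothetical helper):

from collections import OrderedDict

def strip_module_prefix(state_dict):
    # Undo the 'module.' key prefix that nn.DataParallel adds when saving
    return OrderedDict(
        (k[len('module.'):] if k.startswith('module.') else k, v)
        for k, v in state_dict.items())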
Example #7
import argparse
import json
import os
from datetime import datetime

import pandas as pd
from torch.utils.tensorboard import SummaryWriter  # the project may use tensorboardX instead

# get_model, get_dataloaders, restore_checkpoint, train and the other
# helpers referenced below come from the surrounding project.

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=42, help='Random seed')
    parser.add_argument('-dd',
                        '--data-dir',
                        type=str,
                        default='d:\\datasets\\airbus',
                        help='Data dir')
    parser.add_argument('-m',
                        '--model',
                        type=str,
                        default='rretina_net',
                        help='Model name')
    parser.add_argument('-b',
                        '--batch-size',
                        type=int,
                        default=4,
                        help='Batch Size during training, e.g. -b 64')
    parser.add_argument('-e',
                        '--epochs',
                        type=int,
                        default=150,
                        help='Epoch to run')
    parser.add_argument('-es',
                        '--early-stopping',
                        type=int,
                        default=None,
                        help='Maximum number of epochs without improvement')
    parser.add_argument('-f',
                        '--fold',
                        default=0,
                        type=int,
                        help='Fold to train')
    parser.add_argument('-fe',
                        '--freeze-encoder',
                        type=int,
                        default=0,
                        help='Freeze encoder parameters for N epochs')
    parser.add_argument('-ft', '--fine-tune', action='store_true')
    parser.add_argument('--fast', action='store_true')
    parser.add_argument('-lr',
                        '--learning-rate',
                        type=float,
                        default=1e-3,
                        help='Initial learning rate')
    parser.add_argument('-lrs',
                        '--lr-scheduler',
                        default=None,
                        help='LR scheduler')
    parser.add_argument('-o',
                        '--optimizer',
                        default='Adam',
                        help='Name of the optimizer')
    parser.add_argument('-r',
                        '--resume',
                        type=str,
                        default=None,
                        help='Checkpoint filename to resume')
    parser.add_argument('-w',
                        '--workers',
                        default=4,
                        type=int,
                        help='Num workers')
    parser.add_argument('-wd',
                        '--weight-decay',
                        type=float,
                        default=0,
                        help='L2 weight decay')
    parser.add_argument('-p', '--patch-size', type=int, default=768,
                        help='Image patch size')
    parser.add_argument('-ew', '--encoder-weights', default=None, type=str)

    args = parser.parse_args()
    set_manual_seed(args.seed)

    train_session_args = vars(args)
    train_session = get_random_name()
    current_time = datetime.now().strftime('%b%d_%H_%M')
    prefix = f'{current_time}_{args.model}_f{args.fold}_{train_session}_{args.patch_size}'
    if args.fast:
        prefix += '_fast'

    print(prefix)
    print(args)

    log_dir = os.path.join('runs', prefix)
    exp_dir = os.path.join('experiments', args.model, prefix)
    os.makedirs(exp_dir, exist_ok=True)

    model = get_model(args.model,
                      num_classes=1,
                      image_size=(args.patch_size, args.patch_size))
    print(count_parameters(model))

    train_loader, valid_loader = get_dataloaders(
        args.data_dir,
        box_coder=model.box_coder,
        fold=args.fold,
        patch_size=args.patch_size,
        train_batch_size=args.batch_size,
        valid_batch_size=args.batch_size,
        fast=args.fast)

    # Declare variables we will use during training
    start_epoch = 0
    train_history = pd.DataFrame()

    best_metric_val = 0
    best_lb_checkpoint = os.path.join(exp_dir, f'{prefix}.pth')

    if args.encoder_weights:
        classifier = get_model('seresnext_cls', num_classes=1)
        restore_checkpoint(auto_file(args.encoder_weights), classifier)
        encoder_state = classifier.encoder.state_dict()
        model.encoder.load_state_dict(encoder_state)
        del classifier

    if args.resume:
        fname = auto_file(args.resume)
        start_epoch, train_history, best_score = restore_checkpoint(
            fname, model)
        print(train_history)
        print('Resuming training from epoch', start_epoch, 'and score',
              best_score, args.resume)

    writer = SummaryWriter(log_dir)
    writer.add_text('train/params',
                    '```' + json.dumps(train_session_args, indent=2) + '```',
                    0)
    # log_model_graph(writer, model)

    config_fname = os.path.join(exp_dir, f'{train_session}.json')
    with open(config_fname, 'w') as f:
        f.write(json.dumps(train_session_args, indent=2))

    # Main training phase
    model.cuda()
    trainable_parameters = filter(lambda p: p.requires_grad,
                                  model.parameters())
    optimizer = get_optimizer(args.optimizer,
                              trainable_parameters,
                              args.learning_rate,
                              weight_decay=args.weight_decay)
    # scheduler = ReduceLROnPlateau(optimizer, mode='max', patience=50, factor=0.5, min_lr=1e-5)
    scheduler = None

    train_history, best_metric_val, start_epoch = train(
        model,
        optimizer,
        scheduler,
        train_loader,
        valid_loader,
        writer,
        start_epoch,
        epochs=args.epochs,
        early_stopping=args.early_stopping,
        train_history=train_history,
        experiment_dir=exp_dir,
        best_metric_val=best_metric_val,
        checkpoint_filename=best_lb_checkpoint)

    train_history.to_csv(os.path.join(exp_dir, 'train_history.csv'),
                         index=False)
    print('Training finished')
    del train_loader, valid_loader, optimizer

    # Restore to best model
    restore_checkpoint(best_lb_checkpoint, model)

    # Make OOF predictions
    _, valid_ids = get_train_test_split_for_fold(args.fold)
    validset_full = D.RSSDDataset(sample_ids=valid_ids,
                                  data_dir=args.data_dir,
                                  transform=get_transform(
                                      training=False,
                                      width=args.patch_size,
                                      height=args.patch_size),
                                  box_coder=model.box_coder)
    oof_predictions = model.predict_as_csv(validset_full,
                                           batch_size=args.batch_size,
                                           workers=args.workers)
    oof_predictions.to_csv(os.path.join(exp_dir,
                                        f'{prefix}_oof_predictions.csv'),
                           index=False)
    del validset_full

    testset_full = D.RSSDDataset(sample_ids=all_test_ids(args.data_dir),
                                 test=True,
                                 data_dir=args.data_dir,
                                 transform=get_transform(
                                     training=False,
                                     width=args.patch_size,
                                     height=args.patch_size),
                                 box_coder=model.box_coder)
    test_predictions = model.predict_as_csv(testset_full,
                                            batch_size=args.batch_size,
                                            workers=args.workers)
    test_predictions.to_csv(os.path.join(exp_dir,
                                         f'{prefix}_test_predictions.csv'),
                            index=False)
    print('Predictions saved')
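set_manual_seed is another project helper. A typical implementation seeds every RNG the pipeline touches; this is a sketch under that assumption:

import random

import numpy as np
import torch

def set_manual_seed(seed: int) -> None:
    # Seed Python, NumPy and PyTorch (CPU and all GPUs) for reproducibility
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)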