def load_net(checkpoint_path):
    """Build a tf_efficientdet_d5 (2 classes, 512px), restore the trained
    weights from *checkpoint_path* and return an eval bench on GPU."""
    config = get_efficientdet_config('tf_efficientdet_d5')
    net = EfficientDet(config, pretrained_backbone=False)

    # Adjust the config for this task, then rebuild the classification head.
    config.num_classes = 2
    config.image_size = 512
    net.class_net = HeadNet(
        config,
        num_outputs=config.num_classes,
        norm_kwargs=dict(eps=.001, momentum=.01),
    )

    state = torch.load(checkpoint_path)
    net.load_state_dict(state['model_state_dict'])

    bench = DetBenchEval(net, config)
    bench.eval()
    return bench.cuda()
def load_net():
    """Construct a tf_efficientdet_d0 detector (1 class, 512px), restore
    the checkpoint bundled under DIR_PATH and return the eval bench on GPU.

    NOTE: redefines `load_net` from the snippet above (scraped aggregation).
    """
    config = get_efficientdet_config('tf_efficientdet_d0')
    net = EfficientDet(config, pretrained_backbone=False)

    config.num_classes = 1
    config.image_size = 512
    net.class_net = HeadNet(
        config,
        num_outputs=config.num_classes,
        norm_kwargs=dict(eps=.001, momentum=.01),
    )

    ckpt = torch.load(DIR_PATH + '/models/effdet_trained.pth')
    net.load_state_dict(ckpt["model_state_dict"])
    # Drop the checkpoint tensors before moving the model to GPU.
    del ckpt
    gc.collect()

    bench = DetBenchEval(net, config)
    bench.eval()
    return bench.cuda()
def set_eval_effdet(checkpoint_path: str,
                    config,
                    num_classes: int = 1,
                    device: torch.device = 'cuda:0'):
    """Init EfficientDet to validation mode.

    Args:
        checkpoint_path: path to a saved bare state_dict (not wrapped in a
            'model_state_dict' key, unlike the other loaders in this file).
        config: an effdet config object used to build the model and bench.
        num_classes: number of output classes for the rebuilt class head.
        device: torch device (or device string) to move the bench onto.

    Returns:
        DetBenchEval wrapper in eval mode on *device*.
    """
    net = EfficientDet(config, pretrained_backbone=False)
    net.class_net = HeadNet(config,
                            num_outputs=num_classes,
                            norm_kwargs=dict(eps=.001, momentum=.01))

    checkpoint = torch.load(checkpoint_path)
    net.load_state_dict(checkpoint)

    net = DetBenchEval(net, config)
    net = net.eval()

    # BUG FIX: nn.Module has no `to_device` method — the original line
    # raised AttributeError; `.to(device)` is the correct API.
    return net.to(device)
# Example #4
# 0
def get_test_net(best_weigth):
    """Build a tf_efficientdet_d5 (1 class, 512px), load the given weights
    and return an evaluation bench on GPU.

    Args:
        best_weigth: an already-loaded state_dict of trained model weights.

    Returns:
        DetBenchEval wrapper in eval mode, moved to CUDA.
    """
    config = get_efficientdet_config('tf_efficientdet_d5')
    net = EfficientDet(config, pretrained_backbone=False)

    config.num_classes = 1
    config.image_size = 512
    net.class_net = HeadNet(config,
                            num_outputs=config.num_classes,
                            norm_kwargs=dict(eps=.001, momentum=.01))

    net.load_state_dict(best_weigth)
    gc.collect()

    net = DetBenchEval(net, config)
    # BUG FIX: was `net.train()`, which leaves BatchNorm/Dropout in training
    # mode inside an evaluation bench. A test-time net must be in eval mode,
    # consistent with every other loader in this file.
    net = net.eval()
    return net.cuda()
def get_effdet_eval(checkpoint_path: str):
    """Restore a trained tf_efficientdet_d5 (1 class, 512px) from
    *checkpoint_path* and return it wrapped in DetBenchEval (left on CPU)."""
    cfg = get_efficientdet_config("tf_efficientdet_d5")
    net = EfficientDet(cfg, pretrained_backbone=False)

    cfg.num_classes = 1
    cfg.image_size = 512
    net.class_net = HeadNet(
        cfg,
        num_outputs=cfg.num_classes,
        norm_kwargs=dict(eps=0.001, momentum=0.01),
    )

    state = torch.load(checkpoint_path)
    net.load_state_dict(state["model_state_dict"])
    # Release the checkpoint tensors right away to keep peak memory down.
    del state
    gc.collect()

    bench = DetBenchEval(net, cfg)
    bench.eval()
    return bench
# Example #6
# 0
def load_model_for_eval(checkpoint_path, variant):
    """Load a trained tf_efficientdet_<variant> checkpoint (1 class, 512px)
    and return it as a CUDA evaluation bench."""
    cfg = get_efficientdet_config(f"tf_efficientdet_{variant}")
    net = EfficientDet(cfg, pretrained_backbone=False)

    cfg.num_classes = 1
    cfg.image_size = 512
    net.class_net = HeadNet(
        cfg,
        num_outputs=cfg.num_classes,
        norm_kwargs=dict(eps=0.001, momentum=0.01),
    )

    state = torch.load(checkpoint_path)
    net.load_state_dict(state["model_state_dict"])
    # Free the checkpoint tensors before the CUDA transfer.
    del state
    gc.collect()

    bench = DetBenchEval(net, cfg)
    bench.eval()

    return bench.cuda()
# Example #7
# 0
def do_main():
    """Train a tf_efficientdet_d5 wheat detector for one fold with Neptune
    logging.

    Relies on module-level globals for everything: dataframes
    (train_boxes_df, train_images_df), paths (DIR_TRAIN, DIR_NEGATIVE),
    column names, fold index and all hyperparameters.
    """
    neptune.init('ods/wheat')
    # Create experiment with defined parameters
    neptune.create_experiment(name=model_name,
                              params=PARAMS,
                              tags=[experiment_name, experiment_tag],
                              upload_source_files=[os.path.basename(__file__)])

    neptune.append_tags(f'fold_{fold}')

    device = torch.device(f'cuda:{gpu_number}') if torch.cuda.is_available(
    ) else torch.device('cpu')
    print(device)

    print(len(train_boxes_df))
    print(len(train_images_df))

    # Leave only > 0
    print('Leave only train images with boxes (validation)')
    # Mask of images that have at least one annotated box; applied only to
    # the validation split below (training additionally mixes in negatives).
    with_boxes_filter = train_images_df[image_id_column].isin(
        train_boxes_df[image_id_column].unique())

    negative_images = enumerate_images(DIR_NEGATIVE)
    # Drop the 4-char file extension and prepend the negative-set prefix.
    negative_images = [(negative_prefix + filename[:-4])
                       for filename in negative_images]
    negative_images.sort()
    # take first 100 now...
    negative_images = negative_images[:100]
    """
    spike_images = enumerate_images(DIR_SPIKE)
    spike_images = [(spike_dataset_prefix + filename[:-4]) for filename in spike_images]
    spike_images.sort()
    assert len(spike_images) > 0
    """

    # Build the net from COCO-pretrained d5 weights, then swap in a
    # single-class head sized for our image resolution.
    config = get_efficientdet_config('tf_efficientdet_d5')
    net = EfficientDet(config, pretrained_backbone=False)
    load_weights(net,
                 '../timm-efficientdet-pytorch/efficientdet_d5-ef44aea8.pth')

    config.num_classes = 1
    config.image_size = our_image_size
    net.class_net = HeadNet(config,
                            num_outputs=config.num_classes,
                            norm_kwargs=dict(eps=.001, momentum=.01))

    # Train and eval benches share the same underlying `net` weights.
    model_train = DetBenchTrain(net, config)
    model_eval = DetBenchEval(net, config)

    manager = ModelManager(model_train, model_eval, device)

    pretrained_weights_file = 'pretrained.pth'

    # Fold split: validation = this fold (boxes only); train = all others.
    images_val = train_images_df.loc[(train_images_df[fold_column] == fold)
                                     & with_boxes_filter,
                                     image_id_column].values
    images_train = train_images_df.loc[(train_images_df[fold_column] != fold),
                                       image_id_column].values

    #images_train = list(images_train) + list(negative_images) + list(spike_images)
    images_train = list(images_train) + list(negative_images)
    print(len(images_train), len(images_val))

    train_dataset = WheatDataset(images_train,
                                 DIR_TRAIN,
                                 train_boxes_df,
                                 transforms=get_train_transform(),
                                 is_test=False)
    valid_dataset = WheatDataset(images_val,
                                 DIR_TRAIN,
                                 train_boxes_df,
                                 transforms=get_valid_transform(),
                                 is_test=True)

    train_data_loader = DataLoader(train_dataset,
                                   batch_size=train_batch_size,
                                   shuffle=True,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn,
                                   drop_last=True)

    valid_data_loader = DataLoader(valid_dataset,
                                   batch_size=inf_batch_size,
                                   shuffle=False,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn)

    weights_file = f'{experiment_name}.pth'
    # Resume from a local 'pretrained.pth' when present, overriding the COCO
    # weights loaded above.
    if os.path.exists(pretrained_weights_file):
        # continue training
        print('Continue training, loading weights: ' + pretrained_weights_file)
        load_weights(net, pretrained_weights_file)

    manager.run_train(train_data_loader,
                      valid_data_loader,
                      n_epoches=n_epochs,
                      weights_file=weights_file,
                      factor=factor,
                      start_lr=start_lr,
                      min_lr=min_lr,
                      lr_patience=lr_patience,
                      overall_patience=overall_patience,
                      loss_delta=loss_delta)

    # add tags
    neptune.log_text('save checkpoints as', weights_file[:-4])
    neptune.stop()
# Example #8
# 0
def validate(args):
    """Run COCO-style evaluation of an EfficientDet model.

    Builds the model named by args.model, optionally restores
    args.checkpoint, wraps it in DetBenchEval (with optional AMP and
    DataParallel), runs inference over the split named by args.anno, dumps
    the detections to args.results as JSON, and — for non-test splits —
    scores them with COCOeval. Returns the list of detection dicts.
    """
    # might as well try to validate something
    args.pretrained = args.pretrained or not args.checkpoint
    args.prefetcher = not args.no_prefetcher

    # create model
    config = get_efficientdet_config(args.model)
    model = EfficientDet(config)
    if args.checkpoint:
        load_checkpoint(model, args.checkpoint)

    param_count = sum([m.numel() for m in model.parameters()])
    print('Model %s created, param count: %d' % (args.model, param_count))

    bench = DetBenchEval(model, config)
    bench = bench.cuda()
    if has_amp:
        print('Using AMP mixed precision.')
        bench = amp.initialize(bench, opt_level='O1')
    else:
        print('AMP not installed, running network in FP32.')

    if args.num_gpu > 1:
        bench = torch.nn.DataParallel(bench,
                                      device_ids=list(range(args.num_gpu)))

    # Test splits ship only image metadata; other splits carry instances.
    if 'test' in args.anno:
        annotation_path = os.path.join(args.data, 'annotations',
                                       f'image_info_{args.anno}.json')
        image_dir = 'test2017'
    else:
        annotation_path = os.path.join(args.data, 'annotations',
                                       f'instances_{args.anno}.json')
        image_dir = args.anno
    dataset = CocoDetection(os.path.join(args.data, image_dir),
                            annotation_path)

    loader = create_loader(dataset,
                           input_size=config.image_size,
                           batch_size=args.batch_size,
                           use_prefetcher=args.prefetcher,
                           interpolation=args.interpolation,
                           num_workers=args.workers)

    img_ids = []
    results = []
    # NOTE(review): eval mode is set on the inner `model`, not on `bench`;
    # the bench wraps the model so this should suffice — confirm.
    model.eval()
    batch_time = AverageMeter()
    end = time.time()
    with torch.no_grad():
        # `input` shadows the builtin — kept as-is (doc-only change).
        for i, (input, target) in enumerate(loader):
            output = bench(input, target['scale'])
            output = output.cpu()
            sample_ids = target['img_id'].cpu()
            for index, sample in enumerate(output):
                image_id = int(sample_ids[index])
                # Per-detection row layout: det[0:4]=bbox, det[4]=score,
                # det[5]=category id.
                for det in sample:
                    score = float(det[4])
                    if score < .001:  # stop when below this threshold, scores in descending order
                        break
                    coco_det = dict(image_id=image_id,
                                    bbox=det[0:4].tolist(),
                                    score=score,
                                    category_id=int(det[5]))
                    img_ids.append(image_id)
                    results.append(coco_det)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.log_freq == 0:
                print(
                    'Test: [{0:>4d}/{1}]  '
                    'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s)  '
                    .format(
                        i,
                        len(loader),
                        batch_time=batch_time,
                        rate_avg=input.size(0) / batch_time.avg,
                    ))

    json.dump(results, open(args.results, 'w'), indent=4)
    if 'test' not in args.anno:
        # Score the dumped detections against ground truth via pycocotools.
        coco_results = dataset.coco.loadRes(args.results)
        coco_eval = COCOeval(dataset.coco, coco_results, 'bbox')
        coco_eval.params.imgIds = img_ids  # score only ids we've used
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()

    return results
def do_main():
    """Train a tf_efficientdet_d5 wheat detector for one fold (variant
    without Neptune experiment setup).

    Uses module-level globals for dataframes, paths, column names, fold
    index and hyperparameters; both train and validation splits are
    restricted to images that have boxes.
    """
    device = torch.device(f'cuda:{gpu_number}') if torch.cuda.is_available(
    ) else torch.device('cpu')
    print(device)

    print(len(train_boxes_df))
    print(len(train_images_df))

    # Leave only > 0
    print('Leave only train images with boxes (all)')
    # Mask of images with at least one annotated box; applied to BOTH splits
    # here (unlike the variant above, no negative images are mixed in).
    with_boxes_filter = train_images_df[image_id_column].isin(
        train_boxes_df[image_id_column].unique())

    images_val = train_images_df.loc[(train_images_df[fold_column] == fold)
                                     & with_boxes_filter,
                                     image_id_column].values
    images_train = train_images_df.loc[(train_images_df[fold_column] != fold)
                                       & with_boxes_filter,
                                       image_id_column].values

    print(len(images_train), len(images_val))

    # NOTE: this variant passes a box lookup callback instead of a dataframe.
    train_dataset = WheatDataset(images_train,
                                 DIR_TRAIN,
                                 train_box_callback,
                                 transforms=get_train_transform(),
                                 is_test=False)
    valid_dataset = WheatDataset(images_val,
                                 DIR_TRAIN,
                                 train_box_callback,
                                 transforms=get_valid_transform(),
                                 is_test=True)

    train_data_loader = DataLoader(train_dataset,
                                   batch_size=train_batch_size,
                                   shuffle=True,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn)

    valid_data_loader = DataLoader(valid_dataset,
                                   batch_size=inf_batch_size,
                                   shuffle=False,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn)

    #config = get_efficientdet_config('tf_efficientdet_d4')
    config = get_efficientdet_config('tf_efficientdet_d5')
    net = EfficientDet(config, pretrained_backbone=False)
    #load_weights(net, '../timm-efficientdet-pytorch/efficientdet_d4-5b370b7a.pth')
    # Start from COCO-pretrained d5 weights, then swap in a 1-class head.
    load_weights(net,
                 '../timm-efficientdet-pytorch/efficientdet_d5-ef44aea8.pth')

    config.num_classes = 1
    config.image_size = our_image_size
    net.class_net = HeadNet(config,
                            num_outputs=config.num_classes,
                            norm_kwargs=dict(eps=.001, momentum=.01))

    # Resume from the experiment checkpoint when present (overrides COCO).
    fold_weights_file = f'{experiment_name}.pth'
    if os.path.exists(fold_weights_file):
        # continue training
        print('Continue training, loading weights: ' + fold_weights_file)
        load_weights(net, fold_weights_file)

    # Train and eval benches share the same underlying `net` weights.
    model_train = DetBenchTrain(net, config)
    model_eval = DetBenchEval(net, config)

    manager = ModelManager(model_train, model_eval, device)
    weights_file = f'{experiment_name}.pth'

    manager.run_train(train_data_loader,
                      valid_data_loader,
                      n_epoches=n_epochs,
                      weights_file=weights_file,
                      factor=factor,
                      start_lr=start_lr,
                      min_lr=min_lr,
                      lr_patience=lr_patience,
                      overall_patience=overall_patience,
                      loss_delta=loss_delta)

    # add tags
    # NOTE(review): neptune.log_text is called but no neptune.init/
    # create_experiment precedes it in this variant — confirm an experiment
    # is active when this runs.
    neptune.log_text('save checkpoints as', weights_file[:-4])
    neptune.stop()
def do_main():
    """Train a tf_efficientdet_d5 wheat detector for one fold with Neptune
    logging and gradient accumulation tagging.

    Uses module-level globals for dataframes, paths, column names, fold
    index and hyperparameters.
    """
    neptune.init('ods/wheat')
    # Create experiment with defined parameters
    neptune.create_experiment(name=model_name,
                              params=PARAMS,
                              tags=[experiment_name, experiment_tag],
                              upload_source_files=[os.path.basename(__file__)])

    neptune.append_tags(f'fold_{fold}')
    neptune.append_tags(['grad_accum'])

    device = torch.device(f'cuda:{gpu_number}') if torch.cuda.is_available(
    ) else torch.device('cpu')
    print(device)

    print(len(train_boxes_df))
    print(len(train_images_df))

    # Leave only > 0
    print('Leave only train images with boxes (validation)')
    # Mask of images that have at least one annotated box; applied to both
    # splits below.
    with_boxes_filter = train_images_df[image_id_column].isin(
        train_boxes_df[image_id_column].unique())

    # config models fro train and validation
    config = get_efficientdet_config('tf_efficientdet_d5')
    net = EfficientDet(config, pretrained_backbone=False)
    # Start from COCO-pretrained d5 weights, then swap in a 1-class head.
    load_weights(net,
                 '../timm-efficientdet-pytorch/efficientdet_d5-ef44aea8.pth')

    config.num_classes = 1
    config.image_size = our_image_size
    net.class_net = HeadNet(config,
                            num_outputs=config.num_classes,
                            norm_kwargs=dict(eps=.001, momentum=.01))
    # Train and eval benches share the same underlying `net` weights.
    model_train = DetBenchTrain(net, config)
    model_eval = DetBenchEval(net, config)

    manager = ModelManager(model_train, model_eval, device)

    images_val = train_images_df.loc[(train_images_df[fold_column] == fold)
                                     & with_boxes_filter,
                                     image_id_column].values
    images_train = train_images_df.loc[(train_images_df[fold_column] != fold)
                                       & with_boxes_filter,
                                       image_id_column].values

    print(
        f'\nTrain images:{len(images_train)}, validation images {len(images_val)}'
    )

    # get augs
    #augs_dict = set_augmentations(our_image_size)

    # get datasets
    # NOTE(review): both splits are truncated to the first 160 images —
    # looks like a leftover debug/smoke-test limit; confirm before a real run.
    train_dataset = WheatDataset(
        image_ids=images_train[:160],
        image_dir=DIR_TRAIN,
        boxes_df=train_boxes_df,
        transforms=get_train_transform(our_image_size),
        is_test=False)
    valid_dataset = WheatDataset(
        image_ids=images_val[:160],
        image_dir=DIR_TRAIN,
        boxes_df=train_boxes_df,
        transforms=get_valid_transform(our_image_size),
        is_test=True)

    train_data_loader = DataLoader(train_dataset,
                                   batch_size=train_batch_size,
                                   shuffle=True,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn,
                                   drop_last=True)

    valid_data_loader = DataLoader(valid_dataset,
                                   batch_size=inf_batch_size,
                                   shuffle=False,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn)

    weights_file = f'../checkpoints/{model_name}/{experiment_name}.pth'

    #pretrain_weights_file = f'{checkpoints_dir}/{experiment_name}.pth'
    #if os.path.exists(pretrain_weights_file):
    #    print(f'Continue training, loading weights from {pretrain_weights_file}')
    #    load_weights(net, pretrain_weights_file)

    manager.run_train(train_generator=train_data_loader,
                      val_generator=valid_data_loader,
                      n_epoches=n_epochs,
                      weights_file=weights_file,
                      factor=factor,
                      start_lr=start_lr,
                      min_lr=min_lr,
                      lr_patience=lr_patience,
                      overall_patience=overall_patience,
                      loss_delta=loss_delta)

    # add tags
    neptune.log_text('save checkpoints as', weights_file[:-4])
    neptune.stop()
                                   num_workers=num_workers,
                                   collate_fn=collate_fn)

    #weights_file = 'effdet_model14_fold' + str(fold_) + '.pth'
    weights_file = '../Weights/effdet_fold_1_model16_alex_fold1.pth'
    #weights_file = 'effdet_alex_fold0.pth'

    config = get_efficientdet_config('tf_efficientdet_d5')
    net = EfficientDet(config, pretrained_backbone=False)
    config.num_classes = 1
    config.image_size = our_image_size
    net.class_net = HeadNet(config,
                            num_outputs=config.num_classes,
                            norm_kwargs=dict(eps=.001, momentum=.01))
    load_weights(net, weights_file)
    model = DetBenchEval(net, config)

    manager = ModelManager(model, device)

    true_list, pred_boxes, pred_scores = manager.predict(valid_data_loader)
    prob_thresholds = np.linspace(
        0.35, 0.45, num=10,
        endpoint=False)  # Prediction thresholds to consider a pixel positive
    best_metric = 0
    for thr in prob_thresholds:
        print('----------------------')
        print('thr', thr)
        cur_metric = competition_metric(true_list, pred_boxes, pred_scores,
                                        thr)
        if cur_metric > best_metric:
            best_thr = thr