Example 1
def evaluate(val_loader, model, loss_fn, device, use_tta=False):
    model.eval()

    if use_tta:
        transformations = tta.Compose([
            tta.Rotate90(angles=[0, 90, 180, 270]),
            tta.HorizontalFlip(),
            tta.VerticalFlip()
        ])
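        # 4 rotations x 2 flips x 2 flips = 16 augmented views per batch;
        # ClassificationTTAWrapper mean-merges the outputs by default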

        tta_model = tta.ClassificationTTAWrapper(model, transformations)

    correct = 0
    total = 0
    total_loss = 0
    for i, batch in enumerate(val_loader):
        input_data, labels = batch
        input_data, labels = input_data.to(device), labels.to(device)
        with torch.no_grad():
            if use_tta:
                predictions = tta_model(input_data)
            else:
                predictions = model(input_data)
            total_loss += loss_fn(predictions, labels).item()
            correct += (predictions.argmax(axis=1) == labels).sum().item()
            total += len(labels)
            torch.cuda.empty_cache()

    model.train()
    return total_loss / total, correct / total
Example 2
def SUE_TTA(model, batch: torch.Tensor, last_layer: bool) -> Tuple[np.ndarray, np.ndarray]:
    r"""Interface of Binary Segmentation Uncertainty Estimation with Test-Time Augmentations (TTA) method for 1 2D slice.
            Inputs supposed to be in range [0, data_range].
            Args:
                model: Trained model.
                batch: Tensor with shape (1, C, H, W).
                last_layer: Flag whether there is Sigmoid as a last NN layer
            Returns:
                Aleatoric and epistemic uncertainty maps with shapes equal to batch shape
     """
    model.eval()
    transforms = tta.Compose(
        [
            tta.VerticalFlip(),
            tta.HorizontalFlip(),
            tta.Rotate90(angles=[0, 180]),
            tta.Scale(scales=[1, 2, 4]),
            tta.Multiply(factors=[0.9, 1, 1.1]),
        ]
    )
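    # iterating over a Compose yields every combination of the transform
    # parameters: 2 * 2 * 2 * 3 * 3 = 72 augmented forward passes here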
    predicted = []
    for transformer in transforms:
        augmented_image = transformer.augment_image(batch)
        model_output = model(augmented_image)
        deaug_mask = transformer.deaugment_mask(model_output)
        if last_layer:
            prediction = torch.sigmoid(deaug_mask).cpu().detach().numpy()
        else:
            prediction = deaug_mask.cpu().detach().numpy()
        predicted.append(prediction)

    p_hat = np.array(predicted)
    aleatoric = calc_aleatoric(p_hat)
    epistemic = calc_epistemic(p_hat)

    return aleatoric, epistemic
Example 3
def process_folder():
    image_folder = add_backslash_if_needed(IMAGE_FOLDER)
    model_folder = add_backslash_if_needed(MODEL_FOLDER)
    image_names = list(os.listdir(image_folder))
    image_names.sort()

    transforms = tta.Compose([
        tta.HorizontalFlip(),
    ])
    all_thrs = [0.18, 0.22, 0.22, 0.2, 0.2]
    all_preds = []
    for ind in range(5):
        system = AppleClassification(model_path=f'{model_folder}fold{ind}.ckpt',
                                     device='cuda:0',
                                     transforms=transforms,
                                     th=all_thrs[ind])
        labels, probs = system.process_folder(image_folder, image_names, num_workers=NUM_WORKERS)
        # float, will multiply by weights
        labels = np.array(labels, dtype=float)
        all_preds.append(labels)

    weights = np.array([1, 1, 1, 1, 1], dtype=float)
    weighted_values = weights[0] * all_preds[0]
    for ind in range(1, 5):
        weighted_values += weights[ind] * all_preds[ind]
    weighted_values = weighted_values / np.sum(weights)
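    # with equal weights this reduces to a majority vote across the 5 folds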
    final_preds = (weighted_values > 0.5).astype(int)

    df = pd.DataFrame({'name': image_names,
                       'disease_flag': final_preds})
    df.to_csv(OUTPUT_FILE, index=False)

    return
Example 4
def get_predictions(model_chosen, tta=False):

    model_chosen.cuda().eval()
    actual_values, predicted_values = [], []

    if tta:
        transformation = ttach.Compose(
            [
                ttach.HorizontalFlip(),
                ttach.VerticalFlip(),
                ttach.Rotate90(angles=[0, 90, 180, 270])
            ]
        )
        test_time_augmentation_wrapper = ttach.ClassificationTTAWrapper(model_chosen, transformation)
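        # 2 flips x 2 flips x 4 rotations = 16 augmented views, mean-merged per image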
        with torch.no_grad():
            for batch in loader_of_test:
                test_image, test_label = batch
                predicted_value = test_time_augmentation_wrapper(test_image.cuda())
                predicted_value = torch.argmax(predicted_value, dim=1).detach().cpu().numpy()
                actual_values.append(test_label.cpu().numpy())
                predicted_values.append(predicted_value)
    else:
        with torch.no_grad():
            for batch in loader_of_test:
                test_image, test_label = batch
                predicted_value = model_chosen(test_image.cuda())
                predicted_value = torch.argmax(predicted_value, dim=1).detach().cpu().numpy()
                actual_values.append(test_label.cpu().numpy())
                predicted_values.append(predicted_value)

    return predicted_values
Example 5
def multi_model_predict_tta():
    preds_dict = dict()
    for model_name in model_name_list:
        for fold_idx in range(5):
            model = Net(model_name).to(device)
            model_save_path = os.path.join(
                config.dir_weight, '{}_fold{}.bin'.format(model_name, fold_idx))
            model.load_state_dict(torch.load(model_save_path))
            # e.g. '/home/muyun99/data/dataset/AIyanxishe/Image_Classification/weight/resnet18_train_size_256_fold0.bin'
            transforms = tta.Compose([
                tta.Resize([int(config.size_test_image), int(config.size_test_image)]),
                tta.HorizontalFlip(),
                # tta.Rotate90(angles=[0, 180]),
                # tta.Scale(scales=[1, 2, 4]),
                # tta.Multiply(factors=[0.9, 1, 1.1]),
                tta.FiveCrops(config.size_test_image, config.size_test_image)
            ])
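            # Resize -> HorizontalFlip -> FiveCrops: 2 * 5 = 10 views per image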
            tta_model = tta.ClassificationTTAWrapper(model, transforms)

            pred_list = predict(tta_model)
            submission = pd.DataFrame(pred_list)
            submission.to_csv(
                '{}/{}_fold{}_submission.csv'.format(config.dir_submission, config.save_model_name, fold_idx),
                index=False,
                header=False
            )
            preds_dict['{}_{}'.format(model_name, fold_idx)] = pred_list

    pred_list = get_pred_list(preds_dict)
    submission = pd.DataFrame(
        {"id": range(len(pred_list)), "label": [int2label(x) for x in pred_list]})
    submission.to_csv(config.dir_csv_test, index=False, header=False)
Example 6
def single_model_predict_tta():
    assert len(model_name_list) == 1
    model_name = model_name_list[0]
    model = Net(model_name).to(device)
    model_save_path = os.path.join(
        config.dir_weight, '{}.bin'.format(model_name))
    model.load_state_dict(torch.load(model_save_path))

    transforms = tta.Compose([
        tta.HorizontalFlip(),
        # tta.Rotate90(angles=[0, 180]),
        # tta.Scale(scales=[1, 2, 4]),
        # tta.Multiply(factors=[0.9, 1, 1.1]),
        tta.FiveCrops(224, 224)
    ])

    tta_model = tta.ClassificationTTAWrapper(model, transforms)

    pred_list = []
    with torch.no_grad():
        for batch_x, _ in tqdm(test_loader):
            batch_x = batch_x.to(device)
            probs = tta_model(batch_x)
            # torch.max returns (values, indices); keep the indices as class predictions
            _, preds = torch.max(torch.softmax(probs, dim=1), dim=1)
            pred_list += preds.cpu().numpy().tolist()

    submission = pd.DataFrame({
        "id": range(len(pred_list)),
        "label": [int2label(x) for x in pred_list]
    })
    submission.to_csv(config.dir_csv_test, index=False, header=False)
Example 7
    def __init__(self, config):
        self.config = config
        self.classes = config.CLASS_NAME
        self.input_size = config.INPUT_SIZE
        self.binary_option = config.BINARY
        self.failClasses = config.FAIL_CLASSNAME
        self.passClasses = config.PASS_CLASSNAME
        self.pass_class_index = [
            self.classes.index(class_) for class_ in self.passClasses
        ]
        self.fail_class_index = [
            self.classes.index(class_) for class_ in self.failClasses
        ]
        self.pytorch_model = None
        # self.train_generator = None
        # self.val_generator = None
        # self.test_generator = None
        self.class_weights = None
        self.evaluate_generator = None
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.tta_rotate_opt_list = [0, 90, 180, 270]
        self.tta_option = tta.Compose([
            tta.Rotate90(
                self.tta_rotate_opt_list),  # For future development with CAM
            # tta.HorizontalFlip(),
            # tta.VerticalFlip()
        ])
        # Toggle TTA option
        self.tta_opt = True
        self.global_batch_size = (self.config.BATCH_SIZE * self.config.GPU_COUNT
                                  if self.config.GPU_COUNT != 0
                                  else self.config.BATCH_SIZE)
Example 8
    def forward_augmentation_smoothing(
            self,
            input_tensor: torch.Tensor,
            targets: List[torch.nn.Module],
            eigen_smooth: bool = False) -> np.ndarray:
        transforms = tta.Compose([
            tta.HorizontalFlip(),
            tta.Multiply(factors=[0.9, 1, 1.1]),
        ])
        cams = []
        for transform in transforms:
            augmented_tensor = transform.augment_image(input_tensor)
            cam = self.forward(augmented_tensor, targets, eigen_smooth)

            # The ttach library expects a tensor of size BxCxHxW
            cam = cam[:, None, :, :]
            cam = torch.from_numpy(cam)
            cam = transform.deaugment_mask(cam)

            # Back to numpy float32, HxW
            cam = cam.numpy()
            cam = cam[:, 0, :, :]
            cams.append(cam)

        cam = np.mean(np.float32(cams), axis=0)
        return cam
Example 9
    def build_tta_model(self, model, config, device):
        tta_model = getattr(tta, config["tta"])(
            model,
            tta.Compose([tta.HorizontalFlip(),
                         tta.VerticalFlip()]),
            merge_mode="mean",
        )
        tta_model.to(device)
        return tta_model
Example 10
def main():
    load_dotenv('cassava.env')
    seed_everything(SEED)

    root_path = os.getenv('ROOT_PATH')
    train_csv_path = root_path + 'train.csv'
    train_root_path = root_path + 'train_images'

    num_classes = int(os.getenv('NUM_CLASSES', 5))
    num_epoch = int(os.getenv('NUM_EPOCH', 10))
    num_folds = int(os.getenv('NUM_FOLDS', 5))
    batch_size = int(os.getenv('BATCH_SIZE', 16))
    grad_acc = int(os.getenv('GRAD_ACC', 8))

    resize = int(os.getenv('RESIZE', 224))

    normalize = A.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
    train_transform = A.Compose([
        A.HorizontalFlip(),
        A.ShiftScaleRotate(p=1.0),
        A.ColorJitter(brightness=0.1, contrast=0.2, saturation=0.2, hue=0.0, p=1.0, always_apply=False),
        A.RandomResizedCrop(resize, resize, p=1.0, always_apply=True),
        normalize,
        ToTensorV2(p=1.0),
    ], p=1.0)
    test_transform = A.Compose([
        A.Resize(int(resize * 1.5), int(resize * 1.5)),
        normalize,
        ToTensorV2(p=1.0),
    ], p=1.0)
    tta_transform = tta.Compose([
        tta.FiveCrops(resize, resize),
    ])
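    # test images are resized to 1.5x above, so FiveCrops takes five
    # resize x resize corner/center crops from each 1.5x-sized image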

    criterion = MixedLabelLoss(nn.CrossEntropyLoss(reduction='none'))
    augmentations = [snapmix, ]

    df = pd.read_csv(train_csv_path)
    folds = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=SEED).split(df['image_id'], df['label'])
    for _fold, (train, test) in enumerate(folds):
        train = df.iloc[train]
        test = df.iloc[test]
        scheduler = optim.lr_scheduler.CosineAnnealingLR

        model = TimmNet('efficientnet_b3a', num_classes, criterion, learning_rate=1e-3, scheduler=scheduler,
                        n_epoch=num_epoch, eta_min=1e-6, augmentations=augmentations, tta_transform=tta_transform)
        dm = DataFrameDataModule(train, train_root_path, test, batch_size=batch_size,
                                 train_transform=train_transform, test_transform=test_transform)

        mlf_logger = MLFlowLogger(
            experiment_name='cassava',
            tracking_uri='file:./cassava'
        )
        trainer = Trainer(gpus=-1, precision=32, deterministic=True, accumulate_grad_batches=grad_acc,
                          profiler='simple', val_check_interval=1.0, logger=mlf_logger, max_epochs=num_epoch)
        trainer.fit(model, datamodule=dm)
Example 11
    def __init__(self):
        super(Net, self).__init__()
        self.transforms = ttach.Compose([
            ttach.HorizontalFlip(),
            # ttach.Scale(scales=[1, 1.05], interpolation="linear"),
            ttach.Multiply(factors=[0.95, 1, 1.05]),
        ])
        self.model = ttach.ClassificationTTAWrapper(InnerNet(),
                                                    transforms=self.transforms,
                                                    merge_mode="mean")
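        # each forward() through self.model now runs 2 * 3 = 6 augmented passes, mean-merged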
Example 12
def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transforms = tta.Compose([ tta.HorizontalFlip() ])

    #best_threshold, best_min_size_threshold = search_threshold(device, transforms)
    best_threshold = [0.8, 0.7, 0.8, 0.7]
    best_min_size_threshold = 0

    predict(best_threshold, best_min_size_threshold, device, transforms)
Example 13
def init_model(model_path):
    # note: these transforms are built but not passed below; TTA stays
    # disabled (transforms=None), presumably because gradcam=True
    transforms = tta.Compose([
        tta.HorizontalFlip(),
    ])
    system = AppleClassification(model_path=model_path,
                                 device='cuda:0',
                                 transforms=None,
                                 th=0.2,
                                 gradcam=True)
    return system
Example 14
def single_model_predict_tta(predict_model):
    transforms = tta.Compose([
        # tta.HorizontalFlip(),
        # tta.VerticalFlip(),
        # tta.FiveCrops(200, 200)
    ])
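    # with every transform commented out the Compose is empty, so the
    # wrapper below performs a single identity pass (no augmentation)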

    tta_model = tta.ClassificationTTAWrapper(predict_model, transforms)
    pred_cls_list = single_model_predict(tta_model)
    return pred_cls_list
Example 15
def init_model():
    transforms = tta.Compose([
        tta.HorizontalFlip(),
    ])
    system = AppleClassification(model_path=MODEL_PATH,
                                 device='cuda:0',
                                 transforms=transforms,
                                 th=0.2)
    return system
Example 16
    def predict(self, tta_aug=None, debug=None):
        transforms = tta_aug
        if tta_aug is None:
            import ttach as tta
            transforms = tta.Compose([
                tta.Scale(scales=[0.95, 1, 1.05]),
                tta.HorizontalFlip(),
            ])
        from torch.utils import data

        self.model.eval()

        if not isinstance(self.settings, PredictorSettings):
            logger.warning(
                'Settings is of type: {}. Pass settings to network object of type Train to train'
                .format(str(type(self.settings))))
            return
        predict_loader = data.DataLoader(dataset=self.settings.PREDICT_DATASET,
                                         batch_size=1,
                                         shuffle=False,
                                         num_workers=self.settings.PROCESSES)
        with torch.no_grad():
            for idx, (data, target, id) in enumerate(predict_loader):
                data, target = data.to(self.device), target.to(
                    self.device, dtype=torch.int64)
                outputs = []
                o_shape = data.shape
                for transformer in transforms:
                    augmented_image = transformer.augment_image(data)
                    shape = list(augmented_image.shape)[2:]
                    padded = pad(augmented_image, self.padding_value)  ## 2**5

                    input = padded.float()
                    output = self.model(input)
                    output = unpad(output, shape)
                    reversed = transformer.deaugment_mask(output)
                    reversed = torch.nn.functional.interpolate(
                        reversed, size=list(o_shape)[2:], mode="nearest")
                    print(
                        "original: {} input: {}, padded: {} unpadded {} output {}"
                        .format(str(o_shape), str(shape),
                                str(list(augmented_image.shape)),
                                str(list(output.shape)),
                                str(list(reversed.shape))))
                    outputs.append(reversed)
                stacked = torch.stack(outputs)
                output = torch.mean(stacked, dim=0)
                out = output.data.cpu().numpy()
                out = np.transpose(out, (0, 2, 3, 1))
                out = np.squeeze(out)
                yield out
Example 17
def test_time_aug(net, merge_mode='mean'):
    """
    More operations please assess to this url: https://github.com/qubvel/ttach
    """
    print("Using the test time augmentation! [Default: HorizontalFlip]")
    trans = tta.Compose([
        tta.HorizontalFlip(),
        # tta.Rotate90(angles=[0, 180]),
        # tta.Scale(scales=[1, 2]),
        # tta.Multiply(factors=[0.9, 1, 1.1]),
    ])
    net = tta.SegmentationTTAWrapper(net, trans, merge_mode=merge_mode)
    return net
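A minimal usage sketch (hypothetical names; `smp.Unet` stands in for any segmentation network):

net = smp.Unet(encoder_name='resnet34', classes=1)  # hypothetical base model
net = test_time_aug(net, merge_mode='mean')  # each batch now runs 2 passes: original + flipped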
Example 18
def tta_model_predict(X, model):
    tta_transforms = tta.Compose(
        [tta.HorizontalFlip(),
         tta.Scale(scales=[0.5, 1, 2])])
    masks = []
    for transformer in tta_transforms:
        augmented_image = transformer.augment_image(X)
        model_output = model(augmented_image)["out"]

        deaug_mask = transformer.deaugment_mask(model_output)
        masks.append(deaug_mask)

    mask = torch.sum(torch.stack(masks), dim=0) / len(masks)
    return mask
Example 19
def predict(model_path, test_loader, saveFileName, iftta):

    ## predict
    model = initialize_model(num_classes=176)

    # create model and load weights from checkpoint
    model = model.to(device)
    model.load_state_dict(torch.load(model_path))

    if iftta:
        print("Using TTA")
        transforms = tta.Compose(
            [
                tta.HorizontalFlip(),
                tta.VerticalFlip(),
                tta.Rotate90(angles=[0, 180]),
                # tta.Scale(scales=[1, 0.3]), 
            ]
        )
        model = tta.ClassificationTTAWrapper(model, transforms)
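        # the wrapper now averages logits over 2 * 2 * 2 = 8 augmented views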

    # Make sure the model is in eval mode;
    # modules like Dropout and BatchNorm behave differently in training mode.
    model.eval()
    
    # Initialize a list to store the predictions.
    predictions = []
    # Iterate the testing set by batches.
    for batch in tqdm(test_loader):

        imgs = batch
        with torch.no_grad():
            logits = model(imgs.to(device))
        
        # Take the class with greatest logit as prediction and record it.
        predictions.extend(logits.argmax(dim=-1).cpu().numpy().tolist())

    preds = []
    for i in predictions:
        preds.append(num_to_class[i])

    test_data = pd.read_csv('leaves_data/test.csv')
    test_data['label'] = pd.Series(preds)
    submission = pd.concat([test_data['image'], test_data['label']], axis=1)
    submission.to_csv(saveFileName, index=False)
    print("Done!!!!!!!!!!!!!!!!!!!!!!!!!!!")
Example 20
def init_model():
    path = os.path.join(os.path.dirname(__file__), 'net.pth')
    model = DeepLab(output_stride=16,
                    class_num=17,
                    pretrained=False,
                    bn_momentum=0.1,
                    freeze_bn=False)
    model.load_state_dict(torch.load(path))

    transforms = tta.Compose([
        tta.HorizontalFlip(),
        tta.Rotate90(angles=[0, 180]),
    ])
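    # 2 flips x 2 rotations = 4 augmented views per forward pass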

    model = tta.SegmentationTTAWrapper(model, transforms)
    model = model.cuda()

    return model
Example 21
def get_tta_model(model: nn.Module, crop_method: str,
                  input_size: List[int]) -> nn.Module:
    """Wraps input model to TTA model.

    Args:
        model: input model without TTA
        crop_method: one of {'resize', 'crop'}. Cropping method of the input images
        input_size: model's input size

    Returns:
        Model with TTA
    """

    transforms = [ttach.HorizontalFlip()]
    if crop_method == "crop":
        transforms.append(
            ThreeCrops(crop_height=input_size[0], crop_width=input_size[1]))
    transforms = ttach.Compose(transforms)
    model = ttach.ClassificationTTAWrapper(model, transforms)

    return model
Example 22
def test_compose_1():
    transform = tta.Compose([
        tta.HorizontalFlip(),
        tta.VerticalFlip(),
        tta.Rotate90(angles=[0, 90, 180, 270]),
        tta.Scale(scales=[1, 2, 4], interpolation="nearest"),
    ])

    assert len(transform) == 2 * 2 * 4 * 3  # all combinations of aug parameters

    dummy_label = torch.ones(2).reshape(2, 1).float()
    dummy_image = torch.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5).float()
    dummy_model = lambda x: {"label": dummy_label, "mask": x}

    for augmenter in transform:
        augmented_image = augmenter.augment_image(dummy_image)
        model_output = dummy_model(augmented_image)
        deaugmented_mask = augmenter.deaugment_mask(model_output["mask"])
        deaugmented_label = augmenter.deaugment_label(model_output["label"])
        assert torch.allclose(deaugmented_mask, dummy_image)
        assert torch.allclose(deaugmented_label, dummy_label)
Example 23
    if fold_flag:
        _, test = get_fold_filelist(csv_file, K=fold_K, fold=fold_index)
        test_img_list = [img_path+sep+i[0] for i in test]
        if mask_path is not None:
            test_mask_list = [mask_path+sep+i[0] for i in test]
    else:
        test_img_list = get_filelist_frompath(img_path,'PNG')
        if mask_path is not None:
            test_mask_list = [mask_path + sep + i.split(sep)[-1] for i in test_img_list]

    # build the two cascade models
    with torch.no_grad():
        # TTA setup
        tta_trans = tta.Compose([
            tta.VerticalFlip(),
            tta.HorizontalFlip(),
            tta.Rotate90(angles=[0,180]),
        ])
        # build the models
        # cascade1
        model_cascade1 = smp.DeepLabV3Plus(encoder_name="efficientnet-b6", encoder_weights=None, in_channels=1, classes=1)
        model_cascade1.to(device)
        model_cascade1.load_state_dict(torch.load(weight_c1))
        if c1_tta:
            model_cascade1 = tta.SegmentationTTAWrapper(model_cascade1, tta_trans, merge_mode='mean')
        model_cascade1.eval()
        # cascade2
        model_cascade2 = smp.DeepLabV3Plus(encoder_name="efficientnet-b6", encoder_weights=None, in_channels=1, classes=1)
        # model_cascade2 = smp.Unet(encoder_name="efficientnet-b6", encoder_weights=None, in_channels=1, classes=1, encoder_depth=5, decoder_attention_type='scse')
        # model_cascade2 = smp.PAN(encoder_name="efficientnet-b6",encoder_weights='imagenet',	in_channels=1, classes=1)
        model_cascade2.to(device)

Example 24
for i in range(5):
    train(i)


def load_model(fold: int, epoch: int, device: torch.device = 'cuda'):

    model = EfficientNetModel().to(device)
    model.load_state_dict(
        torch.load(f'models/effinet_b4_SAM_CosLR-f{fold}-{epoch}.pth'))

    return model


transforms = tta.Compose([tta.HorizontalFlip(), tta.VerticalFlip()])
# note: no `model` is defined at module level; the per-fold models are wrapped inside test()


def test(device: torch.device = 'cuda'):
    submit = pd.read_csv('data/sample_submission.csv')

    model1 = load_model(0, 19)
    model2 = load_model(1, 19)
    model3 = load_model(2, 19)
    model4 = load_model(3, 19)
    model5 = load_model(4, 19)

    tta_model1 = tta.ClassificationTTAWrapper(model1, transforms)
    tta_model2 = tta.ClassificationTTAWrapper(model2, transforms)
    tta_model3 = tta.ClassificationTTAWrapper(model3, transforms)
Example 25
def segmentation(
    param,
    input_image,
    label_arr,
    num_classes: int,
    gpkg_name,
    model,
    chunk_size: int,
    device,
    scale: List,
    BGR_to_RGB: bool,
    tp_mem,
    debug=False,
):
    """

    Args:
        param: parameter dict
        input_image: opened image (rasterio object)
        label_arr: numpy array of label if available
        num_classes: number of classes
        gpkg_name: geo-package name if available
        model: model weights
        chunk_size: image tile size
        device: cuda/cpu device
        scale: scale range
        BGR_to_RGB: True/False
        tp_mem: memory temp file for saving numpy array to disk
        debug: True/False

    Returns:

    """
    xmin, ymin, xmax, ymax = (input_image.bounds.left,
                              input_image.bounds.bottom,
                              input_image.bounds.right, input_image.bounds.top)
    xres, yres = (abs(input_image.transform.a), abs(input_image.transform.e))
    mx = chunk_size * xres
    my = chunk_size * yres
    padded = chunk_size * 2
    h = input_image.height
    w = input_image.width
    h_ = h + padded
    w_ = w + padded
    dist_samples = int(round(chunk_size * (1 - 1.0 / 2.0)))

    # switch to evaluate mode
    model.eval()

    # initialize test time augmentation
    transforms = tta.Compose([
        tta.HorizontalFlip(),
    ])
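    # horizontal-flip TTA only: each tile is inferred twice (original + flipped)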
    # construct window for smoothing
    WINDOW_SPLINE_2D = _window_2D(window_size=padded, power=2.0)
    WINDOW_SPLINE_2D = torch.as_tensor(np.moveaxis(WINDOW_SPLINE_2D, 2,
                                                   0), ).type(torch.float)
    WINDOW_SPLINE_2D = WINDOW_SPLINE_2D.to(device)

    fp = np.memmap(tp_mem,
                   dtype='float16',
                   mode='w+',
                   shape=(h_, w_, num_classes))
    sample = {'sat_img': None, 'map_img': None, 'metadata': None}
    cnt = 0
    img_gen = gen_img_samples(input_image, chunk_size)
    start_seg = time.time()
    for img in tqdm(img_gen,
                    position=1,
                    leave=False,
                    desc='inferring on window slices'):
        row = img[1]
        col = img[2]
        sub_image = img[0]
        image_metadata = add_metadata_from_raster_to_sample(
            sat_img_arr=sub_image,
            raster_handle=input_image,
            meta_map={},
            raster_info={})

        sample['metadata'] = image_metadata
        totensor_transform = augmentation.compose_transforms(
            param,
            dataset="tst",
            input_space=BGR_to_RGB,
            scale=scale,
            aug_type='totensor')
        sample['sat_img'] = sub_image
        sample = totensor_transform(sample)
        inputs = sample['sat_img'].unsqueeze_(0)
        inputs = inputs.to(device)
        if inputs.shape[1] == 4 and any("module.modelNIR" in s
                                        for s in model.state_dict().keys()):
            ############################
            # Test Implementation of the NIR
            ############################
            # Init NIR   TODO: make a proper way to read the NIR channel
            #                  and an option to specify the index of the NIR channel
            # Extract the NIR channel -> [batch size, H, W] since it's only one channel
            inputs_NIR = inputs[:, -1, ...]
            # add a channel to get the good size -> [:, 1, :, :]
            inputs_NIR.unsqueeze_(1)
            # take out the NIR channel and take only the RGB for the inputs
            inputs = inputs[:, :-1, ...]
            # Suggestion of implementation
            # inputs_NIR = data['NIR'].to(device)
            inputs = [inputs, inputs_NIR]
            # outputs = model(inputs, inputs_NIR)
            ############################
            # End of the test implementation module
            ############################
        output_lst = []
        for transformer in transforms:
            # augment inputs
            augmented_input = transformer.augment_image(inputs)
            augmented_output = model(augmented_input)
            if isinstance(augmented_output,
                          OrderedDict) and 'out' in augmented_output.keys():
                augmented_output = augmented_output['out']
            logging.debug(
                f'Shape of augmented output: {augmented_output.shape}')
            # reverse augmentation for outputs
            deaugmented_output = transformer.deaugment_mask(augmented_output)
            deaugmented_output = F.softmax(deaugmented_output,
                                           dim=1).squeeze(dim=0)
            output_lst.append(deaugmented_output)
        outputs = torch.stack(output_lst)
        outputs = torch.mul(outputs, WINDOW_SPLINE_2D)
        outputs, _ = torch.max(outputs, dim=0)
        outputs = outputs.permute(1, 2, 0)
        outputs = outputs.reshape(padded, padded,
                                  num_classes).cpu().numpy().astype('float16')
        outputs = outputs[dist_samples:-dist_samples,
                          dist_samples:-dist_samples, :]
        fp[row:row + chunk_size, col:col + chunk_size, :] = \
            fp[row:row + chunk_size, col:col + chunk_size, :] + outputs
        cnt += 1
    fp.flush()
    del fp

    fp = np.memmap(tp_mem,
                   dtype='float16',
                   mode='r',
                   shape=(h_, w_, num_classes))
    subdiv = 2.0
    step = int(chunk_size / subdiv)
    pred_img = np.zeros((h_, w_), dtype=np.uint8)
    for row in tqdm(range(0, input_image.height, step),
                    position=2,
                    leave=False):
        for col in tqdm(range(0, input_image.width, step),
                        position=3,
                        leave=False):
            arr1 = fp[row:row + chunk_size, col:col + chunk_size, :] / (2**2)
            arr1 = arr1.argmax(axis=-1).astype('uint8')
            pred_img[row:row + chunk_size, col:col + chunk_size] = arr1
    pred_img = pred_img[:h, :w]
    end_seg = time.time() - start_seg
    logging.info('Segmentation operation completed in {:.0f}m {:.0f}s'.format(
        end_seg // 60, end_seg % 60))

    if debug:
        logging.debug(
            f'Bin count of final output: {np.unique(pred_img, return_counts=True)}'
        )
    gdf = None
    if label_arr is not None:
        start_seg_ = time.time()
        feature = defaultdict(list)
        cnt = 0
        for row in tqdm(range(0, h, chunk_size), position=2, leave=False):
            for col in tqdm(range(0, w, chunk_size), position=3, leave=False):
                label = label_arr[row:row + chunk_size, col:col + chunk_size]
                pred = pred_img[row:row + chunk_size, col:col + chunk_size]
                pixelMetrics = ComputePixelMetrics(label.flatten(),
                                                   pred.flatten(), num_classes)
                eval = pixelMetrics.update(pixelMetrics.iou)
                feature['id_image'].append(gpkg_name)
                for c_num in range(num_classes):
                    feature['L_count_' + str(c_num)].append(
                        int(np.count_nonzero(label == c_num)))
                    feature['P_count_' + str(c_num)].append(
                        int(np.count_nonzero(pred == c_num)))
                    feature['IoU_' + str(c_num)].append(eval['iou_' +
                                                             str(c_num)])
                feature['mIoU'].append(eval['macro_avg_iou'])
                x_1, y_1 = (xmin + (col * xres)), (ymax - (row * yres))
                x_2, y_2 = (xmin + ((col * xres) + mx)), y_1
                x_3, y_3 = x_2, (ymax - ((row * yres) + my))
                x_4, y_4 = x_1, y_3
                geom = Polygon([(x_1, y_1), (x_2, y_2), (x_3, y_3),
                                (x_4, y_4)])
                feature['geometry'].append(geom)
                feature['length'].append(geom.length)
                feature['pointx'].append(geom.centroid.x)
                feature['pointy'].append(geom.centroid.y)
                feature['area'].append(geom.area)
                cnt += 1
        gdf = gpd.GeoDataFrame(feature, crs=input_image.crs)
        gdf.to_crs(crs="EPSG:4326", inplace=True)
        end_seg_ = time.time() - start_seg_
        logging.info('Benchmark operation completed in {:.0f}m {:.0f}s'.format(
            end_seg_ // 60, end_seg_ % 60))
    input_image.close()
    return pred_img, gdf
Example 26
def test(model, data_loader, save_path=""):
    """
    为了计算方便,训练过程中的验证与测试都直接计算指标J和F,不再先生成再输出,
    所以这里的指标仅作一个相对的参考,具体真实指标需要使用测试代码处理
    """
    model.eval()
    tqdm_iter = tqdm(enumerate(data_loader),
                     total=len(data_loader),
                     leave=False)

    if arg_config['use_tta']:
        construct_print("We will use Test Time Augmentation!")
        transforms = tta.Compose([  # 2*3
            tta.HorizontalFlip(),
            tta.Scale(scales=[0.75, 1, 1.5],
                      interpolation='bilinear',
                      align_corners=False)
        ])
    else:
        transforms = None

    results = defaultdict(list)
    for test_batch_id, test_data in tqdm_iter:
        tqdm_iter.set_description(f"te=>{test_batch_id + 1}")

        with torch.no_grad():
            curr_jpegs = test_data["image"].to(DEVICES, non_blocking=True)
            curr_flows = test_data["flow"].to(DEVICES, non_blocking=True)
            preds_logits = tta_aug(model=model,
                                   transforms=transforms,
                                   data=dict(curr_jpeg=curr_jpegs,
                                             curr_flow=curr_flows))
            preds_prob = preds_logits.sigmoid().squeeze().cpu().detach()  # float32

        for i, pred_prob in enumerate(preds_prob.numpy()):
            curr_mask_path = test_data["mask_path"][i]
            video_name, mask_name = curr_mask_path.split(os.sep)[-2:]
            mask = read_binary_array(curr_mask_path, thr=0)
            mask_h, mask_w = mask.shape

            pred_prob = cv2.resize(pred_prob,
                                   dsize=(mask_w, mask_h),
                                   interpolation=cv2.INTER_LINEAR)
            pred_prob = clip_to_normalize(data_array=pred_prob,
                                          clip_range=arg_config["clip_range"])
            pred_seg = np.where(pred_prob > 0.5, 255, 0).astype(np.uint8)

            results[video_name].append(
                (jaccard.db_eval_iou(annotation=mask, segmentation=pred_seg),
                 f_boundary.db_eval_boundary(annotation=mask,
                                             segmentation=pred_seg)))

            if save_path:
                pred_video_path = os.path.join(save_path, video_name)
                if not os.path.exists(pred_video_path):
                    os.makedirs(pred_video_path)
                pred_frame_path = os.path.join(pred_video_path, mask_name)
                cv2.imwrite(pred_frame_path, pred_seg)

    j_f_collection = []
    for video_name, video_scores in results.items():
        j_f_for_video = np.mean(np.array(video_scores), axis=0).tolist()
        results[video_name] = j_f_for_video
        j_f_collection.append(j_f_for_video)
    results['average'] = np.mean(np.array(j_f_collection), axis=0).tolist()
    return pretty_print(results)
Example 27
    def test_tta(self, mode='train', unet_path=None):
        """Test model & Calculate performances."""
        print(char_color('@,,@   %s with TTA' % (mode)))
        if unet_path is not None:
            if os.path.isfile(unet_path):
                checkpoint = torch.load(unet_path)
                self.unet.load_state_dict(checkpoint['state_dict'])
                self.myprint('Successfully Loaded from %s' % (unet_path))

        self.unet.train(False)
        self.unet.eval()

        if mode == 'train':
            data_loader = self.train_loader
        elif mode == 'test':
            data_loader = self.test_loader
        elif mode == 'valid':
            data_loader = self.valid_loader

        acc = 0.  # Accuracy
        SE = 0.  # Sensitivity (Recall)
        SP = 0.  # Specificity
        PC = 0.  # Precision
        DC = 0.  # Dice Coefficient
        IOU = 0.  # IOU
        length = 0

        # per-image model predictions
        detail_result = []  # detail_result = [id, acc, SE, SP, PC, dsc, IOU]
        with torch.no_grad():
            for i, sample in enumerate(data_loader):
                (image_paths, images, GT) = sample
                images_path = list(image_paths)
                images = images.to(self.device)
                GT = GT.to(self.device)

                tta_trans = tta.Compose([
                    tta.VerticalFlip(),
                    tta.HorizontalFlip(),
                    tta.Rotate90(angles=[0, 180])
                ])
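                # 2 * 2 * 2 = 8 augmented views; note the Compose and wrapper
                # could be built once outside the loop instead of per batch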

                tta_model = tta.SegmentationTTAWrapper(self.unet, tta_trans)
                SR = tta_model(images)

                # SR = self.unet(images)
                SR = torch.sigmoid(SR)  # F.sigmoid is deprecated

                if self.save_image:
                    images_all = torch.cat((images, SR, GT), 0)
                    torchvision.utils.save_image(
                        images_all.data.cpu(),
                        os.path.join(self.result_path, 'images',
                                     '%s_%d_image.png' % (mode, i)),
                        nrow=self.batch_size)

                SR = SR.data.cpu().numpy()
                GT = GT.data.cpu().numpy()

                for ii in range(SR.shape[0]):
                    SR_tmp = SR[ii, :].reshape(-1)
                    GT_tmp = GT[ii, :].reshape(-1)
                    tmp_index = images_path[ii].split(sep)[-1]
                    tmp_index = int(tmp_index.split('.')[0])

                    SR_tmp = torch.from_numpy(SR_tmp).to(self.device)
                    GT_tmp = torch.from_numpy(GT_tmp).to(self.device)

                    result_tmp = np.array([
                        tmp_index,
                        get_accuracy(SR_tmp, GT_tmp),
                        get_sensitivity(SR_tmp, GT_tmp),
                        get_specificity(SR_tmp, GT_tmp),
                        get_precision(SR_tmp, GT_tmp),
                        get_DC(SR_tmp, GT_tmp),
                        get_IOU(SR_tmp, GT_tmp)
                    ])

                    acc += result_tmp[1]
                    SE += result_tmp[2]
                    SP += result_tmp[3]
                    PC += result_tmp[4]
                    DC += result_tmp[5]
                    IOU += result_tmp[6]
                    detail_result.append(result_tmp)

                    length += 1

        accuracy = acc / length
        sensitivity = SE / length
        specificity = SP / length
        precision = PC / length
        disc = DC / length
        iou = IOU / length
        detail_result = np.array(detail_result)

        if self.save_detail_result:  # detail_result = [id, acc, SE, SP, PC, dsc, IOU]
            excel_save_path = os.path.join(self.result_path,
                                           mode + '_pre_detial_result.xlsx')
            writer = pd.ExcelWriter(excel_save_path)
            detail_result = pd.DataFrame(detail_result)
            detail_result.to_excel(writer, mode, float_format='%.5f')
            writer.save()
            writer.close()

        return accuracy, sensitivity, specificity, precision, disc, iou
Example 28
# #### Load model
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

multilabel_resnet34 = models.resnet34(pretrained=False)
multilabel_resnet34.fc = torch.nn.Linear(multilabel_resnet34.fc.in_features,
                                         len(CLASSES))
multilabel_resnet34 = load_dataparallel_model(
    multilabel_resnet34, torch.load(model_multilabel_path))
multilabel_resnet34 = torch.nn.DataParallel(multilabel_resnet34,
                                            device_ids=range(
                                                torch.cuda.device_count()))
multilabel_resnet34.to(DEVICE)
multilabel_resnet34.eval()

multilabel_resnet34 = tta.ClassificationTTAWrapper(multilabel_resnet34,
                                                   tta.Compose([
                                                       tta.HorizontalFlip(),
                                                       tta.VerticalFlip(),
                                                       tta.Scale([0.85, 1.15]),
                                                   ]),
                                                   merge_mode="mean")
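# 2 flips x 2 flips x 2 scales = 8 augmented views, mean-merged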

# #### Inference
class_th = torch.tensor([0.0005, 0.2, 0.005, 0.02, 0.005, 0.005, 0.008,
                         0.005]).to(DEVICE)

total_preds, total_paths = [], []


@toma.batch(initial_batchsize=batch_size_multilabel)
def run_multilabel(batch_size):
    inference_loader = DataLoader(inference_dataset,
Example 29
transforms = A.Compose(
    [A.Normalize(mean=mean, std=std, max_pixel_value=max_value),
     ToTensorV2()])

_, data_loader, _ = get_train_val_loaders(
    train_ds,
    val_ds,
    train_transforms=transforms,
    val_transforms=transforms,
    batch_size=batch_size,
    num_workers=num_workers,
    val_batch_size=batch_size,
    pin_memory=True,
)

prepare_batch = inference_prepare_batch_f32

# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize, mean=mean, std=std)

#################### Model ####################

model = FPN(encoder_name='se_resnext50_32x4d', classes=2, encoder_weights=None)
run_uuid = "5230c20f609646cb9870a211036ea5cb"
weights_filename = "best_model_67_val_miou_bg=0.7574240313552584.pth"

has_targets = True

tta_transforms = tta.Compose([
    tta.Rotate90(angles=[90, -90, 180]),
])
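# three rotated views (90, -90, 180); note the identity (0 deg) view is not included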
Example 30
        label = self.label_df.iloc[index,1:].values.astype('float')

        if self.transforms:            
            image = self.transforms(image=image)['image'] / 255.0

        return image, label

base_transforms = {
    'test' : albumentations.Compose([        
        albumentations.pytorch.ToTensorV2(),
        ]),
}

tta_transforms = tta.Compose(
    [
        tta.Rotate90(angles=[0, 90, 180, 270]),
        # tta.Scale(scales=[0.9,1.0,1.1]),  ## previously run with batch size 1
    ]
)
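# 4 rotation views (0/90/180/270) per image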


def main():
    device = "cuda:0" if torch.cuda.is_available() else "cpu"

    parser = argparse.ArgumentParser()
    parser.add_argument('--image_path', type=str, default="./data/test_dirty_mnist_2nd/")
    parser.add_argument('--label_path', type=str, default="./data/sample_submission.csv")
    parser.add_argument('--weight_path', type=str, default='./save/kfold_202119/')
    parser.add_argument('--out_path', type=str, default='./save/kfold_202119/')

    parser.add_argument('--model', type=str, default='efficientnet-b8')    
    parser.add_argument('--batch_size', type=int, default=1)