Example No. 1
def wrap_border(mask_src: np.ndarray) -> np.ndarray:
    mask = mask_src.copy()
    mask[14:27, :] = (mask[14:27, :] + vflip(mask[:13, :])) / 2
    mask[:, 14:27] = (mask[:, 14:27] + hflip(mask[:, :13])) / 2
    mask[99:113, :] = (mask[99:113, :] + vflip(mask[114:, :])) / 2
    mask[:, 99:113] = (mask[:, 99:113] + hflip(mask[:, 114:])) / 2
    return center_crop(mask, 101, 101)
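
A minimal driving sketch for wrap_border, assuming the input is a 128x128 float mask and using hypothetical stand-ins for vflip, hflip and center_crop (the originals presumably come from an image-helper module):

import numpy as np

def vflip(a):
    return a[::-1, ...]

def hflip(a):
    return a[:, ::-1, ...]

def center_crop(a, h, w):
    top = (a.shape[0] - h) // 2
    left = (a.shape[1] - w) // 2
    return a[top:top + h, left:left + w]

# e.g. a 101x101 mask that was reflection-padded to 128x128 upstream
padded_mask = np.random.rand(128, 128).astype(np.float32)
blended = wrap_border(padded_mask)
# border bands are blended with their mirrored neighbours, then cropped back
assert blended.shape == (101, 101)
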
Example No. 2
def spacialAg(img1, img2):

    # horizontal flip
    if np.random.random() < 0.5:
        img1 = functional.hflip(img1)
        img2 = functional.hflip(img2)
    # vertical flip
    if np.random.random() < 0.5:
        img1 = functional.vflip(img1)
        img2 = functional.vflip(img2)

    # shift, scale and rotate around the image center
    if np.random.random() < 0.5:
        angle = np.random.uniform(-20, 20)
        scale = np.random.uniform(1 - 0.1, 1 + 0.1)
        dx = np.random.uniform(-0.0625, 0.0625)
        dy = np.random.uniform(-0.0625, 0.0625)
        img1 = functional.shift_scale_rotate(img1, angle, scale, dx, dy, interpolation=cv2.INTER_LINEAR,
                                             border_mode=cv2.BORDER_CONSTANT)
        img2 = functional.shift_scale_rotate(img2, angle, scale, dx, dy, interpolation=cv2.INTER_LINEAR,
                                             border_mode=cv2.BORDER_CONSTANT)

    # grid distortion
    if np.random.random() < 0.5:
        num_steps = 5
        distort_limit = (-0.3, 0.3)
        stepsx = [1 + np.random.uniform(distort_limit[0], distort_limit[1]) for i in
                  range(num_steps + 1)]
        stepsy = [1 + np.random.uniform(distort_limit[0], distort_limit[1]) for i in
                  range(num_steps + 1)]

        img1 = functional.grid_distortion(img1, num_steps, stepsx, stepsy, interpolation=cv2.INTER_LINEAR,
                                          border_mode=cv2.BORDER_CONSTANT)
        img2 = functional.grid_distortion(img2, num_steps, stepsx, stepsy, interpolation=cv2.INTER_LINEAR,
                                          border_mode=cv2.BORDER_CONSTANT)

    # elastic distortion
    if np.random.random() < 0.5:
        alpha = 1
        sigma = 50
        alpha_affine = 50
        interpolation = cv2.INTER_LINEAR
        random_state = np.random.randint(0, 10000)
        img1 = functional.elastic_transform_fast(img1, alpha, sigma, alpha_affine, interpolation,
                                                 cv2.BORDER_CONSTANT, np.random.RandomState(random_state))
        img2 = functional.elastic_transform_fast(img2, alpha, sigma, alpha_affine, interpolation, cv2.BORDER_CONSTANT,
                                                 np.random.RandomState(random_state))


    return img1, img2
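
A minimal usage sketch for spacialAg, assuming `functional` above refers to albumentations.augmentations.functional (as in older albumentations releases) and that cv2 and numpy are imported as in the snippet; the same random spatial transforms are applied to both inputs, which is why it is typically used on an image/mask pair:

import numpy as np

image = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
mask = (np.random.rand(128, 128) > 0.5).astype(np.uint8) * 255

aug_image, aug_mask = spacialAg(image, mask)
# all transforms here preserve the spatial size
assert aug_image.shape == image.shape and aug_mask.shape == mask.shape
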
Example No. 3
 def predict(self, x, ave='temperture'):
     for i in range(x.shape[0]):
         out1 = self.model.predict(np.expand_dims(F.vflip(x[i, ]), axis=0))
         out2 = self.model.predict(np.expand_dims(F.hflip(x[i, ]), axis=0))
         out3 = self.model.predict(
             np.expand_dims(F.vflip(F.hflip(x[i, ])), axis=0))
         out4 = self.model.predict(np.expand_dims(x[i, ], axis=0))
         ave = (F.vflip(out1) + F.hflip(out2) + F.hflip(F.vflip(out3)) +
                out4) / 4
         if i == 0:
             output = ave
         else:
             output = np.concatenate((output, ave), axis=0)
     return output
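
The method above performs flip-based test-time augmentation (TTA): each prediction made on a flipped input is flipped back before the four outputs are averaged. A standalone NumPy sketch of the same averaging scheme, with a hypothetical predict_fn standing in for self.model.predict:

import numpy as np

def predict_fn(image):
    # stand-in for self.model.predict; a dummy identity "model"
    return image.astype(np.float32)

def flip_tta(image):
    outs = [
        np.flipud(predict_fn(np.flipud(image))),                        # vflip in, vflip back
        np.fliplr(predict_fn(np.fliplr(image))),                        # hflip in, hflip back
        np.flipud(np.fliplr(predict_fn(np.fliplr(np.flipud(image))))),  # both flips, both undone
        predict_fn(image),                                              # no flip
    ]
    return np.mean(outs, axis=0)

image = np.random.rand(101, 101).astype(np.float32)
averaged = flip_tta(image)
assert averaged.shape == image.shape
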
Example No. 4
 def apply(self, img, **params):
     if img.ndim == 3 and img.shape[2] > 1 and img.dtype == np.uint8:
         # OpenCV is faster than NumPy only for
         # non-grayscale 8-bit images
         return af.hflip_cv2(img)
     else:
         return af.hflip(img)
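
A rough timing sketch of the trade-off described in the comment, comparing cv2.flip with a plain NumPy slice-and-copy on an 8-bit three-channel image (simple stand-ins, not the library's own hflip_cv2/hflip implementations):

import timeit

import cv2
import numpy as np

img = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)

# flipCode=1 flips around the vertical axis, i.e. a horizontal flip
t_cv2 = timeit.timeit(lambda: cv2.flip(img, 1), number=1000)
# reversed columns plus a contiguous copy
t_np = timeit.timeit(lambda: np.ascontiguousarray(img[:, ::-1, :]), number=1000)
print(f"cv2.flip: {t_cv2:.4f}s   numpy slice+copy: {t_np:.4f}s")
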
Example No. 5
def ten_crop(img: numpy.ndarray,
             size: List[int],
             vertical_flip: bool = False) -> List[numpy.ndarray]:
    """Generate ten cropped images from the given image.
    Crop the given image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).
    Args:
        img (numpy.ndarray): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted
            as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal
    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
        Corresponding top left, top right, bottom left, bottom right and
        center crop and same for the flipped image.
    """

    first_five = five_crop(img, size)

    if vertical_flip:
        img = F.vflip(img)
    else:
        img = F.hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five
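
A minimal usage sketch for ten_crop, assuming five_crop and the F flip helpers come from the same module and that the requested crop fits inside the image:

import numpy as np

img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)

crops = ten_crop(img, [224, 224])                             # five crops + five from the h-flipped image
crops_vflip = ten_crop(img, [224, 224], vertical_flip=True)   # second five taken from the v-flipped image
assert len(crops) == 10 and len(crops_vflip) == 10
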
Example No. 6
def test_hflip_float(target):
    img = np.array([[0.4, 0.4, 0.4], [0.0, 0.4, 0.4], [0.0, 0.0, 0.4]],
                   dtype=np.float32)
    expected = np.array([[0.4, 0.4, 0.4], [0.4, 0.4, 0.0], [0.4, 0.0, 0.0]],
                        dtype=np.float32)
    img, expected = convert_2d_to_target_format([img, expected], target=target)
    flipped_img = F.hflip(img)
    assert_array_almost_equal_nulp(flipped_img, expected)
def test_hflip(target):
    img = np.array([[1, 1, 1], [0, 1, 1], [0, 0, 1]], dtype=np.uint8)
    expected_output = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 0]],
                               dtype=np.uint8)
    if target == 'image':
        img = convert_2d_to_3d(img)
        expected_output = convert_2d_to_3d(expected_output)
    flipped_img = F.hflip(img)
    assert np.array_equal(flipped_img, expected_output)
Example No. 8
def test_hflip(target):
    img = np.array(
        [[1, 1, 1],
         [0, 1, 1],
         [0, 0, 1]], dtype=np.uint8)
    expected = np.array(
        [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]], dtype=np.uint8)
    img, expected = convert_2d_to_target_format([img, expected], target=target)
    flipped_img = F.hflip(img)
    assert np.array_equal(flipped_img, expected)
def test_hflip(target):
    img = np.array(
        [[1, 1, 1],
         [0, 1, 1],
         [0, 0, 1]], dtype=np.uint8)
    expected_output = np.array(
        [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]], dtype=np.uint8)
    if target == 'image':
        img = convert_2d_to_3d(img)
        expected_output = convert_2d_to_3d(expected_output)
    flipped_img = F.hflip(img)
    assert np.array_equal(flipped_img, expected_output)
Example No. 10
@pytest.mark.parametrize("target", ["image", "image_4_channels"])
def test_hflip_float(target):
    img = np.array([[0.4, 0.4, 0.4], [0.0, 0.4, 0.4], [0.0, 0.0, 0.4]],
                   dtype=np.float32)
    expected = np.array([[0.4, 0.4, 0.4], [0.4, 0.4, 0.0], [0.4, 0.0, 0.0]],
                        dtype=np.float32)
    img, expected = convert_2d_to_target_format([img, expected], target=target)
    flipped_img = F.hflip(img)
    assert_array_almost_equal_nulp(flipped_img, expected)


@pytest.mark.parametrize("target", ["image", "mask"])
@pytest.mark.parametrize(
    ["code", "func"],
    [[0, F.vflip], [1, F.hflip], [-1, lambda img: F.vflip(F.hflip(img))]])
def test_random_flip(code, func, target):
    img = np.array([[1, 1, 1], [0, 1, 1], [0, 0, 1]], dtype=np.uint8)
    img = convert_2d_to_target_format([img], target=target)
    assert np.array_equal(F.random_flip(img, code), func(img))


@pytest.mark.parametrize("target", ["image", "image_4_channels"])
@pytest.mark.parametrize(
    ["code", "func"],
    [[0, F.vflip], [1, F.hflip], [-1, lambda img: F.vflip(F.hflip(img))]])
def test_random_flip_float(code, func, target):
    img = np.array([[0.4, 0.4, 0.4], [0.0, 0.4, 0.4], [0.0, 0.0, 0.4]],
                   dtype=np.float32)
    img = convert_2d_to_target_format([img], target=target)
    assert_array_almost_equal_nulp(F.random_flip(img, code), func(img))


@pytest.mark.parametrize('target', ['image', 'mask'])
@pytest.mark.parametrize(['code', 'func'], [
    [0, F.vflip],
    [1, F.hflip],
    [-1, lambda img: F.vflip(F.hflip(img))],

])
def test_random_flip(code, func, target):
    img = np.array(
        [[1, 1, 1],
         [0, 1, 1],
         [0, 0, 1]], dtype=np.uint8)
    if target == 'image':
        img = convert_2d_to_3d(img)
    assert np.array_equal(F.random_flip(img, code), func(img))


@pytest.mark.parametrize(['input_shape', 'expected_shape'], [
    [(128, 64), (64, 128)],
    [(128, 64, 3), (64, 128, 3)],
Example No. 12
@pytest.mark.parametrize('target', ['image', 'image_4_channels'])
def test_hflip_float(target):
    img = np.array([[0.4, 0.4, 0.4], [0.0, 0.4, 0.4], [0.0, 0.0, 0.4]],
                   dtype=np.float32)
    expected = np.array([[0.4, 0.4, 0.4], [0.4, 0.4, 0.0], [0.4, 0.0, 0.0]],
                        dtype=np.float32)
    img, expected = convert_2d_to_target_format([img, expected], target=target)
    flipped_img = F.hflip(img)
    assert_array_almost_equal_nulp(flipped_img, expected)


@pytest.mark.parametrize('target', ['image', 'mask'])
@pytest.mark.parametrize(['code', 'func'], [
    [0, F.vflip],
    [1, F.hflip],
    [-1, lambda img: F.vflip(F.hflip(img))],
])
def test_random_flip(code, func, target):
    img = np.array([[1, 1, 1], [0, 1, 1], [0, 0, 1]], dtype=np.uint8)
    img = convert_2d_to_target_format([img], target=target)
    assert np.array_equal(F.random_flip(img, code), func(img))


@pytest.mark.parametrize('target', ['image', 'image_4_channels'])
@pytest.mark.parametrize(['code', 'func'], [
    [0, F.vflip],
    [1, F.hflip],
    [-1, lambda img: F.vflip(F.hflip(img))],
])
def test_random_flip_float(code, func, target):
    img = np.array([[0.4, 0.4, 0.4], [0.0, 0.4, 0.4], [0.0, 0.0, 0.4]],
Example No. 13
 def albumentations(self, img):
     return albumentations.hflip(img)
Example No. 14
 def albumentations(self, img):
     return albumentations.hflip(img)
Example No. 15
 def albumentations_fn(self, image, arg):
     return F.hflip(image)
Example No. 16
 def albumentations(self, img):
     if img.ndim == 3 and img.shape[2] > 1 and img.dtype == np.uint8:
         return albumentations.hflip_cv2(img)
     else:
         return albumentations.hflip(img)
def predict_with_snapshot(snapshot_dir: str, arch: str, device: str, fn: str,
                          th: float, masks_fn: str):
    logger = configure_logger('prediction', logging.INFO, './logs')
    test = pd.read_csv(os.path.join(DATA_DIR, 'sample_submission.csv'))

    if masks_fn is None:
        test_images = np.load(os.path.join(DATA_DIR, 'test_images.npy'))
        test_ids = test.index.tolist()
        model = architectures[arch]().to(device)
        tta_augs = [val_augmentations, flip_pad]
        tta_predictions = []
        for cycle_dir in os.listdir(snapshot_dir):
            snapshots = os.listdir(os.path.join(snapshot_dir, cycle_dir))
            best_snapshot = sorted(snapshots,
                                   key=lambda x: int(x.split('.')[-1]),
                                   reverse=True)[0]

            state_dict = torch.load(
                os.path.join(snapshot_dir, cycle_dir, best_snapshot))

            model.load_state_dict(state_dict)
            model.eval()
            logger.info(f'Loaded model from {best_snapshot}')

            for i, aug in enumerate(tta_augs):
                test_dataset = SaltTestDataset(test_ids, test_images, aug)
                test_loader = DataLoader(test_dataset, 30, shuffle=False)

                # actual prediction is made here
                masks = []
                with torch.no_grad():
                    for batch in tqdm.tqdm(test_loader):
                        image = batch['image'].to(device)
                        y_pred = torch.sigmoid(model(image)).cpu().numpy()
                        masks.append(y_pred)

                # postprocess masks (crop, threshold, rle)
                masks = np.concatenate(masks).reshape(
                    (len(test), NET_INPUT_SIZE, NET_INPUT_SIZE))
                # TODO: replace that with something that makes more sense
                if i == 1:
                    masks = [hflip(mask) for mask in masks]
                tta_predictions.append(masks)

        masks = np.mean(tta_predictions, axis=0)
        np.save(os.path.join(SUBMISSION_DIR, 'raw_masks_fold1_scnd.npy'),
                masks)
    else:
        masks = np.load(os.path.join(SUBMISSION_DIR, masks_fn))

    masks = [(center_crop(mask, SRC_SIZE, SRC_SIZE) > th).astype(int)
             for mask in masks]
    rle_masks = [get_mask_rle(mask) for mask in masks]
    test['rle_mask'] = rle_masks
    # TODO: get some stats here on empty masks etc. and log them too
    submission_fn = os.path.join(SUBMISSION_DIR,
                                 f'{fn}_{get_current_datetime()}.csv')
    test.to_csv(submission_fn, index=None)
    logger.info(f'Saved submission to {submission_fn}')
Example No. 18
    img, expected = convert_2d_to_target_format([img, expected], target=target)
    flipped_img = F.hflip(img)
    assert np.array_equal(flipped_img, expected)


@pytest.mark.parametrize("target", ["image", "image_4_channels"])
def test_hflip_float(target):
    img = np.array([[0.4, 0.4, 0.4], [0.0, 0.4, 0.4], [0.0, 0.0, 0.4]], dtype=np.float32)
    expected = np.array([[0.4, 0.4, 0.4], [0.4, 0.4, 0.0], [0.4, 0.0, 0.0]], dtype=np.float32)
    img, expected = convert_2d_to_target_format([img, expected], target=target)
    flipped_img = F.hflip(img)
    assert_array_almost_equal_nulp(flipped_img, expected)


@pytest.mark.parametrize("target", ["image", "mask"])
@pytest.mark.parametrize(["code", "func"], [[0, F.vflip], [1, F.hflip], [-1, lambda img: F.vflip(F.hflip(img))]])
def test_random_flip(code, func, target):
    img = np.array([[1, 1, 1], [0, 1, 1], [0, 0, 1]], dtype=np.uint8)
    img = convert_2d_to_target_format([img], target=target)
    assert np.array_equal(F.random_flip(img, code), func(img))


@pytest.mark.parametrize("target", ["image", "image_4_channels"])
@pytest.mark.parametrize(["code", "func"], [[0, F.vflip], [1, F.hflip], [-1, lambda img: F.vflip(F.hflip(img))]])
def test_random_flip_float(code, func, target):
    img = np.array([[0.4, 0.4, 0.4], [0.0, 0.4, 0.4], [0.0, 0.0, 0.4]], dtype=np.float32)
    img = convert_2d_to_target_format([img], target=target)
    assert_array_almost_equal_nulp(F.random_flip(img, code), func(img))


@pytest.mark.parametrize(["input_shape", "expected_shape"], [[(128, 64), (64, 128)], [(128, 64, 3), (64, 128, 3)]])