def __init__(self, image_size=(126, 32), background_path="/home/agarcia/repos/mafat-radar-challenge/mafat_radar_challenge/data/mafat_background_v9_spectrogram.npy"):
    """Configure train-time augmentations for radar spectrograms.

    Args:
        image_size: (height, width) target size used by the Resize step in
            the pipelines built from this object.
        background_path: path to the .npy background-spectrogram file fed to
            BackgroundBlend. Previously hard-coded; kept as the default so
            existing callers behave identically.
    """
    self.image_size = image_size
    # Blends a random background spectrogram into the sample. Currently
    # disabled in the Compose below but kept configured for experiments.
    self.background_blend = BackgroundBlend(
        background_path,
        alpha=0.8,
        p=0.2,
    )
    self.gaussian_filter = GaussianFilter(kernel_size=(20, 1))
    # Circular shifts along each axis of the spectrogram.
    self.rolling_x = RollingX(shift=(-20, 20))
    self.rolling_y = RollingY(shift=(-20, 20))
    self.delta = Delta()
    self.deltadelta = DeltaDelta()
    self.background_supp = BackgroundSuppression()
    # SpecAugment-style random masking over frequency and time bins.
    self.freq_mask = FreqMask(F=(5, 25), num_masks=(1, 3))
    self.time_mask = TimeMask(T=(1, 5), num_masks=(1, 10))
    self.aug = Compose(
        [
            Lambda(self.rolling_x.transform),
            Lambda(self.rolling_y.transform),
            # Lambda(self.background_supp.transform),  # Background suppression
            Lambda(minmax_norm),  # This is needed for Noise and Blur addition
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            Rotate(limit=(180, 180), p=0.5),  # exact 180-degree flip only
            # Disabled experiments, kept for reference:
            # ShiftScaleRotate(shift_limit=0.1, scale_limit=0, rotate_limit=0,
            #                  p=0.5, border_mode=cv2.BORDER_CONSTANT),
            # OneOf([
            #     MultiplicativeNoise(multiplier=[0.8, 1.3],
            #                         elementwise=True, p=0.25),
            #     GaussianBlur(p=0.25, blur_limit=(1, 3)),
            # ]),
            # RandomBrightnessContrast(brightness_limit=0.1,
            #                          contrast_limit=0.1, p=0.1),
            # Cutout(num_holes=1,
            #        max_h_size=int(0.2 * self.image_size[0]),
            #        max_w_size=int(0.2 * self.image_size[1]),
            #        p=0.5),
            # Lambda(self.delta.transform),
            # Lambda(self.deltadelta.transform),
            # Lambda(self.background_blend.transform),
            # Lambda(self.gaussian_filter.transform),  # Gaussian
            Lambda(self.time_mask.transform),
            Lambda(self.freq_mask.transform),
            # iaa.CenterCropToFixedSize(height=90, width=None),
        ]
    )
 def build_test(self):
     """Return the deterministic evaluation pipeline.

     No random augmentation is applied: the spectrogram is min-max
     scaled, resized to ``self.image_size``, normalized, and converted
     to a tensor.
     """
     height, width = self.image_size
     return Compose(
         [
             Lambda(minmax_norm),  # scale raw values into a fixed range
             Resize(height, width, interpolation=cv2.INTER_CUBIC),
             Lambda(normalize),
             ToTensor(),
         ]
     )
 def build_train(self):
     """Return the training pipeline.

     Applies the stochastic augmentations assembled in ``__init__``
     (``self.aug``), then resizes to ``self.image_size``, normalizes,
     and converts to a tensor.
     """
     height, width = self.image_size
     return Compose(
         [
             self.aug,  # random flips, rotations, rolls, masking
             Resize(height, width, interpolation=cv2.INTER_CUBIC),
             Lambda(normalize),
             ToTensor(),
         ]
     )
# --- Example #4 (snippet separator from source scrape) ---
    new_image = canvas + image

    #limiting brightness to 255
    new_image = np.vectorize(lambda x: x if x < 255 else 255)(new_image)

    return new_image


# Training-time augmentation: mild affine jitter, random blotch artifacts,
# then rescale pixel values from [0, 255] into [0, 1] floats.
_affine_jitter = ShiftScaleRotate(
    shift_limit=0.07,
    scale_limit=0.07,
    rotate_limit=50,
    border_mode=cv2.BORDER_CONSTANT,
    value=0,
    p=0.95,
)
augmentations = Compose([
    _affine_jitter,
    Lambda(image=randomBlotch, p=0.7),
    ToFloat(max_value=255),  # normalizes the data to [0,1] in float
])


class AugmentedDataSequence(Sequence):
    def __init__(self, x_set, y_set, batch_size=32):
        self.x = x_set
        self.y = y_set
        self.batch_size = batch_size

    def __len__(self):
        return int(np.ceil((len(self.x) + 0.0) / self.batch_size))

    def __getitem__(self, index):
        x_batch = self.x[index * self.batch_size:(index + 1) * self.batch_size]
# --- Example #5 (snippet separator from source scrape) ---
            std=[0.229, 0.224, 0.225],
        ),
            #ToTensorV2(),
    ])


model = smp.Unet(encoder_name='se_resnext50_32x4d', 
                 encoder_weights='imagenet', 
                 activation='sigmoid')

# NOTE(review): the Unet built above is immediately discarded — `model` is
# rebound by torch.load on the next line, which deserializes the full model
# object from disk. The construction may only be needed so the class is in
# scope for unpickling; confirm and remove if not.
model = torch.load(os.path.join(path.realpath(path.curdir)+MODEL_SAVE_DIR, 'model.pth'), map_location=torch.device(device))
model.to(device);
model.eval();


# Identity image Lambda (no-op transform) and identity affine for rasterio,
# so tiles are read without augmentation or georeferencing offsets.
identity_trfm = Lambda(image = lambda x,cols=None,rows=None : x)
identity = rasterio.Affine(1, 0, 0, 0, 1, 0)

transforms = ValTransforms()

# Iterate every .tiff in IMAGE_DIR; `subm` collects per-image predictions.
p = pathlib.Path(IMAGE_DIR)
subm = {}
for i, filename in enumerate(p.glob('*.tiff')):

    dataset = rasterio.open(filename.as_posix(), transform=identity)
    print(dataset.shape)
    slices = make_grid(dataset.shape, window=WINDOW, min_overlap=MIN_OVERLAP)
    preds = np.zeros(dataset.shape, dtype=np.uint8)
    
    for (x1,x2,y1,y2) in slices: