Example #1
def create_image_and_mask_augmenter():
    aug = iaa.SomeOf(
        (0, None),
        [
            iaa.Noop(),
            iaa.Scale((0.75, 1.0)),
            iaa.CropAndPad(percent=(-0.25, 0.25)),
            iaa.Affine(translate_px={
                "x": (-20, 20),
                "y": (-20, 20)
            }),
            #iaa.PiecewiseAffine(scale=(0.01, 0.05)),
            iaa.Fliplr(1),
            iaa.Flipud(1)
        ])
    return aug
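A minimal usage sketch (not part of the original example): the augmenter contains random geometric ops, so a deterministic copy is needed for the image and its mask to receive identical transforms. image and mask are assumed to be equally sized numpy arrays.

def augment_image_and_mask(image, mask):
    aug = create_image_and_mask_augmenter()
    aug_det = aug.to_deterministic()  # freeze the sampled parameters
    return aug_det.augment_image(image), aug_det.augment_image(mask)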
def get_training_augmenter(height, width, augmenter_level):
    """
    Get the training augmenter according to the requested level of stochasticity.
    """
    aug = iaa.Sequential(
        [
            iaa.Fliplr(0.5),  # horizontal flips
            *get_noise_augmenters(augmenter_level),  # heavy or simple noise augmenters
            iaa.Scale({
                "height": height,
                "width": width
            })
        ],
        random_order=True)  # apply augmenters in random order

    return aug
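get_noise_augmenters is not defined in this snippet; a minimal sketch of what such a helper might return is shown below (the level names and noise parameters are assumptions, not the original implementation).

def get_noise_augmenters(augmenter_level):
    # Hypothetical helper: stronger noise for the 'heavy' level, simple noise otherwise.
    if augmenter_level == 'heavy':
        return [iaa.AdditiveGaussianNoise(scale=(0.0, 0.05 * 255)),
                iaa.GaussianBlur(sigma=(0.0, 1.5))]
    return [iaa.AdditiveGaussianNoise(scale=(0.0, 0.02 * 255))]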
    def inference_rgb(self, img_data, orgshape):
        scale = (orgshape[0] * 1.0 / self.inres[0],
                 orgshape[1] * 1.0 / self.inres[1])

        img_scale = iaa.Scale({
            "height": self.inres[0],
            "width": self.inres[1]
        })

        img_data = img_scale.augment_image(img_data)
        img_data = dg.normalize_img(img_data)

        input = img_data[np.newaxis, :, :, :]
        out = self.model.predict(input)
        if self.num_hgstacks > 1:
            out = out[-1]
        return out, scale
Example #4
 def __init__(self):
     self.augmentor_pipeline = Pipeline()
     self.augmentor_pipeline.add_operation(
         Operations.Crop(probability=1, width=64, height=64, centre=False))
     self.augmentor_pipeline.add_operation(
         Operations.Resize(probability=1,
                           width=512,
                           height=512,
                           resample_filter="BILINEAR"))
     self.imgaug_transform = iaa.Sequential([
         iaa.CropToFixedSize(width=64, height=64),
         iaa.Scale(size=512, interpolation="linear")
     ])
     self.solt_stream = slc.Stream([
         slt.CropTransform(crop_size=(64, 64), crop_mode="r"),
         slt.ResizeTransform(resize_to=(512, 512))
     ])
Example #5
def iaa_letterbox(img, new_dim):
    if isinstance(img, tuple):
        org_dim = img
    else:
        org_dim = img.shape[1], img.shape[0]

    padded_w, padded_h, x_pad, y_pad, ratio = letterbox_transforms(
        *org_dim, *new_dim)
    l_pad, r_pad = x_pad, new_dim[0] - padded_w - x_pad
    t_pad, b_pad = y_pad, new_dim[1] - padded_h - y_pad
    lb_reverter = np.array(
        [org_dim[0], org_dim[1], padded_w, padded_h, x_pad, y_pad])

    return iaa.Sequential([iaa.Scale({ "width": padded_w, "height": padded_h }),
                           iaa.Pad(px=(t_pad, r_pad, b_pad, l_pad), keep_size=False, pad_cval=128),
                          ]), \
           lb_reverter
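A usage sketch (assuming a 416x416 target size, as in YOLO-style pipelines, and img as an HxWxC numpy array; letterbox_transforms must be available from the surrounding module):

aug, lb_reverter = iaa_letterbox(img, (416, 416))
img_letterboxed = aug.augment_image(img)  # scaled to fit, then padded with gray (128) to 416x416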
Example #6
def get_imgaug_sequence_for_camvid(width=None, height=None):
	if height is not None and width is not None:
		seq = iaa.Sequential([
			iaa.SomeOf(1, [
				#iaa.Sometimes(0.5, iaa.Crop(px=(0, 100))),  # Crop images from each side by 0 to 16px (randomly chosen).
				iaa.Sometimes(0.5, iaa.Crop(percent=(0, 0.25))),  # Crop images by 0-25% of their height/width.
				iaa.Fliplr(0.5),  # Horizontally flip 50% of the images.
				iaa.Flipud(0.5),  # Vertically flip 50% of the images.
				iaa.Sometimes(0.5, iaa.Affine(
					scale={'x': (0.8, 1.2), 'y': (0.8, 1.2)},  # Scale images to 80-120% of their size, individually per axis.
					translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)},  # Translate by -20 to +20 percent (per axis).
					rotate=(-45, 45),  # Rotate by -45 to +45 degrees.
					shear=(-16, 16),  # Shear by -16 to +16 degrees.
					#order=[0, 1],  # Use nearest neighbour or bilinear interpolation (fast).
					order=0,  # Use nearest-neighbour interpolation (fast; keeps label values intact).
					#cval=(0, 255),  # If mode is constant, use a cval between 0 and 255.
					#mode=ia.ALL  # Use any of scikit-image's warping modes.
					mode='reflect'  # Fill newly created pixels by reflecting the image border.
				))
				#iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 3.0)))  # Blur images with a sigma of 0 to 3.0.
			]),
			iaa.Scale(size={'height': height, 'width': width}, interpolation='nearest')  # Resize.
		])
	else:
		seq = iaa.Sequential(
			iaa.SomeOf(1, [
				#iaa.Sometimes(0.5, iaa.Crop(px=(0, 100))),  # Crop images from each side by 0 to 16px (randomly chosen).
				iaa.Sometimes(0.5, iaa.Crop(percent=(0, 0.1))), # Crop images by 0-10% of their height/width.
				iaa.Fliplr(0.5),  # Horizontally flip 50% of the images.
				iaa.Flipud(0.5),  # Vertically flip 50% of the images.
				iaa.Sometimes(0.5, iaa.Affine(
					scale={'x': (0.8, 1.2), 'y': (0.8, 1.2)},  # Scale images to 80-120% of their size, individually per axis.
					translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)},  # Translate by -20 to +20 percent (per axis).
					rotate=(-45, 45),  # Rotate by -45 to +45 degrees.
					shear=(-16, 16),  # Shear by -16 to +16 degrees.
					#order=[0, 1],  # Use nearest neighbour or bilinear interpolation (fast).
					order=0,  # Use nearest-neighbour interpolation (fast; keeps label values intact).
					#cval=(0, 255),  # If mode is constant, use a cval between 0 and 255.
					#mode=ia.ALL  # Use any of scikit-image's warping modes.
					mode='reflect'  # Fill newly created pixels by reflecting the image border.
				))
				#iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 3.0)))  # Blur images with a sigma of 0 to 3.0.
			])
		)

	return seq
def modify_image_train(img, bb):
    img_height, img_width = img.shape[0], img.shape[1]
    x_min, y_min, width, height = bb
    x1, y1, x2, y2 = to_imgaugbb(x_min, y_min, width, height)
    bb_array = ia.BoundingBoxesOnImage([ia.BoundingBox(x1, y1, x2, y2)],
                                       shape=(img_height, img_width))

    rgb_img = skimage.color.gray2rgb(img)
    imagenet_height, imagenet_width = 224, 224
    seq = iaa.Sequential(
        [iaa.Scale({
            "height": imagenet_height,
            "weight": imagenet_width
        })])
    seq_det = seq.to_deterministic()
    image_aug = seq_det.augment_images([rgb_img])[0]
    bbs_aug = seq_det.augment_bounding_boxes([bb_array])[0]
    return image_aug, bbs_aug
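to_imgaugbb is not shown in this example; it presumably converts an (x_min, y_min, width, height) box to corner coordinates. A minimal sketch of such a helper (hypothetical, for illustration only):

def to_imgaugbb(x_min, y_min, width, height):
    # Convert a COCO-style (x, y, w, h) box to (x1, y1, x2, y2) corners.
    return x_min, y_min, x_min + width, y_min + height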
    def _scale(self, image: np.ndarray, bboxes: np.ndarray) -> Tuple[np.ndarray, ia.BoundingBoxesOnImage]:
        seq_lst = [
            iaa.Affine(scale=0.7, mode='edge'),  # scale image, preserving original image shape
        ]
        if self.resize is not None:
            seq_lst.append(iaa.Scale({"height": self.resize[0], "width": self.resize[1]}))

        seq = iaa.Sequential(seq_lst)

        bbs = self._ndarray_to_BoundingBoxesOnImage(bboxes, image.shape)

        # Make our sequence deterministic.
        # We can now apply it to the image and then to the BBs and it will lead to the same augmentations.
        seq_det = seq.to_deterministic()

        image_aug = seq_det.augment_images([image])[0]
        bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]

        return image_aug, bbs_aug
Example #9
  def __init__(self):
    self.aug = iaa.Sequential([
        iaa.Scale((224, 224)),
        iaa.Sometimes(0.30, iaa.GaussianBlur(sigma=(0, 3.0))),
        iaa.Sometimes(0.25, iaa.Multiply((0.5, 1.5), per_channel=0.5)),
        iaa.Sometimes(0.20, iaa.Invert(0.25, per_channel=0.5)),
        iaa.Sometimes(0.25, iaa.ReplaceElementwise(
            iap.FromLowerResolution(iap.Binomial(0.1), size_px=8),
            iap.Normal(128, 0.4 * 128),
            per_channel=0.5)),
        iaa.Sometimes(0.30, iaa.AdditivePoissonNoise(40)),
        iaa.Fliplr(0.5),
        iaa.Affine(rotate=(-20, 20), mode='symmetric'),
        iaa.Sometimes(0.30,
                      iaa.OneOf([iaa.Dropout(p=(0, 0.1)),
                                 iaa.CoarseDropout(0.1, size_percent=0.5)])),
        iaa.AddToHueAndSaturation(value=(-10, 10), per_channel=True)
    ])
Example #10
 def __init__(self):
     self.aug = iaa.Sequential([
         iaa.Scale((224, 224)),
         iaa.Sometimes(0.25, iaa.GaussianBlur(sigma=(0, 3.0))),
         iaa.Affine(rotate=(-20, 20), mode='constant'),
         iaa.Sometimes(
             0.25,
             iaa.OneOf([
                 iaa.Dropout(p=(0, 0.1)),
                 iaa.CoarseDropout(0.1, size_percent=0.5),
                 iaa.Sharpen(alpha=0.5)
             ])),
         iaa.AddToHueAndSaturation(value=(-10, 10), per_channel=True),
         iaa.Sometimes(0.15, iaa.WithChannels(0, iaa.Add((10, 100)))),
         iaa.Sometimes(
             0.05,
             iaa.ChangeColorspace(from_colorspace="RGB",
                                  to_colorspace="HSV"))
     ])
    def _fliplr(self, image: np.ndarray, bboxes: np.ndarray) -> Tuple[np.ndarray, ia.BoundingBoxesOnImage]:
        seq_lst = [
            iaa.Fliplr(1.0),  # horizontally flip
        ]
        if self.resize is not None:
            seq_lst.append(iaa.Scale({"height": self.resize[0], "width": self.resize[1]}))

        seq = iaa.Sequential(seq_lst)

        bbs = self._ndarray_to_BoundingBoxesOnImage(bboxes, image.shape)

        # Make our sequence deterministic.
        # We can now apply it to the image and then to the BBs and it will lead to the same augmentations.
        seq_det = seq.to_deterministic()

        image_aug = seq_det.augment_images([image])[0]
        bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]

        return image_aug, bbs_aug
    def _resize(self, image: np.ndarray, bboxes: np.ndarray):
        if self.resize is None:
            raise Exception('--resize option has not been specified but the _resize() '
                            'method was invoked; script logic error')
        seq_lst = [
            iaa.Scale({"height": self.resize[0], "width": self.resize[1]}),  # resize not preserving the aspect ratio
        ]

        seq = iaa.Sequential(seq_lst)

        bbs = self._ndarray_to_BoundingBoxesOnImage(bboxes, image.shape)

        # Make our sequence deterministic.
        # We can now apply it to the image and then to the BBs and it will lead to the same augmentations.
        seq_det = seq.to_deterministic()

        image_aug = seq_det.augment_images([image])[0]
        bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]

        return image_aug, bbs_aug
def predict_on_image(model, args):
    # pre-processing on image
    image = cv2.imread(args.data, -1)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    resize = iaa.Scale({'height': args.crop_height, 'width': args.crop_width})
    resize_det = resize.to_deterministic()
    image = resize_det.augment_image(image)
    image = Image.fromarray(image).convert('RGB')
    image = transforms.ToTensor()(image).unsqueeze(0)

    # read csv label path
    label_info = get_label_info(args.csv_path)
    # predict
    model.eval()
    predict = model(image).squeeze()
    predict = reverse_one_hot(predict)
    predict = colour_code_segmentation(np.array(predict), label_info)
    predict = cv2.resize(np.uint8(predict), (960, 720))
    cv2.imwrite(args.save_path,
                cv2.cvtColor(np.uint8(predict), cv2.COLOR_RGB2BGR))
Example #14
 def __init__(self, image_size):
     self.image_size = image_size
     self.data = glob.glob(
         '/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/xair/land_19_bk/*/*.json'
     )
     self.class_mapix = dict(zip(labels, range(len(labels))))
     self.aug = iaa.Sequential([
         iaa.Flipud(0.5),
         iaa.Fliplr(0.5),
         iaa.Affine(
             scale={
                 "x": (0.9, 1.1),
                 "y": (0.9, 1.1)
             },
             rotate=(-30, 30),
         ),
     ])
     self.factor = iaa.Sequential([
         iaa.Scale(size=(128, 128)),
     ])
Example #15
def aligner_preprocessing(img, target, crop_coordinates, augmentation,
                          target_size, bins_nr):
    height, width = target_size

    keypoints = ia.KeypointsOnImage([
        ia.Keypoint(x=int(target.bonnet_x), y=int(target.bonnet_y)),
        ia.Keypoint(x=int(target.blowhead_x), y=int(target.blowhead_y))
    ],
                                    shape=img.shape)

    crop = CropKeypoints(crop_coordinates).to_deterministic()
    scale = iaa.Scale({"height": height, "width": width}).to_deterministic()
    augmenter = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Affine(translate_px={
            "x": (-4, 4),
            "y": (-4, 4)
        },
                   rotate=(-180, 180),
                   scale=(1.0, 1.5)),
    ]).to_deterministic()

    if augmentation:
        transformations = [crop, augmenter, scale]
    else:
        transformations = [crop, scale]

    transformer = iaa.Sequential(transformations).to_deterministic()

    aug_X = transformer.augment_image(img)

    aug_points = transformer.augment_keypoints([keypoints])
    aug_points_formatted = np.reshape(aug_points[0].get_coords_array(),
                                      -1).astype(np.float64)
    aug_points_binned = bin_quantizer(aug_points_formatted, (height, width),
                                      bins_nr)

    aug_target_binned = np.hstack(
        [aug_points_binned, target[ALIGNER_AUXILARY_COLUMNS].values])
    return aug_X, aug_target_binned
def augment_images(images, boxes):
    boxes_augs = []
    for box1 in boxes:
        for box in box1:
            boxes_augs.append(
                ia.BoundingBox(x1=box[0], y1=box[1], x2=box[2], y2=box[3]))

    bbs = ia.BoundingBoxesOnImage(boxes_augs, shape=images[0].shape)

    seq = iaa.Sequential([
        iaa.Scale(400),
    ])

    # Call this once PER BATCH; otherwise every batch receives identical augmentations.
    seq_det = seq.to_deterministic()
    images_data = images
    # for i,img in enumerate(images):
    #     images_data[i,:,:,:]=img[:,:,:]
    image_augs = seq_det.augment_images(images_data)
    bbs_augs = seq_det.augment_bounding_boxes([bbs])
    return (image_augs, bbs_augs)
Example #17
def edit_image(image_name):
    image = read_as_pil_image(image_name)
    if config.IMAGE_SCALE != 1:
        image = apply_effect(image, augmenters.Scale(config.IMAGE_SCALE))
    if config.IMAGE_ROTATION != 0:
        image = image.rotate(config.IMAGE_ROTATION)
    if config.IMAGE_SHEAR != 0:
        image = apply_effect(image,
                             augmenters.Affine(shear=config.IMAGE_SHEAR))
    if config.IMAGE_BLUR > 0:
        image = apply_effect(image,
                             augmenters.GaussianBlur(sigma=config.IMAGE_BLUR))
    if config.IMAGE_SHARPEN != 0:
        image = apply_effect(
            image, augmenters.Sharpen(alpha=config.IMAGE_SHARPEN,
                                      lightness=1.0))
    if config.IMAGE_EMBOSS != 0:
        image = apply_effect(
            image, augmenters.Emboss(alpha=config.IMAGE_EMBOSS, strength=1.0))
    if config.IMAGE_DISTORTIONS != 0:
        image = apply_effect(
            image, augmenters.PiecewiseAffine(scale=config.IMAGE_DISTORTIONS))
    if config.IMAGE_PIXEL_DISPLACEMENT != 0:
        image = apply_effect(
            image,
            augmenters.ElasticTransformation(
                alpha=config.IMAGE_PIXEL_DISPLACEMENT, sigma=0.25))
    if config.IMAGE_BRIGHTNESS_INCREMENT != 1:
        image = ImageEnhance.Brightness(image).enhance(
            config.IMAGE_BRIGHTNESS_INCREMENT)
    if config.IMAGE_BRIGHTNESS_REDUCTION != 1:
        image = ImageEnhance.Brightness(image).enhance(
            config.IMAGE_BRIGHTNESS_REDUCTION)
    if config.IMAGE_CONTRAST_INCREMENT != 1:
        image = ImageEnhance.Contrast(image).enhance(
            config.IMAGE_CONTRAST_INCREMENT)
    if config.IMAGE_CONTRAST_REDUCTION != 1:
        image = ImageEnhance.Contrast(image).enhance(
            config.IMAGE_CONTRAST_REDUCTION)
    save_as_jpg(image_name, image)
Example #18
def predict_on_image(model, epoch, csv_path, args):
    # pre-processing on image
    image = cv2.imread("demo/ceshi.png", -1)
    #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    resize = iaa.Scale({'height': args.crop_height, 'width': args.crop_width})
    resize_det = resize.to_deterministic()
    image = resize_det.augment_image(image)
    image = Image.fromarray(image).convert('RGB')
    image = transforms.ToTensor()(image).unsqueeze(0)
    #read csv label path
    label_info = reader_csv(csv_path)
    # predict
    model.eval()
    predict,_,_ = model(image.cuda())
    #with torch.no_grad():
        #image1 = cv2.imread("demo/ceshi.png", -1)
        #predict = model(image.cuda())
        #predict=predict.cpu().numpy()
        #predict=predict[0,1,:,:]


        #pmin=np.min(predict)
        #pmax=np.max(predict)
        #predict=((predict-pmin)/(pmax-pmin+0.000001))*225
        #predict=predict.astype(np.uint8)
        #predict=cv2.applyColorMap(predict,cv2.COLORMAP_JET)
        #predict=predict[:,:,::-1]
        #predict = image1+predict*0.3
        #plt.imshow(predict, cmap='gray')
        #save_path = 'demo/epoch_%d.png' % (epoch)
        #cv2.imwrite(save_path, cv2.cvtColor(np.uint8(predict), cv2.COLOR_RGB2BGR))
    w = predict.size()[-1]
    c = predict.size()[-3]
    predict = predict.resize(c, w, w)
    predict = reverse_one_hot(predict)
    predict = colour_code_segmentation(np.array(predict.cpu()), label_info)
    predict = cv2.resize(np.uint8(predict), (224, 224))
    save_path = 'demo/epoch_%d.png' % (epoch)
    cv2.imwrite(save_path, cv2.cvtColor(np.uint8(predict), cv2.COLOR_RGB2BGR))
Example #19
 def __init__(self,
              root_dr,
              image_size,
              mask_pool_size=28,
              output_size=[128, 128]):
     self.root_dr = root_dr
     self.image_size = image_size
     self.mask_pool_size = mask_pool_size
     self.images = glob.glob(os.path.join(root_dr, '*', '*.png'))
     self.aug = iaa.Sequential([
         iaa.Flipud(0.5),
         iaa.Fliplr(0.5),
         iaa.Affine(
             scale={
                 "x": (0.9, 1.1),
                 "y": (0.9, 1.1)
             },
             rotate=(-30, 30),
         ),
     ])
     self.factor = iaa.Sequential([
         iaa.Scale(size=output_size),
     ])
Example #20
    def __init__(self, args):
        if args.augment:

            seq_geom = iaa.Sequential([
                RandomCrop(args.image_size, shared_crop=True),
                iaa.Fliplr(0.5),
                iaa.Flipud(0.5),
            ])

            seq_color = iaa.Sequential([])

        else:
            seq_geom = iaa.Sequential([
                iaa.Scale({
                    "height": args.image_size,
                    "width": args.image_size
                }, 'cubic')
            ])

            seq_color = iaa.Sequential([])

        self.seq_geom = seq_geom
        self.seq_color = seq_color
Example #21
def classifier_preprocessing(img, target, aligner_coordinates, augmentation, target_size):
    height, width = target_size

    align = AlignKeypoints(aligner_coordinates, (height, width)).to_deterministic()
    scale = iaa.Scale({"height": height, "width": width}).to_deterministic()

    augmenter = iaa.Sequential([iaa.Affine(translate_px={"x": (-4, 4), "y": (-4, 4)},
                                           rotate=(-4, 4),
                                           scale=(1.0, 1.3)),
                                iaa.Flipud(0.5),
                                iaa.Fliplr(0.5),
                                ]).to_deterministic()
    if augmentation:
        transformations = [align, augmenter, scale]
    else:
        transformations = [align, scale]

    transformer = iaa.Sequential(transformations).to_deterministic()

    aug_X = transformer.augment_image(img)

    target_np = target.values.astype(np.int64)
    return aug_X, target_np
Example #22
 def transform_batch_images(self, batch_x):
     if self.mode=='train':
         augmenter = iaa.Sequential(
             [
                 iaa.Fliplr(0.3),
                 iaa.Flipud(0.3),
                 iaa.Sometimes(0.3,
                     iaa.SomeOf(1,[
                         iaa.Affine(rotate=(-10,10),cval=0,mode='constant'),
                         iaa.Affine(translate_px=(-10,10),cval=0,mode='constant'),
                         iaa.Crop(percent=(0, 0.1)),
                         iaa.Scale((112,112),
                                 interpolation='cubic',
                                 name=None,
                                 deterministic=False,
                                 random_state=None),
                         iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),
                     ])),
             ],
             random_order=True,
         )
         batch_x = augmenter.augment_images(batch_x)
     return batch_x
Example #23
    def __call__(self, sample):
        image, pose = sample['image'], sample['pose'].reshape([-1, 2])

        # augmentation choices
        seq = iaa.SomeOf(2, [
            iaa.Sometimes(0.4, iaa.Scale((0.5, 1.0))),
            iaa.Sometimes(
                0.6,
                iaa.CropAndPad(percent=(-0.25, 0.25),
                               pad_mode=["edge"],
                               keep_size=False)),
            iaa.Fliplr(0.1),
            iaa.Sometimes(0.4,
                          iaa.AdditiveGaussianNoise(scale=(0, 0.05 * 50))),
            iaa.Sometimes(0.1, iaa.GaussianBlur(sigma=(0, 3.0)))
        ])
        seq_det = seq.to_deterministic()

        image_aug = seq_det.augment_images([image])[0]
        keypoints_aug = seq_det.augment_keypoints(
            [self.pose2keypoints(image, pose)])[0]

        return {'image': image_aug, 'pose': self.keypoints2pose(keypoints_aug)}
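pose2keypoints and keypoints2pose are methods of the same class and are not shown here; a minimal sketch of plausible implementations (assumptions only), converting between an (N, 2) pose array and imgaug keypoints:

    def pose2keypoints(self, image, pose):
        # Wrap each (x, y) row as an imgaug Keypoint on the image.
        keypoints = [ia.Keypoint(x=float(x), y=float(y)) for x, y in pose]
        return ia.KeypointsOnImage(keypoints, shape=image.shape)

    def keypoints2pose(self, keypoints_aug):
        # Flatten the augmented keypoints back into a 1-D pose vector.
        return np.array([[kp.x, kp.y] for kp in keypoints_aug.keypoints]).reshape(-1)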
Example #24
    def __init__(
            self,
            in_shape,
            im_paths=[],
            truth_paths=None,
            locs2d=None,
            sig_prior=0.1,
            mode='train',
            cuda=False,
            normalize=False,  # compute mean and stdev of dataset
            sometimes_rate=0.5,
            seed=0):

        self.im_paths = im_paths

        self.scale = iaa.Scale(in_shape)
        self.sometimes = lambda aug: iaa.Sometimes(sometimes_rate, aug)

        self.aug_affines = self.sometimes(iaa.Noop())

        self.aug_noise = iaa.AdditiveGaussianNoise(scale=0.1)

        self.truth_paths = truth_paths
        self.locs2d = locs2d
        self.sig_prior = sig_prior
        self.device = torch.device('cuda' if cuda else 'cpu')
        self.in_shape = in_shape
        self.seed = seed

        if (normalize):
            self.mode = 'eval'
            self.mean, self.std = self.comp_normalization_factors()
            self.normalize = utls.NormalizeAug(mean=self.mean, std=self.std)
        else:  # Do scaling
            self.normalize = utls.PixelScaling()

        self.mode = mode
Example #25
def predict_on_image(model, args, image):
    '''
        run inference and return the resultant image
    '''
    # pre-processing on image
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    resize = iaa.Scale({'height': args.crop_height, 'width': args.crop_width})
    resize_det = resize.to_deterministic()
    image = resize_det.augment_image(image)
    image = Image.fromarray(image).convert('RGB')
    image = transforms.ToTensor()(image)
    image = transforms.Normalize((0.485, 0.456, 0.406),
                                 (0.229, 0.224, 0.225))(image).unsqueeze(0)
    # read csv label path
    label_info = get_label_info(args.csv_path)
    # predict
    model.eval()
    predict = model(image).squeeze()
    predict = reverse_one_hot(predict)
    # predict = colour_code_segmentation(np.array(predict), label_info)
    predict = colour_code_segmentation(np.array(predict.cpu()), label_info)
    predict = cv2.resize(np.uint8(predict), (960, 720))
    # cv2.imwrite(args.save_path, cv2.cvtColor(np.uint8(predict), cv2.COLOR_RGB2BGR))
    return predict
Example #26
            def __init__(self,
                         blur=True,
                         flip=False,
                         rotate=10,
                         shear=10,
                         **kwargs):
                from imgaug import augmenters as iaa

                sequence = []

                sequence += [
                    iaa.Scale((INPUT_SIZE, INPUT_SIZE)),
                    iaa.ContrastNormalization((0.75, 1.25)),
                    iaa.AddElementwise((-10, 10), per_channel=0.5),
                    iaa.AddToHueAndSaturation(value=(-20, 20),
                                              per_channel=True),
                    iaa.Multiply((0.75, 1.25)),
                ]
                sequence += [
                    iaa.PiecewiseAffine(scale=(0.0005, 0.005)),
                    iaa.Affine(
                        rotate=(-rotate, rotate),
                        shear=(-shear, shear),
                        mode='symmetric',
                    ),
                    iaa.Grayscale(alpha=(0.0, 0.5)),
                ]
                if flip:
                    sequence += [
                        iaa.Fliplr(0.5),
                    ]
                if blur:
                    sequence += [
                        iaa.Sometimes(0.01, iaa.GaussianBlur(sigma=(0, 1.0))),
                    ]
                self.aug = iaa.Sequential(sequence)
Example #27
            def __init__(self):
                from imgaug import augmenters as iaa

                self.aug = iaa.Sequential(
                    [iaa.Scale((INPUT_SIZE, INPUT_SIZE))])
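Note: iaa.Scale was deprecated in later imgaug releases in favor of iaa.Resize; assuming a version that provides iaa.Resize, the same pipeline would be written as:

                self.aug = iaa.Sequential(
                    [iaa.Resize((INPUT_SIZE, INPUT_SIZE))])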
Example #28


numInputChannels = 3
net = deeplab_xception.DeepLabv3_plus(nInputChannels=numInputChannels, n_classes=args.num_of_classes, os=args.output_stride, pretrained=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Let's use", torch.cuda.device_count(), "GPUs!")

print("Initializing weights from: {}...".format(args.checkpoint_path))
net = nn.DataParallel(net)  # Models were trained and saved with DataParallel, so the network must be wrapped the same way here.
net.load_state_dict(torch.load(args.checkpoint_path))
net.to(device)


augs_test = iaa.Sequential([
    iaa.Scale((args.imsize, args.imsize), 0), # Resize the img
])

db_test = AlphaPilotSegmentation(
    input_dir=args.input_images_path, label_dir=args.label_images_path,
    transform=augs_test,
    input_only=None
)
testloader = DataLoader(db_test, batch_size=testBatchSize, shuffle=False, num_workers=4, drop_last=True)
num_img_ts = len(testloader)

#===========================Inference Loop=====================================#
net.eval()
total_iou = 0.0
miou = 0.0
miou_per_class = [0] * args.num_of_classes
Example #29
    raise NotImplementedError

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#criterion = criterion.to(device) #TODO: IS THIS NEEDED?



# Enable Multi-GPU training
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    net = nn.DataParallel(net)

augs_train = iaa.Sequential([
    # Geometric Augs
    iaa.Scale((imsize, imsize), 0),
    iaa.Fliplr(0.5),
    iaa.Flipud(0.5),
    iaa.Rot90((0, 4)),
    # Blur and Noise
    #iaa.Sometimes(0.2, iaa.GaussianBlur(sigma=(0, 1.5), name="gaus-blur")),
    #iaa.Sometimes(0.1, iaa.Grayscale(alpha=(0.0, 1.0), from_colorspace="RGB", name="grayscale")),
    # iaa.Sometimes(0.2, iaa.AdditiveLaplaceNoise(scale=(0, 0.1*255), per_channel=True, name="gaus-noise")),
    # Color, Contrast, etc.
    iaa.Sometimes(0.2, iaa.Multiply((0.75, 1.25), per_channel=0.1, name="brightness")),
    iaa.Sometimes(0.2, iaa.GammaContrast((0.7, 1.3), per_channel=0.1, name="contrast")),
    iaa.Sometimes(0.2, iaa.AddToHueAndSaturation((-20, 20), name="hue-sat")),
    iaa.Sometimes(0.3, iaa.Add((-20, 20), per_channel=0.5, name="color-jitter")),
])
augs_test = iaa.Sequential([
    # Geometric Augs
Example #30
            16)),  # crop images from each side by 0 to 16px (randomly chosen)
    iaa.Flipud(1),  #Flip vertically
    iaa.Fliplr(1),  # horizontally flip
    iaa.GaussianBlur(sigma=(0, 3.0)),  # blur images with a sigma of 0 to 3.0
    iaa.ContrastNormalization(
        (0.75, 1.5)),  # Strengthen or weaken the contrast in each image.
    iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255),
                              per_channel=0.5),  # Add gaussian noise.
    iaa.Multiply(
        (0.8, 1.2),
        per_channel=0.2),  # Make some images brighter and some darker.
    iaa.AdditiveGaussianNoise(scale=0.1 * 255),
    iaa.Affine(translate_px={"x": -40}
               ),  # Augmenter to apply affine transformations to images.
    iaa.Scale({
        "height": 512,
        "width": 512
    }),
    iaa.WithChannels(0, iaa.Add((10, 100))),
    iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0)),
    iaa.Add((-40, 40)),
    iaa.Multiply((0.5, 1.5), per_channel=0.5),
    iaa.CoarseDropout(0.02, size_percent=0.5),
    iaa.ContrastNormalization((0.5, 1.5)),
    iaa.Affine(rotate=(-45, 45)),
])


def thread_work(sblst_sub):
    imgs = []
    imgpath = []
    for pic_name in sblst_sub: