import cv2
import numpy as np
import albumentations as A


def test_compare_crop_and_pad(img_dtype, px, percent, pad_mode, pad_cval, keep_size):
    h, w, c = 100, 100, 3
    mode_mapping = {
        cv2.BORDER_CONSTANT: "constant",
        cv2.BORDER_REPLICATE: "edge",
        cv2.BORDER_REFLECT101: "reflect",
        cv2.BORDER_WRAP: "wrap",
    }
    pad_mode_iaa = mode_mapping[pad_mode]

    bbox_params = A.BboxParams(format="pascal_voc")
    keypoint_params = A.KeypointParams(format="xy", remove_invisible=False)

    # Random keypoints and non-degenerate bounding boxes inside the image.
    keypoints = np.random.randint(0, min(h, w), [10, 2])
    bboxes = []
    for _ in range(10):
        x1, y1 = np.random.randint(0, min(h, w) - 2, 2)
        x2 = np.random.randint(x1 + 1, w - 1)
        y2 = np.random.randint(y1 + 1, h - 1)
        bboxes.append([x1, y1, x2, y2, 0])

    transform_albu = A.Compose(
        [
            A.CropAndPad(
                px=px,
                percent=percent,
                pad_mode=pad_mode,
                pad_cval=pad_cval,
                keep_size=keep_size,
                p=1,
                interpolation=cv2.INTER_AREA
                if (px is not None and px < 0) or (percent is not None and percent < 0)
                else cv2.INTER_LINEAR,
            )
        ],
        bbox_params=bbox_params,
        keypoint_params=keypoint_params,
    )
    transform_iaa = A.Compose(
        [A.IAACropAndPad(px=px, percent=percent, pad_mode=pad_mode_iaa, pad_cval=pad_cval, keep_size=keep_size, p=1)],
        bbox_params=bbox_params,
        keypoint_params=keypoint_params,
    )

    if img_dtype == np.uint8:
        img = np.random.randint(0, 256, (h, w, c), dtype=np.uint8)
    else:
        img = np.random.random((h, w, c)).astype(img_dtype)

    res_albu = transform_albu(image=img, keypoints=keypoints, bboxes=bboxes)
    res_iaa = transform_iaa(image=img, keypoints=keypoints, bboxes=bboxes)

    for key, item in res_albu.items():
        if key == "bboxes":
            # Drop degenerate (zero-area) boxes from the imgaug result before comparing.
            bboxes = np.array(res_iaa[key])
            h = bboxes[:, 3] - bboxes[:, 1]
            w = bboxes[:, 2] - bboxes[:, 0]
            res_iaa[key] = bboxes[(h > 0) & (w > 0)]
        assert np.allclose(item, res_iaa[key]), f"{key} are not equal"
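# A hedged sketch of how the comparison test above might be driven by pytest parametrization;
# the parameter values below are illustrative assumptions, not taken from the original suite.
import pytest

@pytest.mark.parametrize("img_dtype", [np.uint8, np.float32])
@pytest.mark.parametrize("px, percent", [(10, None), (-10, None), (None, 0.1), (None, -0.1)])
@pytest.mark.parametrize("pad_mode", [cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT101, cv2.BORDER_WRAP])
@pytest.mark.parametrize("pad_cval", [0, 124])
@pytest.mark.parametrize("keep_size", [True, False])
def test_compare_crop_and_pad_parametrized(img_dtype, px, percent, pad_mode, pad_cval, keep_size):
    # Delegates to the comparison test defined above.
    test_compare_crop_and_pad(img_dtype, px, percent, pad_mode, pad_cval, keep_size)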
# Dataset paths for the two cross-camera sets.
folder_cross1 = '%s/dataset2/Camera1' % (path)
folder_cross2 = '%s/dataset2/Camera2' % (path)

plt_name = "classes"
car_name = "classes_carros"
plt_folder = "*/classes"
car_folder = "*/classes_carros"
ocr_file = '%s/OCR/output.txt' % (path)

metadata_length = 35
tam_max = 3

# Element-wise absolute difference, used as an L1 distance layer.
L1_layer = Lambda(lambda tensor: K.abs(tensor[0] - tensor[1]))

train_augs = [[], [], [], [], [], []]
test_augs = [[], [], [], []]
keys = ['Set01', 'Set02', 'Set03', 'Set04', 'Set05']

# Augmentation pipeline for car images.
seq_car = albu.Compose([
    albu.IAACropAndPad(px=(0, 8)),
    albu.IAAAffine(scale=(0.8, 1.2), shear=(-8, 8), order=[0, 1], cval=(0), mode='constant'),
    albu.ToFloat(max_value=255)
], p=0.7)

# Second augmentation pipeline for car images.
seq_car2 = albu.Compose([
    albu.IAACropAndPad(px=(0, 8)),
    albu.IAAAffine(scale=(0.8, 1.2), shear=(-8, 8), order=[0, 1], cval=(0),
import cv2
import albumentations as A
from pathlib import Path

# Augmentation pipeline: resize, then one of blur/noise/sharpen, a random flip,
# and either a crop-and-pad or a shift/scale/rotate.
aug = A.Compose([
    A.Resize(384, 512, interpolation=1, p=1),
    # A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
    # A.HueSaturationValue(hue_shift_limit=2, sat_shift_limit=15, val_shift_limit=20, p=0.5),
    A.OneOf([
        A.Blur(blur_limit=5, p=0.3),
        A.GaussNoise(var_limit=(5.0, 10.0), p=0.3),
        A.IAASharpen(alpha=(0.1, 0.3), lightness=(0.5, 1.0), p=0.4),
    ], p=1.0),
    A.Flip(p=0.5),
    # A.Transpose(p=0.5),
    # A.RandomRotate90(p=0.5),
    A.OneOf([
        A.IAACropAndPad(percent=(-0.1, 0.1), keep_size=True, p=0.5),
        A.ShiftScaleRotate(p=0.5),
    ], p=1.0),
])

for fds in fd_in:
    flist = [str(fn) for fn in Path(fds).glob('*.jpg')]
    im_fn = flist[0]
    # for im_fn in tqdm(flist):
    img = cv2.cvtColor(cv2.imread(im_fn), cv2.COLOR_BGR2RGB)
    # Draw nine augmented samples from the same image.
    for n_test in range(9):
        # img_aug = (aug(image=img)['image'] * 255).astype('uint8')
        img_aug = aug(image=img)['image']
        # img_aug = cv2.cvtColor(cv2.imread(im_fn), cv2.COLOR_RGB2BGR)
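        # A hedged continuation sketch, not part of the original snippet: convert the augmented
        # sample back to BGR and write it out so the nine variants can be inspected.
        # The output filename pattern is a hypothetical choice.
        out_fn = '%s_aug%d.jpg' % (Path(im_fn).stem, n_test)
        cv2.imwrite(out_fn, cv2.cvtColor(img_aug, cv2.COLOR_RGB2BGR))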