def test_preserve_aspect_ratio():
    img_path = os.path.join(here, 'data', 'green_fish.jpg')
    img = caer.imread(img_path)

    img_400_400 = caer.resize(img, target_size=(400, 400), preserve_aspect_ratio=True)
    img_223_182 = caer.resize(img, target_size=(223, 182), preserve_aspect_ratio=True)
    img_93_35 = caer.resize(img, target_size=(93, 35), preserve_aspect_ratio=True)

    assert img_400_400.shape[:2] == (400, 400)
    # NumPy reports shape as (h, w), whereas target_size is (w, h)
    assert img_223_182.shape[:2] == (182, 223)
    assert img_93_35.shape[:2] == (35, 93)

def test_target_sizes():
    img_path = os.path.join(here, 'data', 'beverages.jpg')
    img = caer.imread(img_path)

    img_400_400 = caer.resize(img, target_size=(400, 400))
    img_304_339 = caer.resize(img, target_size=(304, 339))
    img_199_206 = caer.resize(img, target_size=(199, 206))

    assert img_400_400.shape[:2] == (400, 400)
    # NumPy reports shape as (h, w), whereas target_size is (w, h)
    assert img_304_339.shape[:2] == (339, 304)
    assert img_199_206.shape[:2] == (206, 199)

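# Both tests above hinge on the same convention: caer.resize takes
# target_size as (width, height), while NumPy's .shape reports
# (height, width[, channels]). A minimal sketch of that round-trip;
# 'sample.jpg' is a placeholder path, not a file shipped with these tests:
import caer

sample = caer.imread('sample.jpg')
resized = caer.resize(sample, target_size=(304, 339))  # (width=304, height=339)
print(resized.shape[:2])                               # -> (339, 304), i.e. (h, w)
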
def test_preserve_aspect_ratio():
    tens_400_400 = caer.resize(cv_tens, target_size=(400, 400), preserve_aspect_ratio=True)
    tens_223_182 = caer.resize(cv_tens, target_size=(223, 182), preserve_aspect_ratio=True)
    tens_93_35 = caer.resize(cv_tens, target_size=(93, 35), preserve_aspect_ratio=True)

    assert tens_400_400.shape[:2] == (400, 400)
    assert tens_223_182.shape[:2] == (182, 223)
    assert tens_93_35.shape[:2] == (35, 93)

    # Type Asserts
    ## Using isinstance() often mistakes a caer.Tensor for an np.ndarray
    assert 'caer.Tensor' in str(type(tens_400_400))
    assert 'caer.Tensor' in str(type(tens_223_182))
    assert 'caer.Tensor' in str(type(tens_93_35))

def show_resized_image():
    global currentImage
    global resizedImgBtn
    global flipHImgBtn
    global flipVImgBtn
    global flipHVImgBtn
    global rotateImgBtn

    tempSize = selectedSize.get()

    if 'x' in tempSize:
        size = tempSize.replace(' ', '').split('x')

        try:
            # Highlight the resize button and un-highlight whichever
            # transform button was previously active
            if resizedImgBtn['bg'] == 'lightgrey':
                resizedImgBtn['bg'] = 'lightblue'

            if flipHImgBtn['bg'] == 'lightblue':
                flipHImgBtn['bg'] = 'lightgrey'
            elif flipVImgBtn['bg'] == 'lightblue':
                flipVImgBtn['bg'] = 'lightgrey'
            elif flipHVImgBtn['bg'] == 'lightblue':
                flipHVImgBtn['bg'] = 'lightgrey'
            else:
                rotateImgBtn['bg'] = 'lightgrey'

            if transformedImage is not None:
                currentImage = transformedImage

            reset_ghs()

            # Resize the image without preserving aspect ratio
            currentImage = caer.resize(currentImage, target_size=(int(size[0]), int(size[1])), preserve_aspect_ratio=False)
            currentImage.cspace = 'rgb'

            if rotationApplied:
                show_rotated_image()
            else:
                image_show(currentImage)
        except Exception as e:
            print(str(e))

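# show_resized_image() trusts that selectedSize holds something like
# '400 x 400' before the int() casts. A small hedged helper that validates
# the format first (parse_size is a hypothetical name, not part of the GUI):
def parse_size(text):
    # Accept '400x400' or '400 x 400'; return (width, height) or None
    parts = text.replace(' ', '').split('x')
    if len(parts) == 2 and all(p.isdigit() for p in parts):
        return int(parts[0]), int(parts[1])
    return None
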
def test_target_sizes():
    # Resizing ndarray/OpenCV inputs; the type asserts below show these
    # still come back as <caer.Tensor>s
    tens_400_400 = caer.resize(cv_tens, target_size=(400, 400))
    tens_304_339 = caer.resize(cv_tens, target_size=(304, 339))
    tens_199_206 = caer.resize(cv_tens, target_size=(199, 206))

    # Resizing caer.Tensor inputs; should return <caer.Tensor>s
    caer_tens_400_400 = caer.resize(caer_tens, target_size=(400, 400))
    caer_tens_304_339 = caer.resize(caer_tens, target_size=(304, 339))
    caer_tens_199_206 = caer.resize(caer_tens, target_size=(199, 206))

    assert tens_400_400.shape[:2] == (400, 400)
    # NumPy reports shape as (h, w), whereas target_size is (w, h)
    assert tens_304_339.shape[:2] == (339, 304)
    assert tens_199_206.shape[:2] == (206, 199)

    # Type Asserts
    ## Using isinstance() often mistakes a caer.Tensor for an np.ndarray
    assert 'caer.Tensor' in str(type(tens_400_400))
    assert 'caer.Tensor' in str(type(tens_304_339))
    assert 'caer.Tensor' in str(type(tens_199_206))

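# Why the string-based type check in these tests: caer.Tensor appears to
# subclass np.ndarray, so isinstance(tens, np.ndarray) is True for both and
# cannot tell them apart. Comparing the printed type is blunt but
# unambiguous. A minimal sketch (is_caer_tensor is a hypothetical helper):
def is_caer_tensor(tens) -> bool:
    # str(type(...)) yields e.g. "<class 'caer.Tensor'>" for a caer.Tensor
    return 'caer.Tensor' in str(type(tens))
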
def adjust_ghsps(*args):
    global transformedImage

    if currentImage is not None:
        # Reset the error label's text
        if lblError['text'] == 'Error':
            lblError['text'] = ''

        transformedImage = caer.to_tensor(currentImage, cspace='rgb')

        # Apply all transformations to the currently displayed image
        if image_resized:
            transformedImage = caer.resize(transformedImage, target_size=(int(image_size[0]), int(image_size[1])), preserve_aspect_ratio=False)

        if hue.get() != 0.0:
            transformedImage = caer.transforms.adjust_hue(transformedImage, hue.get())

        if saturation.get() != 1.0:
            transformedImage = caer.transforms.adjust_saturation(transformedImage, saturation.get())

        if imgGamma.get() != 1.05:
            transformedImage = caer.transforms.adjust_gamma(transformedImage, imgGamma.get())

        if sharpen.get() != 8.9:
            transformedImage = caer.core.cv.filter2D(transformedImage, -1, sharpenKernel)

        gb = gaussian_blur.get()
        if gb > 1:
            transformedImage = caer.core.cv.GaussianBlur(transformedImage, (gb + 1, gb + 1), caer.core.cv.BORDER_DEFAULT)

        if posterize.get() < 6:
            transformedImage = caer.transforms.posterize(transformedImage, posterize.get())

        if solarize.get() < 255:
            transformedImage = caer.transforms.solarize(transformedImage, solarize.get())

        if sobel_threshold.get() > 0:
            transformedImage = caer.core.cv.cvtColor(transformedImage, caer.core.cv.COLOR_RGB2GRAY)

            # Kernel size must be odd: values 1, 3 and 5
            sobelKernel = sobel_threshold.get() if sobel_threshold.get() % 2 != 0 else sobel_threshold.get() + 1
            dx = dy = sobel_threshold.get() - 2 if sobel_threshold.get() > 2 else sobel_threshold.get()

            # NOTE: cv.Sobel's second argument is the output depth (ddepth);
            # IMREAD_GRAYSCALE happens to equal 0 (CV_8U), so this works,
            # but an explicit depth constant would be clearer
            sobelx = caer.core.cv.Sobel(transformedImage, caer.core.cv.IMREAD_GRAYSCALE, dx, 0, ksize=sobelKernel)
            sobely = caer.core.cv.Sobel(transformedImage, caer.core.cv.IMREAD_GRAYSCALE, 0, dy, ksize=sobelKernel)
            transformedImage = caer.core.cv.bitwise_or(sobelx, sobely)
            transformedImage = caer.core.cv.cvtColor(transformedImage, caer.core.cv.COLOR_GRAY2RGB)

        if show_edges.get() == 1:
            transformedImage = caer.core.cv.cvtColor(transformedImage, caer.core.cv.COLOR_RGB2GRAY)
            transformedImage = caer.core.cv.Canny(transformedImage, low_threshold.get(), low_threshold.get() * 2)
            transformedImage = caer.core.cv.cvtColor(transformedImage, caer.core.cv.COLOR_GRAY2RGB)

        if show_emboss.get() == 1:
            transformedImage = caer.core.cv.filter2D(transformedImage, -1, embossKernel) + emboss.get()

        if flip_H:
            transformedImage = caer.transforms.hflip(transformedImage)

        if flip_V:
            transformedImage = caer.transforms.vflip(transformedImage)

        if rotationApplied:
            show_rotated_image(True)
        else:
            image_show(transformedImage)

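# adjust_ghsps() references sharpenKernel and embossKernel, which are
# defined elsewhere in the GUI. A plausible sketch of such kernels (the
# exact values used by the original application may differ):
import numpy as np

sharpenKernel = np.array([[-1, -1, -1],
                          [-1,  9, -1],
                          [-1, -1, -1]])  # classic 3x3 sharpening kernel

embossKernel = np.array([[-1, -1,  0],
                         [-1,  0,  1],
                         [ 0,  1,  1]])   # diagonal emboss kernel
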
import cv2 as cv
import caer

img = cv.imread('./dataset/testing_images/bryan-1.jpg')

# Resize for a quick preview (aspect ratio preserved)
resized = caer.resize(img, target_size=(400, 400), preserve_aspect_ratio=True)
cv.imshow('Resized', resized)

gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray People', gray)

haar_cascade = cv.CascadeClassifier('./cascade/cascade.xml')
faces_rect = haar_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=1)
print(f'Number of faces found = {len(faces_rect)}')

# Draw a green bounding box around each detection
for (x, y, w, h) in faces_rect:
    cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)

cv.imshow('Detected Faces', img)
cv.waitKey(0)

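# minNeighbors=1 above is very permissive and tends to admit false
# positives. A hedged variant for tuning, reusing gray and haar_cascade
# from the script above (the values are illustrative, not from the repo):
faces_rect = haar_cascade.detectMultiScale(
    gray,
    scaleFactor=1.1,    # image pyramid step; smaller = finer but slower
    minNeighbors=4,     # raise to suppress spurious detections
    minSize=(30, 30),   # ignore candidate boxes smaller than 30x30 px
)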