Example #1
from PIL import ImageEnhance

def SharpEnhance(Img, factor):
    enh_sha = ImageEnhance.Sharpness(Img)
    image_sharped = enh_sha.enhance(factor)
    return image_sharped
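A minimal usage sketch for the helper above; the file names and the factor of 2.0 are assumptions for illustration, not part of the original snippet:

from PIL import Image

img = Image.open("input.jpg")            # placeholder path
sharpened = SharpEnhance(img, 2.0)       # factor > 1.0 sharpens, 1.0 is a no-op, 0.0 blurs
sharpened.save("output.jpg")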
Example #2
def main():
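    # Note: in_dir, out_dir and MyGaussianBlur are not defined in this snippet;
    # they come from elsewhere in the source module.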

    count = 0
    #parameter
    wsize = 1024  # double the resolution
    Gamma = 0.97
    Phi = 200
    Epsilon = 0.1
    k = 2
    Sigma = 1.5
    max_length = 20
    min_length = 10
    max_dif = 30
    n_point = 50
    dir = 3

    input_paths = glob.glob(in_dir + '/*.jpg')
    input_paths += (glob.glob(in_dir + '/*.jpeg'))
    input_paths += (glob.glob(in_dir + '/*.png'))
    for files1 in input_paths:
        filepath, filename = os.path.split(files1)

        im = Image.open(files1).convert('L')
        im = array(ImageEnhance.Sharpness(im).enhance(3.0))
        im2 = filters.gaussian_filter(im, Sigma)
        im3 = filters.gaussian_filter(im, Sigma * k)
        differencedIm2 = im2 - (Gamma * im3)
        (x, y) = shape(im2)
        for i in range(x):
            for j in range(y):
                if differencedIm2[i, j] < Epsilon:
                    differencedIm2[i, j] = 1
                else:
                    differencedIm2[i, j] = 250 + tanh(Phi *
                                                      (differencedIm2[i, j]))

        gray_pic = differencedIm2.astype(np.uint8)
        color_pic = Image.open(files1)
        real = np.atleast_2d(color_pic)

        if real.ndim == 3:
            w, h, c = real.shape
            if c == 3:
                image = color_pic.filter(MyGaussianBlur(radius=5))
                mat = np.atleast_2d(image)

                if gray_pic.ndim == 2:
                    gray_pic = np.expand_dims(gray_pic, 2)
                    gray_pic = np.tile(gray_pic, [1, 1, 3])

        #       for i in range(n_point):
        #          length = 0
        #         while length < min_length:
        #            ws, hs, wt, ht, length = gen_color_line(mat, dir,max_length,max_dif)
        #       gray_pic[ws:wt, hs:ht, :] = mat[ws:wt, hs:ht, :]

                gray_pic = np.append(real, gray_pic, axis=1)
                final_img = Image.fromarray(gray_pic)

                im = final_img
                w, h = im.size
                hsize = int(h * wsize / float(w))
                if hsize * 2 > wsize:  # crop to three
                    im = im.resize((wsize, hsize))
                    bounds1 = (0, 0, wsize, int(wsize / 2))
                    cropImg1 = im.crop(bounds1)
                    # cropImg1.show()
                    cropImg1.save(os.path.join(out_dir, 'u' + filename))
                    bounds2 = (0, hsize - int(wsize / 2), wsize, hsize)
                    cropImg2 = im.crop(bounds2)
                    # cropImg.show()
                    cropImg2.save(os.path.join(out_dir, 'd' + filename))
                else:
                    im = im.resize((wsize, (wsize // 2)))
                    im.save(os.path.join(out_dir, 't' + filename))
                count += 1
                print('done!' + str(count))
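The loop above is a difference-of-Gaussians edge pass with a soft tanh threshold. Below is a compact, vectorised sketch of just that step (a hypothetical dog_edges helper, not the original code); the constants mirror the parameters above and np.where replaces the per-pixel loop:

import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_filter

def dog_edges(gray, sigma=1.5, k=2, gamma=0.97, phi=200, eps=0.1):
    gray = gray.astype(np.float64)
    g1 = gaussian_filter(gray, sigma)
    g2 = gaussian_filter(gray, sigma * k)
    d = g1 - gamma * g2
    # Low responses become near-black (1); the rest saturate near white via tanh.
    out = np.where(d < eps, 1.0, 250 + np.tanh(phi * d))
    return out.astype(np.uint8)

# Hypothetical usage with a placeholder file name:
edges = Image.fromarray(dog_edges(np.array(Image.open("photo.jpg").convert("L"))))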
Example #3
 def transform(self, image, factor):
     enhancer = ImageEnhance.Sharpness(Image.fromarray(image))
     return np.array(enhancer.enhance(factor))
Example #4
 def __call__(self, img):
     alpha = random.uniform(1 - self.var, 1 + self.var)
     return ie.Sharpness(img).enhance(alpha)
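The fragment above samples a random sharpness factor around 1.0. A self-contained sketch of the same jitter as a plain function (the default var of 0.5 is an assumption; the original reads it from self.var):

import random
from PIL import Image, ImageEnhance

def random_sharpness(img: Image.Image, var: float = 0.5) -> Image.Image:
    # Sample a factor in [1 - var, 1 + var]; 1.0 leaves the image unchanged.
    alpha = random.uniform(1 - var, 1 + var)
    return ImageEnhance.Sharpness(img).enhance(alpha)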
Example #5
async def deepfry(img: Image,
                  *,
                  token: str = None,
                  url_base: str = 'westcentralus',
                  session: aiohttp.ClientSession = None,
                  type=DeepfryTypes.RED) -> Image:
    """
    Deepfry an image.
    
    img: PIL.Image - Image to deepfry.
    [token]: str - Token to use for Microsoft facial recognition API. If this is not supplied, lens flares will not be added.
    [url_base]: str = 'westcentralus' - API base to use. Only needed if your key's region is not `westcentralus`.
    [session]: aiohttp.ClientSession - Optional session to use with API requests. If provided, may provide a bit more speed.

    Returns: PIL.Image - Deepfried image.
    """
    img = img.copy().convert('RGB')

    if type not in DeepfryTypes:
        raise ValueError(
            f'Unknown deepfry type "{type}", expected a value from deeppyer.DeepfryTypes'
        )

    if token:
        req_url = f'https://{url_base}.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=false&returnFaceLandmarks=true'  # WHY THE F**K IS THIS SO LONG
        headers = {
            'Content-Type': 'application/octet-stream',
            'Ocp-Apim-Subscription-Key': token,
            'User-Agent': 'DeepPyer/1.0'
        }
        b = BytesIO()

        img.save(b, 'jpeg')
        b.seek(0)

        if session:
            async with session.post(req_url, headers=headers,
                                    data=b.read()) as r:
                face_data = await r.json()
        else:
            async with aiohttp.ClientSession() as s, s.post(
                    req_url, headers=headers, data=b.read()) as r:
                face_data = await r.json()

        if 'error' in face_data:
            err = face_data['error']
            code = err.get('code', err.get('statusCode'))
            msg = err['message']

            raise Exception(
                f'Error with Microsoft Face Recognition API\n{code}: {msg}')

        if face_data:
            landmarks = face_data[0]['faceLandmarks']

            # Get size and positions of eyes, and generate sizes for the flares
            eye_left_width = math.ceil(landmarks['eyeLeftInner']['x'] -
                                       landmarks['eyeLeftOuter']['x'])
            eye_left_height = math.ceil(landmarks['eyeLeftBottom']['y'] -
                                        landmarks['eyeLeftTop']['y'])
            eye_left_corner = (landmarks['eyeLeftOuter']['x'],
                               landmarks['eyeLeftTop']['y'])
            flare_left_size = eye_left_height if eye_left_height > eye_left_width else eye_left_width
            flare_left_size *= 4
            eye_left_corner = tuple(
                math.floor(x - flare_left_size / 2.5 + 5)
                for x in eye_left_corner)

            eye_right_width = math.ceil(landmarks['eyeRightOuter']['x'] -
                                        landmarks['eyeRightInner']['x'])
            eye_right_height = math.ceil(landmarks['eyeRightBottom']['y'] -
                                         landmarks['eyeRightTop']['y'])
            eye_right_corner = (landmarks['eyeRightInner']['x'],
                                landmarks['eyeRightTop']['y'])
            flare_right_size = eye_right_height if eye_right_height > eye_right_width else eye_right_width
            flare_right_size *= 4
            eye_right_corner = tuple(
                math.floor(x - flare_right_size / 2.5 + 5)
                for x in eye_right_corner)

    # Crush image to hell and back
    img = img.convert('RGB')
    width, height = img.width, img.height
    img = img.resize((int(width**.75), int(height**.75)),
                     resample=Image.LANCZOS)
    img = img.resize((int(width**.88), int(height**.88)),
                     resample=Image.BILINEAR)
    img = img.resize((int(width**.9), int(height**.9)), resample=Image.BICUBIC)
    img = img.resize((width, height), resample=Image.BICUBIC)
    img = ImageOps.posterize(img, 4)

    # Generate red and yellow overlay for classic deepfry effect
    r = img.split()[0]
    r = ImageEnhance.Contrast(r).enhance(2.0)
    r = ImageEnhance.Brightness(r).enhance(1.5)

    if type == DeepfryTypes.RED:
        r = ImageOps.colorize(r, Colours.RED, Colours.YELLOW)
    elif type == DeepfryTypes.BLUE:
        r = ImageOps.colorize(r, Colours.BLUE, Colours.WHITE)

    # Overlay red and yellow onto main image and sharpen the hell out of it
    img = Image.blend(img, r, 0.75)
    img = ImageEnhance.Sharpness(img).enhance(100.0)

    if token and face_data:
        # Copy and resize flares
        flare = Image.open('./deeppyer/flare.png')
        flare_left = flare.copy().resize((flare_left_size, ) * 2,
                                         resample=Image.BILINEAR)
        flare_right = flare.copy().resize((flare_right_size, ) * 2,
                                          resample=Image.BILINEAR)

        del flare

        img.paste(flare_left, eye_left_corner, flare_left)
        img.paste(flare_right, eye_right_corner, flare_right)

    return img
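A hedged usage sketch for the coroutine above; the input path is a placeholder, and without a token the facial-recognition branch (and the lens flares) is simply skipped:

import asyncio
from PIL import Image
# from deeppyer import deepfry   # assuming the function is exposed by the deeppyer package

async def fry_one():
    img = Image.open("input.jpg")        # placeholder path
    fried = await deepfry(img)           # defaults: no token, DeepfryTypes.RED
    fried.save("fried.jpg")

asyncio.run(fry_one())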
Example #6
def posterize_func(img, bboxes, bits):
    out = np.bitwise_and(img, np.uint8(255 << (8 - bits)))
    return out, bboxes


if __name__ == '__main__':
    import PIL
    from PIL import Image, ImageEnhance, ImageOps
    import time

    pth = './pic.jpg'
    impil = Image.open(pth)
    imcv = cv2.imread(pth)
    out_cv = sharpness_func(imcv, 0.3).astype(np.float32)
    sharp_pil = ImageEnhance.Sharpness(impil)
    out_pil = np.array(sharp_pil.enhance(0.3))[:, :, ::-1].astype(np.float32)
    print('sharpness')
    print(np.sum(np.abs(out_pil - out_cv)))
    print(np.max(np.abs(out_pil - out_cv)))
    print(np.min(np.abs(out_pil - out_cv)))

    pth = './pic.jpg'
    impil = Image.open(pth)
    imcv = cv2.imread(pth)
    t1 = time.time()
    n_test = 100
    for i in range(n_test):
        out_cv = autocontrast_func(imcv, 20).astype(np.float32)
    t2 = time.time()
Example #7
def sharpness(image, val):
    _image = ImageEnhance.Sharpness(image)
    return _image.enhance(val)
Example #8
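# Fragment: the lines below appear to be the tail of the read_image helper used in
# the __main__ block further down; analyze_pixel, capture_bar, capture_bar_found
# and artist_box_status are defined in the omitted part of the source file.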
        circle_status = 0
    else:
        circle_status = 1

    if analyze_pixel(capture_bar) == capture_bar_found:
        capture_bar_status = 1
    else:
        capture_bar_status = 0

    return [circle_status, capture_bar_status, artist_box_status]


if __name__ == "__main__":
    print("Simple: ", read_image("test.png"))
    print()
    sharp = ImageEnhance.Sharpness(Image.open("test.png"))
    sharp = sharp.enhance(0.5)
    sharp.save("test_sharpness.png")
    print("Sharpness: ", read_image("test_sharpness.png"))
    print()
    bright = ImageEnhance.Brightness(Image.open("test.png"))
    bright = bright.enhance(0.5)
    bright.save("test_brightness.png")
    print("Brightness: ", read_image("test_brightness.png"))
    print()
    contr = ImageEnhance.Contrast(Image.open("test.png"))
    contr = contr.enhance(0.5)
    contr.save("test_contrast.png")
    print("Contrast: ", read_image("test_contrast.png"))
    print()
    color = ImageEnhance.Color(Image.open("test.png"))
Example #9
def imageInflated(_inImportPath, _inExportPath):
    files = os.listdir(_inImportPath)
    imgNum = 0
    for file in files:
        index = re.search('.png',file)
        if index:
            imgNum += 1

    print(imgNum)

    for i in range(imgNum):

        importImagePath = os.path.join(_inImportPath,str(i+1)+'.png')
        im_0 = Image.open(importImagePath)
        saveImage(im_0,_inExportPath)

        im_1 = ImageOps.mirror(im_0)
        saveImage(im_1,_inExportPath)

        im_2 = im_0.point(lambda x:x * 1.3)
        saveImage(im_2,_inExportPath)

        im_3 = im_1.point(lambda x:x * 1.3)
        saveImage(im_3,_inExportPath)

        im_4 = im_0.point(lambda x:x * 0.7)
        saveImage(im_4,_inExportPath)

        im_5 = im_1.point(lambda x:x * 0.7)
        saveImage(im_5,_inExportPath)

        im_6 = changeColorRate(im_0,1.2,1,1)
        saveImage(im_6,_inExportPath)

        im_8 = changeColorRate(im_1,1.2,1,1)
        saveImage(im_8,_inExportPath)

        im_7 = changeColorRate(im_0,0.8,1,1)
        saveImage(im_7,_inExportPath)

        im_9 = changeColorRate(im_1,0.8,1,1)
        saveImage(im_9,_inExportPath)

        im_10 = changeColorRate(im_0,1,1.2,1)
        saveImage(im_10,_inExportPath)

        im_12 = changeColorRate(im_1,1,1.2,1)
        saveImage(im_12,_inExportPath)

        im_11 = changeColorRate(im_0,1,0.8,1)
        saveImage(im_11,_inExportPath)

        im_13 = changeColorRate(im_1,1,0.8,1)
        saveImage(im_13,_inExportPath)

        im_14 = changeColorRate(im_0,1,1,1.2)
        saveImage(im_14,_inExportPath)

        im_16 = changeColorRate(im_1,1,1,1.2)
        saveImage(im_16,_inExportPath)

        im_15 = changeColorRate(im_0,1,1,0.8)
        saveImage(im_15,_inExportPath)

        im_17 = changeColorRate(im_1,1,1,0.8)
        saveImage(im_17,_inExportPath)

        iec_1 = ImageEnhance.Contrast(im_0)
        im_18 = iec_1.enhance(1.3)
        saveImage(im_18,_inExportPath)

        iec_2 = ImageEnhance.Contrast(im_1)
        im_19 = iec_2.enhance(1.3)
        saveImage(im_19,_inExportPath)

        iec_3 = ImageEnhance.Contrast(im_0)
        im_20 = iec_3.enhance(0.7)
        saveImage(im_20,_inExportPath)

        iec_4 = ImageEnhance.Contrast(im_1)
        im_21 = iec_4.enhance(0.7)
        saveImage(im_21,_inExportPath)

        iec_5 = ImageEnhance.Color(im_0)
        im_22 = iec_5.enhance(1.3)
        saveImage(im_22,_inExportPath)

        iec_6 = ImageEnhance.Color(im_1)
        im_23 = iec_6.enhance(1.3)
        saveImage(im_23,_inExportPath)

        iec_7 = ImageEnhance.Color(im_0)
        im_24 = iec_7.enhance(0.7)
        saveImage(im_24,_inExportPath)

        iec_8 = ImageEnhance.Color(im_1)
        im_25 = iec_8.enhance(0.7)
        saveImage(im_25,_inExportPath)

        iec_9 = ImageEnhance.Sharpness(im_0)
        im_26 = iec_9.enhance(2)
        saveImage(im_26,_inExportPath)

        iec_10 = ImageEnhance.Sharpness(im_1)
        im_27 = iec_10.enhance(2)
        saveImage(im_27,_inExportPath)

        iec_11 = ImageEnhance.Sharpness(im_0)
        im_28 = iec_11.enhance(0.3)
        saveImage(im_28,_inExportPath)

        iec_12 = ImageEnhance.Sharpness(im_1)
        im_29 = iec_12.enhance(0.3)
        saveImage(im_29,_inExportPath)
Example #10
    def transform_image(self, img, shape, trials=20):
        # Skew
        ################
        # w, h = img.size
        # skew = random.random()*2 - 1
        # xshift = abs(skew) * w
        # new_width = w + int(round(xshift))
        # img = img.transform(
        #     (new_width, h),
        #     Image.AFFINE,
        #     (1, skew, -xshift if skew > 0 else 0, 0, 1, 0),
        #     Image.BICUBIC
        # )

        # Rotate
        ################
        theta = random.randint(-180, 180)
        img = img.rotate(theta)

        # Random Crop
        #################
        w, h = img.size
        rand_points = []
        c = 0

        while c < trials:
            try:
                # rand top left corner
                rand_x, rand_y = random.randint(0, int(w / 3)), random.randint(
                    0, int(h / 3))

                # rand side length greater than half shape dimensions
                rand_side = random.randint(min(shape[:2]),
                                           min((w - rand_x), (h - rand_y)) - 1)

                # generates the set of corners for random crop
                rand_points = [(rand_x, rand_y), (rand_x + rand_side, rand_y),
                               (rand_x, rand_y + rand_side),
                               (rand_x + rand_side, rand_y + rand_side)]

                # if all corners are part of image => move on
                for x, y in rand_points:
                    if img.getpixel((x, y)) == (0, 0, 0):
                        rand_points.remove((x, y))
                if len(rand_points) == 4:
                    break

                # Try again
                c += 1

            # If exception occurred => try again
            except:
                c += 1

        # If couldn't generate image in n trials => return None
        if c >= trials:
            return None

        # Sharpening, Brightness, and Contrast
        #################
        sharpener = ImageEnhance.Sharpness(img)
        img = sharpener.enhance(1 + (random.random() - .5) / 2)

        brightener = ImageEnhance.Brightness(img)
        img = brightener.enhance(1 + (random.random() - .5) / 2)

        contraster = ImageEnhance.Contrast(img)
        img = contraster.enhance(1 + (random.random() - .5) / 2)

        # Final Resize
        ##################
        box = (rand_x, rand_y, rand_x + rand_side, rand_y + rand_side)
        img = img.resize((shape[0], shape[1]), Image.LANCZOS, box)

        # img.show()

        return img
Example #11
 def apply(self, image):
     return ImageEnhance.Sharpness(image).enhance(self.value)
Example #12
 def sharpnessPhoto(self, level):
     '''Adjust image sharpness.'''
     self.objectMedia = ImageEnhance.Sharpness(
         self.objectMedia).enhance(level)
     self.addHistoryChange()
Example #13
def img_prep(img):
    enh_bri = ImageEnhance.Brightness(img).enhance(1.05)
    enh_col = ImageEnhance.Color(enh_bri).enhance(1.6)
    enh_con = ImageEnhance.Contrast(enh_col).enhance(1.8)
    new_img = ImageEnhance.Sharpness(enh_con).enhance(2.5)
    return new_img
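A minimal sketch showing img_prep applied to a file; the paths are placeholders, and the enhancement chain (brightness -> color -> contrast -> sharpness) is exactly the one defined above:

from PIL import Image, ImageEnhance  # ImageEnhance is required by img_prep itself

img = Image.open("photo.jpg").convert("RGB")   # placeholder input
prepped = img_prep(img)
prepped.save("photo_prepped.jpg")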
Example #14
import pandas as pd
from PIL import Image
from PIL import ImageEnhance

im = Image.open("test.png")
double_size = (im.size[0] * 2, im.size[1] * 2)
larger_im = im.resize(double_size)
larger_im = ImageEnhance.Sharpness(larger_im).enhance(2.0)
larger_im.save("enhance.png")

with open('tableResult6.txt', 'r') as file:
    a = file.read()
txt_vals = [b.split('\t') for b in a.split('\n') if b]

di_rows = []
course = []
reoc = []
start = []
end = []
DIreoc = []
DIstart = []
DIend = []
FIdate = []
FIstart = []
FIend = []
courseReach = 0


# well I need this
def hasNumber(inputString):
    return any(char.isdigit() for char in inputString)
Example #15
def ContrastivePredictiveCodingAugmentations(img):
    # We use transformations as traceable
    # https://arxiv.org/pdf/1805.09501.pdf
    pool = [
        transforms.RandomRotation(  # Rotation
            30,
            resample=False,
            expand=False,
            center=None,
            fill=None),
        transforms.RandomAffine(  # Shearing
            0,
            translate=None,
            scale=None,
            shear=30,
            resample=False,
            fillcolor=0),
        transforms.RandomAffine(  # Translate
            0,
            translate=(0.3, 0.3),
            scale=None,
            shear=None,
            resample=False,
            fillcolor=0),
        transforms.Lambda(lambda x: imo.autocontrast(x)),  # Autocontrast
        transforms.Lambda(lambda x: imo.invert(x)),  # Invert
        transforms.Lambda(lambda x: imo.equalize(x)),  # Equalize
        transforms.Lambda(lambda x: imo.solarize(x)),  # Solarize
        transforms.Lambda(lambda x: imo.posterize(
            x, bits=int(np.random.randint(4, 8) + 1))),  # Posterize
        transforms.Lambda(
            lambda x: ime.Color(x).enhance(np.random.uniform())),  # Color
        transforms.Lambda(lambda x: ime.Brightness(x).enhance(
            np.random.uniform())),  # Brightness
        transforms.Lambda(lambda x: ime.Contrast(x).enhance(np.random.uniform(
        ))),  # Contrast
        transforms.Lambda(lambda x: ime.Sharpness(x).enhance(np.random.uniform(
        ))),  # Sharpness
        transforms.Compose(  # Set black
            [
                transforms.ToTensor(),
                transforms.RandomErasing(1.0),
                transforms.ToPILImage()
            ])
    ]

    # 1.
    t1 = transforms.RandomChoice(pool)
    t2 = transforms.RandomChoice(pool)
    t3 = transforms.RandomChoice(pool)

    img = t3(t2(t1(img)))

    # https://www.nature.com/articles/s41591-018-0107-6
    # 2. Only elastic def, no shearing as this is part of pool as well as hist changes
    if np.random.uniform() < 0.2:
        img = elastic_transform(img, sigma=10)

    # 3. In pool
    # 4.
    if np.random.uniform() < 0.25:
        img = transforms.functional.to_grayscale(img, num_output_channels=3)

    return img
Example #16
    def __getitem__(self, idx):
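        # Helpers used below (get_model, corruption_tuple, corruption_dict, mirror_expansion,
        # decode, tta_actions, tta_num, pil_center_crop, cutout) are module-level names
        # defined elsewhere in the source file of this snippet.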
        if self.target_network is None:
            self.target_network = get_model(self.target_network_name,
                                            gpus=[0],
                                            num_classes=self.dataset.num_class,
                                            train_aug=self.target_aug).eval()

        img_orig, lb = self.dataset[idx]
        n_img, n_lb, n_losses, n_corrects = [], [], [], []
        for _ in range(self.num_sample):
            tta_rotate_default = random.choice(
                self.transform_r) if random.random() < self.d_tta_prob else 0.0
            tta_zoom_default = random.choice(
                self.transform_zs
            ) if random.random() < self.d_tta_prob else 1.0
            tta_bright_default = random.choice(
                self.transform_b) if random.random() < self.d_tta_prob else 1.0

            for t_flip in self.transform_flip:
                img_new = img_orig.copy()

                corrupt_name = corrupt_op = corrupt_lv = None
                if self.do_random_corrupt and random.random(
                ) < self.d_tta_prob:  # TODO
                    corrupt_op = random.choice(corruption_tuple)
                    corrupt_lv = random.choice([1, 2, 3, 4, 5])
                elif isinstance(self.corrupt_list, list) and random.random(
                ) < self.d_tta_prob:  # TODO : Partial Corruptions
                    corrupt_name = random.choice(self.corrupt_list)
                    corrupt_op = corruption_dict[corrupt_name]
                    corrupt_lv = random.choice([1, 2, 3, 4, 5])
                    # corrupt_lv = random.choice([3, 4, 5])

                if corrupt_op is not None:
                    img_np = corrupt_op(img_new, severity=corrupt_lv)
                    if isinstance(img_np, np.ndarray):
                        img_np = img_np.astype(np.uint8)
                        img_new = Image.fromarray(img_np)
                    elif isinstance(img_np, PIL.Image.Image):
                        img_new = img_np
                    else:
                        raise Exception(type(img_np))

                if t_flip:
                    img_new = torchvision.transforms.functional.hflip(img_new)
                mirror_expansion_factor = 3
                try:
                    img_mirror = mirror_expansion(img_new)
                except Exception as e:
                    print(corrupt_op, corrupt_lv)
                    print(e)
                    print(type(img_new))
                    print(img_new.size)
                    raise e
                img_new = img_mirror.copy()

                if tta_rotate_default != 0:
                    img_new = torchvision.transforms.functional.rotate(
                        img_new,
                        tta_rotate_default,
                        expand=False,
                        resample=PIL.Image.BICUBIC)
                assert tta_bright_default > 0
                if tta_bright_default != 1.0:
                    img_new = torchvision.transforms.functional.adjust_brightness(
                        img_new, tta_bright_default)
                new_resize = int((self.target_size + self.padding) *
                                 mirror_expansion_factor * tta_zoom_default)
                assert 0.5 < tta_zoom_default < 1.5
                if tta_zoom_default != 1.0:
                    img_new = torchvision.transforms.functional.resize(
                        img_new, new_resize, interpolation=PIL.Image.BICUBIC)

                imgs_pil = []
                for tta_action in tta_actions:
                    tta_rotate, tta_brightness, tta_zoom, tta_contrast, tta_color, tta_blur, tta_att, _ = decode(
                        tta_action)
                    if tta_rotate != 0:
                        img_rotate = torchvision.transforms.functional.rotate(
                            img_new,
                            tta_rotate,
                            expand=False,
                            resample=PIL.Image.BICUBIC)
                    else:
                        img_rotate = img_new.copy()
                    if tta_brightness != 1.0:
                        img_bright = torchvision.transforms.functional.adjust_brightness(
                            img_rotate, tta_brightness)
                    else:
                        img_bright = img_rotate.copy()
                    if tta_zoom != 1.0:
                        resize = int(new_resize * tta_zoom)
                        img_zoom = torchvision.transforms.functional.resize(
                            img_bright,
                            resize,
                            interpolation=PIL.Image.BICUBIC)
                        assert img_zoom.width > 32, (img_zoom.size,
                                                     img_bright.size)
                    else:
                        img_zoom = img_bright.copy()

                    if tta_contrast > 0.0:
                        img_zoom = PIL.ImageOps.autocontrast(img_zoom)
                    assert img_zoom.width > 32, ('autocont', img_zoom.size,
                                                 img_bright.size, img_new.size)
                    if tta_color != 1.0:
                        img_zoom = PIL.ImageEnhance.Color(img_zoom).enhance(
                            tta_color)
                    assert img_zoom.width > 32, ('color', img_zoom.size,
                                                 img_bright.size, img_new.size)
                    if tta_blur != 1.0:
                        img_zoom = ImageEnhance.Sharpness(img_zoom).enhance(
                            tta_blur)
                    assert img_zoom.width > 32, ('blur', img_zoom.size,
                                                 img_bright.size, img_new.size)

                    w, h = img_zoom.size
                    att_padding = self.padding if self.padding else 0
                    pw, ph = max(0, self.target_size + att_padding - w), max(
                        0, self.target_size + att_padding - h)
                    pw1, ph1 = pw // 2, ph // 2
                    pw2, ph2 = pw - pw1, ph - ph1
                    if pw1 > 0 or ph1 > 0 or pw2 > 0 or ph2 > 0:
                        img_pad = torchvision.transforms.functional.pad(
                            img_zoom, (pw1, ph1, pw2, ph2),
                            random.randint(0, 255), 'reflect')
                    else:
                        img_pad = img_zoom
                    # img = torchvision.transforms.functional.center_crop(img_zoom, (self.target_size, self.target_size))

                    crop_width = crop_height = self.target_size
                    # print(tta_action, 'orig.size=', img_orig.size, 'zoom.size=', img_zoom.size, 'img_pad.size', img_pad.size, 'target_size=', self.target_size, 'padding=', self.padding)
                    if tta_att == 0:
                        img = pil_center_crop(img_pad, self.target_size)
                    elif tta_att == 1:
                        img_pad = pil_center_crop(
                            img_pad, int(self.target_size + att_padding))
                        img = img_pad.crop((0, 0, crop_width, crop_height))
                    elif tta_att == 2:
                        img_pad = pil_center_crop(
                            img_pad, int(self.target_size + att_padding))
                        image_width, image_height = img_pad.size
                        img = img_pad.crop((image_width - crop_width, 0,
                                            image_width, crop_height))
                    elif tta_att == 3:
                        img_pad = pil_center_crop(
                            img_pad, int(self.target_size + att_padding))
                        image_width, image_height = img_pad.size
                        img = img_pad.crop((0, image_height - crop_height,
                                            crop_width, image_height))
                    elif tta_att == 4:
                        img_pad = pil_center_crop(
                            img_pad, int(self.target_size + att_padding))
                        image_width, image_height = img_pad.size
                        img = img_pad.crop((image_width - crop_width,
                                            image_height - crop_height,
                                            image_width, image_height))
                    else:
                        raise Exception

                    imgs_pil.append(img)
                    self.img_pils = imgs_pil

                imgs = []
                for img in imgs_pil:
                    img = torchvision.transforms.functional.to_tensor(img)
                    img = torchvision.transforms.functional.normalize(
                        img,
                        mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225])
                    imgs.append(img)
                imgs = torch.stack(imgs).cuda()
                assert len(imgs) == tta_num

                with torch.no_grad():
                    preds = self.target_network(imgs)
                corrects = (torch.argmax(
                    preds,
                    dim=1).squeeze() == lb).detach().cpu().int().float()
                lbs = torch.tensor([lb] * tta_num).squeeze().cuda()
                # taus = torch.FloatTensor(tta_taus).detach()
                losses = torch.nn.functional.cross_entropy(
                    preds, lbs, reduction='none').detach().cpu()
                del preds
                if self.target_size > 32:  # TODO
                    torch.cuda.empty_cache()

                w, h = img_new.size
                pw, ph = max(0, self.target_size + self.padding - w), max(
                    0, self.target_size + self.padding - h)
                pw1, ph1 = pw // 2, ph // 2
                pw2, ph2 = pw - pw1, ph - ph1
                if pw1 > 0 or ph1 > 0 or pw2 > 0 or ph2 > 0:
                    img_new = torchvision.transforms.functional.pad(
                        img_new, (pw1, ph1, pw2, ph2), random.randint(0, 255),
                        'reflect')
                if img_new.size[0] >= self.target_size or img_new.size[
                        1] >= self.target_size:
                    # img_new = torchvision.transforms.functional.center_crop(img_new, self.target_size)
                    img_new = pil_center_crop(img_new, self.target_size)
                self.orig_img_pil = img_new

                img_new = cutout(img_new,
                                 cutsize=self.cutout * mirror_expansion_factor)

                if self.is_test:
                    return img_mirror, imgs_pil, img_new, losses, corrects

                img_new = torchvision.transforms.functional.resize(
                    img_new, self.l2t_size,
                    interpolation=PIL.Image.BICUBIC)  # TODO
                img_new = torchvision.transforms.functional.to_tensor(img_new)
                img_new = torchvision.transforms.functional.normalize(
                    img_new,
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]).cpu()

                n_img.append(img_new)
                n_lb.append(lb)
                n_losses.append(losses)
                n_corrects.append(corrects)
        return torch.stack(n_img), torch.Tensor(n_lb), torch.stack(
            n_losses), torch.stack(n_corrects)
Example #17
def enhance():
    img = Image.open("./upsample/edges_001_qp30.jpg")
    # enhancer1 = img.filter(ImageFilter.EDGE_ENHANCE_MORE)
    enhancer1 = ImageEnhance.Sharpness(img).enhance(5.0)
    enhancer1.save("test1.jpg")
Example #18
def main_loop(cap):
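    # Python 2-era script: it uses the old ConfigParser module and the OpenCV 2.x
    # cv2.cv capture constants; UVC_functions, ConfigSectionMap and the COLOUR_*
    # constants are defined elsewhere in the source file.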
    inverted = False
    autocontrast = False
    black_on_yellow = False
    freeze = False
    magnification = 1.0
    brightness = 1.0
    contrast = 1.0
    sharpness = 1.0
    show_params = False
    take_new_frame = False
    UVC_functions.set_autofocus(True)
    autofocus = True
    focusval = 0
    rotate180 = False

    # default values for grabbed-frame height and width:
    cam_width = 1920
    cam_height = 1080
    crop_width = 100
    crop_height = 100

    config = ConfigParser.ConfigParser()
    cfg_read = config.read("EVidMag.cfg")
    print(cfg_read)
    if cfg_read == []:
        print("Unable to read configuration file VidMag.cfg!")
    else:
        user_options = []
        try:
            user_options = ConfigSectionMap(config, "USER")
        except:
            print("Failed to read section [USER] from configuration file!")
        print("User options:", user_options)
        camera_cfg_file = user_options['camera_config']
        if camera_cfg_file != "":
            camera_cfg_file += ".cfg"
            cam_config = ConfigParser.ConfigParser()
            cam_cfg_read = cam_config.read(camera_cfg_file)
            if cam_cfg_read != []:
                cam_options = ConfigSectionMap(cam_config, "Resolution")
                try:
                    cam_width = int(cam_options['width'])
                    cam_height = int(cam_options['height'])
                except ValueError:
                    print("Height or Width setting invalid!")
            else:
                print(
                    "Invalid Camera_Config specified in EVidMag.cfg, section [USER] - using default frame size 1920 x 1080"
                )
        else:
            print(
                "No Camera_Config specified in EVidMag.cfg, section [USER] - using default frame size 1920 x 1080"
            )
        crop_height_str = user_options['crop_height']
        try:
            crop_height = int(crop_height_str)
        except ValueError:
            crop_height = cam_height
        crop_width_str = user_options['crop_width']
        try:
            crop_width = int(crop_width_str)
        except ValueError:
            crop_width = cam_width
        crop_width = min(crop_width, cam_width)
        crop_height = min(crop_height, cam_height)

    # Set the width and height to maximum
    # Logitech C920:


#    cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, cam_width)
#    cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, cam_height)
# Logitech Quickcam Pro 9000:
#    cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 1600)
#    cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 1200)
# Ipevo Ziggi HD:
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 1080)

    img = np.zeros((cam_height, cam_width, 3),
                   np.uint8)  # initialise img as blank
    cv2.putText(img, "No camera", (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 5.0,
                COLOUR_GREEN, 2)

    while True:
        if not freeze:  # "live" mode - grab a new image on each pass round this loop
            ret, img = cap.read()
            #            print ret
            working_img = img  # save creating a new object - directly modify the newly-grabbed frame
        else:  # freeze-frame
            if take_new_frame:
                take_new_frame = False
                for i in range(
                        0, 5
                ):  # (need to flush some buffered frames from the camera)
                    ret, img = cap.read()
            working_img = copy.copy(
                img
            )  # keep the grabbed frame unmodified, create a copy to work on

        # crop the captured frame, if required:
        if (crop_width < cam_width) or (crop_height < cam_height):
            working_img = working_img[(cam_height - crop_height) // 2:
                                      (cam_height + crop_height) // 2,
                                      (cam_width - crop_width) // 2:
                                      (cam_width + crop_width) // 2]

        if rotate180:
            working_img = cv2.flip(
                working_img, -1
            )  # compensate for webcam being mounted effectively upside-down

        if magnification > 1.0:
            height, width = working_img.shape[:2]
            cropped_img = working_img[int(0.5 * height * (1 - 1 / magnification)):
                                      int(0.5 * height * (1 + 1 / magnification)),
                                      int(0.5 * width * (1 - 1 / magnification)):
                                      int(0.5 * width * (1 + 1 / magnification))]
            #working_img = cv2.resize(cropped_img, None, fx=magnification, fy=magnification, \
            # interpolation = cv2.INTER_CUBIC)
            working_img = cv2.resize(cropped_img, None, fx=magnification, fy=magnification, \
                                     interpolation=cv2.INTER_LINEAR)
        #OR
        #height, width = cropped_img.shape[:2]
        #res = cv2.resize(cropped_img, (magnification*width, magnification*height), interpolation = cv2.INTER_CUBIC)

        if brightness != 1.0 or contrast != 1.0 or sharpness != 1.0:
            pil_image = Image.fromarray(
                working_img)  # convert OpenCV image to PIL format

            if brightness != 1.0:
                enh = ImageEnhance.Brightness(pil_image)
                pil_image = enh.enhance(brightness)

            if contrast != 1.0:
                enh = ImageEnhance.Contrast(pil_image)
                pil_image = enh.enhance(contrast)

            if sharpness != 1.0:
                enh = ImageEnhance.Sharpness(pil_image)
                pil_image = enh.enhance(sharpness)

            working_img = np.array(
                pil_image)  # convert PIL-format image back to OpenCV format

        if autocontrast:
            grayscale_img = cv2.cvtColor(working_img, cv2.COLOR_BGR2GRAY)
            working_img = cv2.adaptiveThreshold(grayscale_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
                cv2.THRESH_BINARY, 29, 2)
            if black_on_yellow:
                working_img = cv2.cvtColor(
                    working_img,
                    cv2.COLOR_GRAY2RGB)  # convert back to RGB image

        if black_on_yellow:
            # note that [:,:,0] is blue, [:,:,1] is green, [:,:,2] is red
            working_img[:, :, 0] = 0

        if inverted:
            working_img = 255 - working_img

        if autofocus:
            focusval = UVC_functions.get_focus()

        if show_params:
            if autocontrast:
                cv2.putText(working_img, "Autocontrast on", (20, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.0, COLOUR_WHITE, 2)
                cv2.putText(working_img, "Autocontrast on", (20, 70),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.0, COLOUR_BLACK, 2)
            else:
                cv2.putText(working_img, "Brightness: " + str(brightness),
                            (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                            COLOUR_YELLOW, 2)
                cv2.putText(working_img, "Contrast: " + str(contrast),
                            (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                            COLOUR_YELLOW, 2)
                cv2.putText(working_img, "Sharpness: " + str(sharpness),
                            (20, 110), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                            COLOUR_YELLOW, 2)
            cv2.putText(working_img, "Focus: " + str(focusval), (20, 150),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, COLOUR_YELLOW, 2)

        cv2.imshow("Video Magnifier", working_img)

        if not freeze:
            key_wait_time = 100  # image live-update mode - wait 100ms
        else:
            key_wait_time = 0  # i.e. if frame is frozen, wait indefinitely for a key to be pressed
        key = cv2.waitKey(
            key_wait_time) % 256  # LSB equates to ASCII code for most keys
        #        print(key)
        if key == 27:  # Esc - quit the program
            break
        elif key == ord('1'):  # 1 - normal colours
            inverted = False
        elif key == ord('2'):  # 2 - colours inverted
            inverted = True
        elif key == ord('3'):  # 3 - auto-contrast off
            autocontrast = False
        elif key == ord('4'):  # 4 - auto-contrast on
            autocontrast = True
        elif key == ord('5'):  # 5 - black-on-yellow mode off
            black_on_yellow = False
        elif key == ord('6'):  # 6 - black-on-yellow mode on
            black_on_yellow = True
        elif key == 176:  # num-pad 0 - decrease sharpness
            sharpness -= 0.1
            if sharpness < 0.0:
                sharpness = 0.0
        elif key == 174:  # num-pad . - increase sharpness
            sharpness += 0.1
            if sharpness > 10.0:
                sharpness = 10.0
        elif key == 177:  # num-pad 1 - 100% zoom
            magnification = 1.0
        elif key == 178:  # num-pad 2 - 200% zoom
            magnification = 2.0
        elif key == 179:  # num-pad 3 - 300% zoom
            magnification = 3.0
        elif key == 227:  # left-Ctrl - unfreeze frame
            freeze = False
        elif key == 82:  # up-arrow - increase brightness
            brightness += 0.1
            if brightness > 10.0:
                brightness = 10.0
        elif key == 84:  # down-arrow - decrease brightness
            brightness -= 0.1
            if brightness < 0.0:
                brightness = 0.0
        elif key == 81:  # left-arrow - decrease contrast
            contrast -= 0.1
            if contrast < 0.0:
                contrast = 0.0
        elif key == 83:  # right-arrow - increase contrast
            contrast += 0.1
            if contrast > 10.0:
                contrast = 10.0
        elif key == 80:  # Home key - reset to default colours
            brightness = 1.0
            contrast = 1.0
            sharpness = 1.0
            inverted = False
            autocontrast = False
            black_on_yellow = False
        elif key == 32:  # space-bar - freeze frame, or grab a new frozen frame
            if freeze:
                take_new_frame = True
            freeze = True
        elif key == 190:  # F1 - don't show parameter text overlay
            show_params = False
        elif key == 191:  # F2 - show parameters overlaid on image
            show_params = True
        elif key == 141:  # Num Enter - autofocus on
            UVC_functions.set_autofocus(True)
            autofocus = True
        elif key == 171:  # Num + - autofocus off, increase focus value
            if autofocus:
                UVC_functions.set_autofocus(False)
                autofocus = False
            focusval += 5
            UVC_functions.set_focus(focusval)
        elif key == 173:  # Num - - autofocus off, decrease focus value
            if autofocus:
                UVC_functions.set_autofocus(False)
                autofocus = False
            focusval -= 5
            UVC_functions.set_focus(focusval)
Example #19
def filterr(img):
    """takes a picture and puts a fliter on it"""
    try:
        print(
            """1.BLUR\t\t\t16.NEGATIVE LESS\t30.GINGHAM\n2.DETAIL\t\t17.NEGATIVE MORE\t31.ARTIC\n3.CONTOUR\t\t18.EQUALIZE\t\t32.DIBUJO\n4.EDGE_ENHANCE\t\t19.MORE BRIGHT\t\t33.CHARMES
5.EDGE_ENHANCE_MORE\t20.AUTOCONTRAST\n6.EMBOSS\t\t21.MORE CONTRAST\n7.FIND_EDGES\t\testos tardan un poco mas\n8.SMOOTH\t\t22.BLACK AND WHITE LESS
9.SMOOTH_MORE\t\t23.THRESHOLDS\n10.SHARPEN\t\t24.THRESHOLDS LESS\n11.SHARPEN MORE\t\t25.AVERAGE\n12.GRAYSCALE\t\t26.AVERAGE ALL NEGHBORS\n13.POSTERIZE\t\t27.OLD PICTURE
14.SOLARIZE\t\t28.ORANGE\n15.NEGATIVE\t\t29.VALENCIA""")
        i = input("Seleccione una opcion:")
        if i == "1":
            filpic = img.filter(ImageFilter.BLUR)
            filpic.show()
        elif i == "2":
            filpic = img.filter(ImageFilter.DETAIL)
            filpic.show()
        elif i == "3":
            filpic = img.filter(ImageFilter.CONTOUR)
            filpic.show()
        elif i == "4":
            filpic = img.filter(ImageFilter.EDGE_ENHANCE)
            filpic.show()
        elif i == "5":
            filpic = img.filter(ImageFilter.EDGE_ENHANCE_MORE)
            filpic.show()
        elif i == "6":
            filpic = img.filter(ImageFilter.EMBOSS)
            filpic.show()
        elif i == "7":
            filpic = img.filter(ImageFilter.FIND_EDGES)
            filpic.show()
        elif i == "8":
            filpic = img.filter(ImageFilter.SMOOTH)
            filpic.show()
        elif i == "9":
            filpic = img.filter(ImageFilter.SMOOTH_MORE)
            filpic.show()
        elif i == "10":
            filpic = img.filter(ImageFilter.SHARPEN)
            filpic.show()
        elif i == "11":
            filpic = ImageEnhance.Sharpness(img).enhance(9)
            filpic.show()
        elif i == "12":
            filpic = ImageOps.grayscale(img)
            filpic.show()
        elif i == "13":
            filpic = ImageOps.posterize(img, 1)
            filpic.show()
        elif i == "14":
            filpic = ImageOps.solarize(img)
            filpic.show()
        elif i == "15":
            filpic = ImageOps.invert(img)
            filpic.show()
        elif i == "16":
            filpic = ImageEnhance.Contrast(img).enhance(-2)
            filpic.show()
        elif i == "17":
            filpic = ImageEnhance.Contrast(img).enhance(-7)
            filpic.show()
        elif i == "18":
            filpic = ImageOps.equalize(img)
            filpic.show()
        elif i == "19":
            filpic = ImageEnhance.Brightness(img).enhance(2)
            filpic.show()
        elif i == "20":
            filpic = ImageOps.autocontrast(img, cutoff=2, ignore=None)
            filpic.show()
        elif i == "21":
            filpic = ImageEnhance.Contrast(img).enhance(2)
            filpic.show()
        elif i == "22":
            black_and_white_less(img)
        elif i == "23":
            thresholds(img)
        elif i == "24":
            thresholdsless(img)
        elif i == "25":
            average(img)
        elif i == "26":
            average_allneighbors(img)
        elif i == "27":
            sepia(img)
        elif i == "28":
            sepiaDF(img)
        elif i == "29":
            sepiaDFlessD(img)
        elif i == "30":
            sepiaDFless(img)
        elif i == "31":
            print("Se puede mejorar añadiendo Contraste")
            juno(img)
        elif i == "32":
            juno2(img)
        elif i == "33":
            notgray(img)
        else:
            print("Esa no vale")
        if int(i) <= 21 and int(i) != 0:
            yn = input("Quieres guardar(s/n):")
            saveimg(yn, filpic)
    except:
        print("No se pudo realizar la operacion")
Example #20
def align_dataset_mtcnn_url():
    rs_username = request.json['txtusername']
    directory = rs_username.replace(" ", "")
    parent_dir = os.path.expanduser("Dataset/FaceData/raw")
    path = os.path.join(parent_dir, directory)

    try:
        os.mkdir(path)
        print("Directory '% s' created" % directory)
    except OSError as err:
        message = "OS error: {0}".format(err)
        return make_response(
            jsonify(
                {"message": message}
            ),
            400,
        )
    url_img_list = request.json['url_list']

    if len(url_img_list) != 20:
        return make_response(
            jsonify(
                {"message": "Need 20 images for tranning"}
            ),
            400,
        )

    UPLOAD_FOLDER = os.path.expanduser("Dataset/FaceData/raw") + '/' + directory
    app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

    for i, url_img in enumerate(url_img_list):
        filename = 'image' + str(i) + '.jpg'
        urllib.request.urlretrieve(url_img, os.path.join(app.config['UPLOAD_FOLDER'], filename))
        flash('File successfully uploaded ' + filename + ' to the database!')

    os.system("python src/align_dataset_mtcnn.py  Dataset/FaceData/raw"
              + " Dataset/FaceData/processed --image_size 160 "
              + "--margin 32  --random_order --gpu_memory_fraction 0.25")

    output_dir = os.path.expanduser("Dataset/FaceData/processed/")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    nrof_images_total = 0
    output_class_dir = os.path.join(output_dir, directory)
    if not os.path.exists(output_class_dir):
        os.makedirs(output_class_dir)
    for image_path in src.facenet.get_imgDirs("Dataset/FaceData/processed/" + directory):
        nrof_images_total += 1
        filename = os.path.splitext(os.path.split(image_path)[1])[0] + str(nrof_images_total)
        output_filename = os.path.join(output_class_dir, filename + '.png')
        print(image_path)
        image_obj = Image.open(image_path)
        rotated_image2 = image_obj.convert('L')
        rotated_image2.save(output_filename)
        image_flip = image_obj.transpose(Image.FLIP_LEFT_RIGHT)
        output_filename = os.path.join(output_class_dir, filename + 'a.png')
        image_flip.save(output_filename)
        output_filename = os.path.join(output_class_dir, filename + 'b.png')
        sharpness = ImageEnhance.Sharpness(image_obj)
        sharpness.enhance(1.5).save(output_filename)
        output_filename = os.path.join(output_class_dir, filename + 'c.png')
        color = ImageEnhance.Color(image_obj)
        color.enhance(1.5).save(output_filename)
        output_filename = os.path.join(output_class_dir, filename + 'd.png')
        brightness = ImageEnhance.Brightness(image_obj)
        brightness.enhance(1.5).save(output_filename)

    return make_response(
        jsonify(
            {"message": "Success"}
        ),
        200,
    )
Example #21
    def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
        ranges = {
            "shearX": np.linspace(0, 0.3, 10),
            "shearY": np.linspace(0, 0.3, 10),
            "translateX": np.linspace(0, 150 / 331, 10),
            "translateY": np.linspace(0, 150 / 331, 10),
            "rotate": np.linspace(0, 30, 10),
            "color": np.linspace(0.0, 0.9, 10),
            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
            "solarize": np.linspace(256, 0, 10),
            "contrast": np.linspace(0.0, 0.9, 10),
            "sharpness": np.linspace(0.0, 0.9, 10),
            "brightness": np.linspace(0.0, 0.9, 10),
            "autocontrast": [0] * 10,
            "equalize": [0] * 10,
            "invert": [0] * 10
        }

        # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
        def rotate_with_fill(img, magnitude):
            rot = img.convert("RGBA").rotate(magnitude)
            return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode)

        func = {
            "shearX": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
                Image.BICUBIC, fillcolor=fillcolor),
            "shearY": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
                Image.BICUBIC, fillcolor=fillcolor),
            "translateX": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
                fillcolor=fillcolor),
            "translateY": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
                fillcolor=fillcolor),
            "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
            # "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
            "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
            "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
            "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
            "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
            "equalize": lambda img, magnitude: ImageOps.equalize(img),
            "invert": lambda img, magnitude: ImageOps.invert(img)
        }

        # self.name = "{}_{:.2f}_and_{}_{:.2f}".format(
        #     operation1, ranges[operation1][magnitude_idx1],
        #     operation2, ranges[operation2][magnitude_idx2])
        self.p1 = p1
        self.operation1 = func[operation1]
        self.magnitude1 = ranges[operation1][magnitude_idx1]
        self.p2 = p2
        self.operation2 = func[operation2]
        self.magnitude2 = ranges[operation2][magnitude_idx2]
Example #22
 def __init__(self, Numbers=None, max_Magnitude=None, **kwargs):
     '''
     Custom image data generator.
     Behaves like ImageDataGenerator, but allows color augmentation.
     '''
     super().__init__(preprocessing_function=self.__call__, **kwargs)
     
     self.transforms = ['autocontrast', 'equalize', 'rotate', 'solarize', 'color', 'posterize',
                        'contrast', 'brightness', 'sharpness', 'shearX', 'shearY', 'translateX', 'translateY']
     if Numbers is None:
         self.Numbers = len(self.transforms) // 2
     else:
         self.Numbers = Numbers
     if max_Magnitude is None:
         self.max_Magnitude = 10
     else:
         self.max_Magnitude = max_Magnitude
     fillcolor = 128
     self.ranges = {
         # These magnitude ranges are worth testing yourself to see what each
         # operation does; there is no need to match the values in autoaugment.py.
         "shearX": np.linspace(0, 0.3, 10),
         "shearY": np.linspace(0, 0.3, 10),
         "translateX": np.linspace(0, 0.2, 10),
         "translateY": np.linspace(0, 0.2, 10),
         "rotate": np.linspace(0, 360, 10),
         "color": np.linspace(0.0, 0.9, 10),
         "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
         "solarize": np.linspace(256, 231, 10),
         "contrast": np.linspace(0.0, 0.5, 10),
         "sharpness": np.linspace(0.0, 0.9, 10),
         "brightness": np.linspace(0.0, 0.3, 10),
         "autocontrast": [0] * 10,
         "equalize": [0] * 10,           
         "invert": [0] * 10
     }
     self.func = {
         "shearX": lambda img, magnitude: img.transform(
             img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
             Image.BICUBIC, fillcolor=fillcolor),
         "shearY": lambda img, magnitude: img.transform(
             img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
             Image.BICUBIC, fillcolor=fillcolor),
         "translateX": lambda img, magnitude: img.transform(
             img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
             fillcolor=fillcolor),
         "translateY": lambda img, magnitude: img.transform(
             img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
             fillcolor=fillcolor),
         "rotate": lambda img, magnitude: self.rotate_with_fill(img, magnitude),
         # "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
         "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
         "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
         "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
         "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
             1 + magnitude * random.choice([-1, 1])),
         "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
             1 + magnitude * random.choice([-1, 1])),
         "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
             1 + magnitude * random.choice([-1, 1])),
         "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
         "equalize": lambda img, magnitude: img,
         "invert": lambda img, magnitude: ImageOps.invert(img)
     }
Example #23
def test_crash():
    # crashes on small images
    im = Image.new("RGB", (1, 1))
    ImageEnhance.Sharpness(im).enhance(0.5)
Example #24
    for j in range(regen):
        z = np.random.normal(loc=0.0, scale=1.0, size=(batch_size, z_dim)).astype(np.float32)
        gen = sess.run(net_g.out, feed_dict={t_real_caption: all_cap, t_z: z})
        score = sess.run(disc.out, feed_dict={fake_image: gen,t_real_caption: all_cap}).reshape([-1])
        for k in range(batch_size):
            if score[k]<best_score[k]:
                best_imgs[k,:,:,:] = gen[k,:,:,:]
                best_score[k] = score[k]

    for j in range(batch_size):
        image_path = 'inference/inference_'+test_sentence['ID'].values[i*batch_size+j]+'.png'
        images = best_imgs[j] * 0.5 + 0.5

        #########################################
        img = Image.fromarray(np.uint8(images*255))
        enhancer = ImageEnhance.Sharpness(img)
        img = enhancer.enhance(3.0)


        #don't use color enhance
        # enhancer = ImageEnhance.Color(img)
        # images  = np.array(enhancer.enhance(1.3))

        #########################################

        scipy.misc.imsave(image_path, img)
################################### deal with last batch

all_cap = []
for i in range(n_caption_test-batch_size,n_caption_test):
    test_cap = test_sentence['Captions'].values[i]
Exemple #25
0
def sharpness(img, factor, **__):
    return ImageEnhance.Sharpness(img).enhance(factor)
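The enhancement factor follows PIL's convention: 1.0 returns an image equivalent to the original, values below 1.0 soften it, and values above 1.0 sharpen it. A small usage sketch of the helper above ("photo.jpg" is just a placeholder path):

from PIL import Image

# Illustrative usage of the helper above; the file name is a placeholder.
img = Image.open("photo.jpg")
softened = sharpness(img, 0.5)    # factor < 1.0 blurs
unchanged = sharpness(img, 1.0)   # factor == 1.0 keeps the original appearance
sharpened = sharpness(img, 2.0)   # factor > 1.0 sharpens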
Exemple #26
0
    img_denoised = np.logical_not(sgm).astype(np.uint8) * 255

    # Save denoised image
    cv2.imwrite(Working_Path + '\\' + 'Denoised_' + index, img_denoised)

    # Load the denoised image, apply a binary threshold and a morphological opening
    img = cv2.imread(Working_Path + '\\' + 'Denoised_' + index)
    ret, thresh = cv2.threshold(img, 55, 255, cv2.THRESH_BINARY)
    opening = cv2.morphologyEx(
        thresh, cv2.MORPH_OPEN,
        cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2)))
    cv2.imwrite(Procees_Path + '\\' + 'Enhanced_' + index, opening)

    # Enhance the image
    image = Image.open(Procees_Path + '\\' + 'Enhanced_' + index)
    enh_sha = ImageEnhance.Sharpness(image)
    sharpness = 100
    image_sharped = enh_sha.enhance(sharpness)
    image4 = np.array(image_sharped)
    cv2.imwrite(Final_Input + '\\' + 'Final_' + index, image4)

    # ********************************************************************************************************************************
    #OCR Part
    # ********************************************************************************************************************************

    im = Image.open(Final_Input + '\\' + 'Final_' + index)  # the second one
    im = im.filter(ImageFilter.ModeFilter())  #Filter The Image
    enhancer = ImageEnhance.Sharpness(im)
    im = enhancer.enhance(4)
    im = im.convert('1')
    im.save('temp2.jpg')
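The snippet ends after saving 'temp2.jpg'; the OCR call itself is not shown. A minimal sketch of that step, assuming Tesseract is used through pytesseract:

import pytesseract
from PIL import Image

# Assumed OCR step: read the preprocessed image and extract its text.
text = pytesseract.image_to_string(Image.open('temp2.jpg'))
print(text)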
Exemple #27
0
def sharpness(pil_img, level):
    level = float_parameter(sample_level(level), 1.8) + 0.1
    return ImageEnhance.Sharpness(pil_img).enhance(level)
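float_parameter and sample_level are used but not defined in this snippet; they look like AugMix-style severity helpers. A sketch of plausible definitions, offered as an assumption (including PARAMETER_MAX):

import numpy as np

PARAMETER_MAX = 10  # assumed maximum severity level, as in AugMix-style code

def float_parameter(level, maxval):
    # Scale a severity level in [0, PARAMETER_MAX] linearly to [0, maxval].
    return float(level) * maxval / PARAMETER_MAX

def sample_level(n):
    # Draw a continuous severity uniformly from [0.1, n].
    return np.random.uniform(low=0.1, high=n)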
    def __init__(self,
                 dataset,
                 transformations=None,
                 n_classes=8000,
                 n_trans=100,
                 max_elms=10,
                 p=0.5):
        """ExemplarNet dataset.

        Args:
            dataset (torch.utils.data.Dataset): The dataset to train on.
            transformations (list, optional): Type of elementary transformations to use.
            n_classes (int, optional): Number of classes, i.e. the subset size of the dataset. Defaults to 8000.
            n_trans (int, optional): Number of combined transformations. Defaults to 100.
            max_elms (int, optional): Number of elementary transformations per combined transformation. Defaults to 10.
            p (float, optional): Probability of an elementary transformation being part of a combined transformation. Defaults to 0.5.
        """
        pool = [
            transforms.RandomRotation(  # Rotation
                30,
                resample=False,
                expand=False,
                center=None,
                fill=None),
            transforms.RandomAffine(  # Shearing
                0,
                translate=None,
                scale=None,
                shear=30,
                resample=False,
                fillcolor=0),
            transforms.RandomAffine(  # Translate
                0,
                translate=(0.3, 0.3),
                scale=None,
                shear=None,
                resample=False,
                fillcolor=0),
            transforms.Lambda(lambda x: imo.autocontrast(x)),  # Autocontrast
            transforms.Lambda(lambda x: imo.invert(x)),  # Invert
            transforms.Lambda(lambda x: imo.equalize(x)),  # Equalize
            transforms.Lambda(lambda x: imo.solarize(x)),  # Solarize
            transforms.Lambda(lambda x: imo.posterize(
                x, bits=int(np.random.randint(4, 8) + 1))),  # Posterize
            transforms.Lambda(
                lambda x: ime.Color(x).enhance(np.random.uniform())),  # Color
            transforms.Lambda(lambda x: ime.Brightness(x).enhance(
                np.random.uniform())),  # Brightness
            transforms.Lambda(lambda x: ime.Contrast(x).enhance(
                np.random.uniform())),  # Contrast
            transforms.Lambda(lambda x: ime.Sharpness(x).enhance(
                np.random.uniform())),  # Sharpness
            transforms.Compose(  # Set black
                [
                    transforms.ToTensor(),
                    transforms.RandomErasing(1.0),
                    transforms.ToPILImage()
                ]),
            transforms.Lambda(
                lambda x: transforms.functional.to_grayscale(  # Grayscale
                    x, num_output_channels=3)),
            transforms.Lambda(lambda x: elastic_transform(x, sigma=10))
        ]

        # Processes full images and applies random cropping instead of gradient-based sampling.
        indices = torch.randint(len(dataset), (n_classes, )).long()
        self.dataset = Subset(dataset, indices)
        self.p = p
        self.n_trans = n_trans
        elm_transformations = transformations if transformations is not None else pool

        self.transformations = []
        for _ in range(self.n_trans):
            transformation = []
            for t in range(max_elms):
                if random.random() < self.p:
                    transformation.append(
                        transforms.RandomChoice(elm_transformations))
            self.transformations.append(transforms.Compose(transformation))
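Only the constructor is shown above; a sketch of the accessor methods one would expect on such an exemplar dataset follows. This is an assumption about the rest of the class, which is not part of the snippet, including the (img, label) layout of the wrapped dataset.

    # Hypothetical continuation of the class above: each subset image defines
    # its own surrogate class, and a random combined transformation is applied.
    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img, _ = self.dataset[index]  # assumes the wrapped dataset yields (img, label)
        transformation = random.choice(self.transformations)
        return transformation(img), index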
Exemple #29
0
def main():
    #default device
    device = '/device:CPU:0'

    parser = argparse.ArgumentParser()
    parser.add_argument('--model-name',
                        help='Model name to use for classification',
                        type=str)
    parser.add_argument('--captcha-dir',
                        help='Where to read the captchas to break',
                        type=str)
    parser.add_argument('--output',
                        help='File where the classifications should be saved',
                        type=str)
    parser.add_argument('--symbols',
                        help='File with the symbols to use in captchas',
                        type=str)
    parser.add_argument('--gpu', help='used to run in gpu', type=str)
    args = parser.parse_args()

    if args.model_name is None:
        print("Please specify the CNN model to use")
        exit(1)

    if args.captcha_dir is None:
        print("Please specify the directory with captchas to break")
        exit(1)

    if args.output is None:
        print("Please specify the path to the output file")
        exit(1)

    if args.symbols is None:
        print("Please specify the captcha symbols file")
        exit(1)

    if args.gpu == 'gpu':
        physical_devices = tf.config.experimental.list_physical_devices('GPU')
        assert len(physical_devices) > 0, "No GPU available!"
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
        device = '/device:GPU:0'

    symbols_file = open(args.symbols, 'r')
    captcha_symbols = symbols_file.readline().strip()
    symbols_file.close()

    print("Classifying captchas with symbol set {" + captcha_symbols + "}")

    with tf.device(device):
        with open(args.output, 'w') as output_file:
            json_file = open(args.model_name + '.json', 'r')
            loaded_model_json = json_file.read()
            json_file.close()
            model = keras.models.model_from_json(loaded_model_json)
            model.load_weights(args.model_name + '.h5')
            model.compile(loss='categorical_crossentropy',
                          optimizer=keras.optimizers.Adam(1e-3, amsgrad=True),
                          metrics=['accuracy'])

            for x in os.listdir(args.captcha_dir):
                # load image and preprocess it
                image = Image.open(os.path.join(args.captcha_dir, x))
                image = ImageOps.autocontrast(image, cutoff=10, ignore=None)
                image = ImageEnhance.Sharpness(image)
                image = image.enhance(10.0)
                image = ImageOps.grayscale(image)
                image = numpy.array(image)
                image = cv2.threshold(image, 0, 255,
                                      cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
                image = numpy.array(image) / 255.0
                image = numpy.expand_dims(image, axis=2)
                (h, w, c) = image.shape
                image = image.reshape([-1, h, w, c])
                prediction = model.predict(image)
                output_file.write(x + "," +
                                  decode(captcha_symbols, prediction) + "\n")

                print('Classified ' + x)
    print("Classifying captchas completed!")
Exemple #30
0
def sharpness(img, magnitude):
    magnitudes = np.linspace(0.1, 1.9, 11)
    img = ImageEnhance.Sharpness(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
    return img
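The magnitude argument indexes ten buckets over [0.1, 1.9], and a sharpness factor is drawn uniformly inside the chosen bucket; magnitude 5, for instance, yields a factor in [1.0, 1.18]. A short usage sketch ("photo.jpg" is a placeholder path):

from PIL import Image

# Illustrative call: magnitude 5 draws a sharpness factor from [1.0, 1.18].
img = Image.open("photo.jpg")
augmented = sharpness(img, 5)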