from PIL import Image
from PIL import ImageEnhance

img = Image.open("gato.jpg")
applier = ImageEnhance.Brightness(img)
img2 = applier.enhance(5)
applier2 = ImageEnhance.Contrast(img2)
img3 = applier2.enhance(6)

img3.save('output.jpg')
Example #2
#upper_limit = st.sidebar.slider('Maximum Gray Scale value ', 20,255,100)
#gray_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
#st.write(Gray_image)
#image = load_img(uploaded_file, target_size=(224, 224))
#gray_image = img_to_array(input_image)
enhance_type = st.sidebar.radio('Enhance Type', ['Original', 'Gray-Scale', 'Contrast', 'Brightness', 'Blurring'])
if enhance_type == 'Gray-Scale':
            new_img = np.array(our_image.convert('RGB'))
            img = cv2.cvtColor(new_img, cv2.COLOR_RGB2BGR)  # PIL gives RGB; convert to OpenCV's BGR order
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # st.write(new_img)
            st.image(gray)

if enhance_type == 'Contrast':
            c_rate = st.sidebar.slider('Contrast', 0.5, 3.5)
            enhancer = ImageEnhance.Contrast(our_image)
            img_output = enhancer.enhance(c_rate)
            st.image(img_output)

if enhance_type == 'Brightness':
            c_rate = st.sidebar.slider('Brightness', 0.5, 3.5)
            enhancer = ImageEnhance.Brightness(our_image)
            img_output = enhancer.enhance(c_rate)
            st.image(img_output)

if enhance_type == 'Blurring':
            new_img = np.array(our_image.convert('RGB'))
            blur_rate = st.sidebar.slider('Blurring', 0.5, 3.5)
            img = cv2.cvtColor(new_img, cv2.COLOR_RGB2BGR)  # PIL gives RGB; convert to OpenCV's BGR order
            blur_img = cv2.GaussianBlur(img, (11, 11), blur_rate)
            st.image(blur_img)
from PIL import ImageEnhance
from PIL import Image
image = Image.open("accept.jpg")

sharp = ImageEnhance.Sharpness(image)  # 0.0 blurred - 1.0 original - 2.0 sharpened
color = ImageEnhance.Color(image)  # 0.0 grayscale - 1.0 original - 2.0 saturated
contrast = ImageEnhance.Contrast(image)  # 0.0 solid gray to 1.0 original
bright = ImageEnhance.Brightness(image)  # 0.0 black to 1.0 original

fc = 2.0
im=sharp.enhance(fc)
#im=bright.enhance(fc)
im.save("im" + '.jpg')
Example #4
    # small patches of interference noise remain, so filtering is needed

    src1 = Image.open(
        'c1_contrast1.jpg'
    )  # cv2.imread returns a NumPy array whose mode does not match what PIL expects; Image.open returns a PIL object instead

    # brightness enhancement
    enh_bri = ImageEnhance.Brightness(src1)
    brightness = 1.3
    gf_brightened = enh_bri.enhance(brightness)
    gf_brightened.show(title='gf_brightened')
    # gf_brightened.save('gf_brightened.jpg')

    # contrast enhancement
    enh_con = ImageEnhance.Contrast(gf_brightened)
    contrast = 1.5
    gf_contrast = enh_con.enhance(contrast)
    gf_contrast.show(title='gf_contrast')
    #gf_contrast.save('gf_contrast.jpg')

    # sharpness enhancement
    enh_sha = ImageEnhance.Sharpness(gf_contrast)
    sharpness = 5.0
    gf_sharped = enh_sha.enhance(sharpness)
    gf_sharped.show(title='gf_sharped')
    #gf_sharped.save('gf_sharped.jpg')

    # color (saturation) enhancement
    enh_col = ImageEnhance.Color(gf_sharped)
    color = 1
    gf_colored = enh_col.enhance(color)
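A note on the comment above: if the source really does come from cv2.imread (a NumPy array) rather than a file on disk, it can still be handed to ImageEnhance by converting it to a PIL object first. A minimal sketch, assuming a BGR ndarray from OpenCV (the file name is reused from the snippet purely for illustration):

import cv2
from PIL import Image, ImageEnhance

arr = cv2.imread('c1_contrast1.jpg')  # ndarray in BGR channel order
pil_img = Image.fromarray(cv2.cvtColor(arr, cv2.COLOR_BGR2RGB))  # bridge to a PIL image
pil_img = ImageEnhance.Brightness(pil_img).enhance(1.3)  # enhancers now work as above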
Example #5
def contrast(img, factor, **__):
    return ImageEnhance.Contrast(img).enhance(factor)
Example #6
def main():
    st.sidebar.header('PyGram')
    st.sidebar.info('Feito 100% em Python :)')
    st.sidebar.subheader(
        'App para aplicar filtros em imagens, utilizando a biblioteca OpenCV.')

    opcoes_menu = ['Sobre', 'Filtros']
    escolha = st.sidebar.selectbox('Escolha uma opção', opcoes_menu)

    our_image = Image.open('empty.jpg')

    if escolha == 'Sobre':
        st.title('Masterclass - visão computacional')
        st.markdown('Projeto voltado para aplicação de filtros em imagens')

    elif escolha == 'Filtros':
        st.title("Masterclass - Visão Computacional")

        st.subheader('Carregar arquivo de imagem')
        image_file = st.file_uploader('Escolha uma imagem',
                                      type=['jpg', 'png', 'jpeg'])

        if image_file is not None:
            our_image = Image.open(image_file)
            st.sidebar.text('Imagem original')
            st.sidebar.image(our_image, width=150)

        col1, col2 = st.beta_columns(2)

        filtros = st.sidebar.radio('Filtros', [
            'Original', 'Grayscale', 'Desenho', 'Sépia', 'Blur', 'Canny',
            'Contraste'
        ])

        if filtros == 'Grayscale':
            converted_image = np.array(our_image.convert('RGB'))
            gray_image = cv2.cvtColor(converted_image, cv2.COLOR_RGB2GRAY)

            col1.header('Original')
            col1.image(our_image, use_column_width=True)
            col2.header('Grayscale')
            col2.image(gray_image, use_column_width=True)

            if st.button('Save'):
                cv2.imwrite('download_photo_gray.jpg', gray_image)

        elif filtros == 'Desenho':
            converted_image = np.array(our_image.convert('RGB'))
            gray_image = cv2.cvtColor(converted_image, cv2.COLOR_RGB2GRAY)
            inv_gray_image = 255 - gray_image
            blur_image = cv2.GaussianBlur(inv_gray_image, (21, 21), 0, 0)
            sketch_image = cv2.divide(gray_image, 255 - blur_image, scale=256)  # divide by the inverted blur for the pencil-sketch effect

            col1.header('Original')
            col1.image(our_image, use_column_width=True)
            col2.header('Desenho')
            col2.image(sketch_image, use_column_width=True)

            if st.button('Save'):
                cv2.imwrite('download_photo_sketch.jpg', sketch_image)

        elif filtros == 'Sépia':
            converted_image = np.array(our_image.convert('RGB'))
            gray_image = cv2.cvtColor(converted_image, cv2.COLOR_RGB2GRAY)
            kernel = np.array([[0.272, 0.534, 0.131], [0.349, 0.686, 0.168],
                               [0.393, 0.769, 0.189]])
            sepia_image = cv2.filter2D(converted_image, -1, kernel)

            col1.header("Original")
            col1.image(our_image, use_column_width=True)
            col2.header("Sépia")
            col2.image(sepia_image, channels="BGR", use_column_width=True)

            if st.button('Save'):
                cv2.imwrite('download_photo_sepia.jpg', sepia_image)

        elif filtros == 'Blur':
            b_amount = st.sidebar.slider('Kernel (n x n)', 3, 27, 9, step=2)
            converted_image = np.array(our_image.convert('RGB'))
            converted_image = cv2.cvtColor(converted_image, cv2.COLOR_RGB2BGR)
            blur_image = cv2.GaussianBlur(converted_image,
                                          (b_amount, b_amount), 0, 0)

            col1.header("Original")
            col1.image(our_image, use_column_width=True)
            col2.header("Blur")
            col2.image(blur_image, channels="BGR", use_column_width=True)

            if st.button('Save'):
                cv2.imwrite('download_photo_blur.jpg', blur_image)

        elif filtros == 'Canny':
            converted_image = np.array(our_image.convert('RGB'))
            converted_image = cv2.cvtColor(converted_image, cv2.COLOR_RGB2BGR)
            blur_image = cv2.GaussianBlur(converted_image, (11, 11), 0)
            canny = cv2.Canny(blur_image, 100, 150)

            col1.header('Original')
            col1.image(our_image, use_column_width=True)
            col2.header('Canny Edge Detection')
            col2.image(canny, use_column_width=True)

            if st.button('Save'):
                cv2.imwrite('download_photo_canny.jpg', canny)

        elif filtros == 'Contraste':
            c_amount = st.sidebar.slider('Contraste', 0.0, 2.0, 1.0)
            enhancer = ImageEnhance.Contrast(our_image)
            contrast_image = enhancer.enhance(c_amount)

            col1.header("Original")
            col1.image(our_image, use_column_width=True)
            col2.header("Contraste")
            col2.image(contrast_image, use_column_width=True)

        elif filtros == 'Original':
            st.image(our_image, width=OUTPUT_WIDTH)

        else:
            st.image(our_image, width=OUTPUT_WIDTH)
Example #7
# save full res image

#Main loop
while True:
    stream.seek(0)

    # View Loop
    for foo in camera.capture_continuous(stream,
                                         format='jpeg',
                                         use_video_port=True):
        stream.seek(
            0
        )  # "Rewind" the stream to the beginning so we can read its content
        image_source = Image.open(stream)
        imageResized = image_source.resize((S_WIDTH, S_HEIGHT), Image.NEAREST)
        imageEnancer = ImageEnhance.Contrast(imageResized)
        imageContrasted = imageEnancer.enhance(2)
        imageInverted = ImageOps.invert(imageContrasted)
        imagedithered = imageInverted.convert(
            '1')  # convert image to black or white

        lcd.write(imagedithered.tobytes())

        stream.seek(0)

        if GPIO.event_detected(SHOT_PIN):
            isShot = True
            break
        if GPIO.event_detected(PRINT_PIN):
            break
Example #8
 def __optimize(image):
     image = image.convert('L')
     image = ImageProcessor.__scale_to_working_size(image)
     image = ImageEnhance.Contrast(image).enhance(2)
     return image
Example #9
 def _apply_contrast(data: np.ndarray, factor: float) -> np.ndarray:
     im = Image.fromarray(data)
     im = ImageEnhance.Contrast(im).enhance(factor)
     return np.array(im)
Example #10
for infile in glob.glob("*.jpg"):
    file, ext = os.path.splitext(infile)
    im = Image.open(infile)
    img = Image.open(infile)
    img = img.transpose(Image.FLIP_LEFT_RIGHT)
    img.save(file + "F.jpg", "JPEG")
    #    width = 400
    #    height = 250
    #    im = im.resize((width, height), Image.ANTIALIAS)  # best down-sizing filter
    #    im1 = im.filter(ImageFilter.GaussianBlur(radius=1))
    #    im1 = im.filter(ImageFilter.MaxFilter(size=3))
    #    out = im.convert("P", palette=Image.ADAPTIVE, colors=256)
    #    im.ImageEnhance.Contrast(im)
    enhancer = ImageEnhance.Brightness(im)
    enhanced = enhancer.enhance(0.5)
    enhancer = ImageEnhance.Contrast(enhanced)
    enhanced = enhancer.enhance(1.1)
    enhancer = ImageEnhance.Sharpness(enhanced)
    enhanced = enhancer.enhance(1.5)
    enhanced.save(file + "B.jpg", "JPEG")

    enhancer = ImageEnhance.Brightness(img)
    enhanced = enhancer.enhance(0.5)
    enhancer = ImageEnhance.Contrast(enhanced)
    enhanced = enhancer.enhance(1.1)
    enhancer = ImageEnhance.Sharpness(enhanced)
    enhanced = enhancer.enhance(1.5)
    enhanced.save(file + "FB.jpg", "JPEG")

    #    enhanced.show()
    #    out.show()
Example #11
def simpleImageAugmentation(files, storepath, pickleFilename, enhance=20):
    '''
    input :
           files:          An iterable of (train_path, target_path) string pairs.
                           The train and target folders are separate, but their
                           file paths have been zipped into single pairs.

           storepath:      A string.
                           Directory (with trailing separator) in which the final
                           zipped (train, target) array is stored. The array of
                           augmented train-target pairs is saved into this
                           directory as a pickle file.

           pickleFilename: A string.
                           Base name (without extension) of the pickle file.

           enhance:        An int.
                           Number of randomly contrast-enhanced copies to generate.

    Note : the essence of this entire function is to perform simple data augmentation,
    to give us more sample data for experimentation. It is possible to use the
    Albumentations library (https://github.com/albumentations-team/albumentations)
    to achieve similar or even more sophisticated transforms, but for didactic
    purposes the transformations here are more than enough.

    Note also that the file paths here are loaded from the config.yml file.
    '''

    ################################################################################
    # final Image stored in an array
    # images are stored as tuple of input-target pair in this array
    ImageStore = []
    ################################################################################

    ################################################################################
    # loop through the train and target pair folders
    # the file paths are specified in the config.yml file
    for filepath_train, filepath_target in files:

        #print(filepath_train,filepath_target)
        try:
            # Attempt to open an image file
            # note RGB is specified here because the mode is RGBA in PIL
            # library by default, so one needs to convert to RGB
            image = Image.open(filepath_train).convert('RGB')
            # print image mode if necessary
            #print(image.mode)
            target = Image.open(filepath_target).convert('L')
            # print target  mode if necessary
            #print(target.mode)
        except IOError as err:
            # Report error if a file path is wrong,
            # and then break
            print("Problem opening image files: ", files, ":", err)
            break
    ################################################################################

    ################################################################################
    # put the original train and target pair in a list
        image_target_pair = [image, target]
        # convert the list to a tuple and append to the ImageStore
        ImageStore.append(tuple(image_target_pair))
        # define mirror and flip function using lambda expression
        # if ops is 'm' then it's a mirror transformation, otherwise do a flip operation
        # ops specifies the type of operation
        transMirr = lambda image, ops: ImageOps.mirror(
            image) if ops == 'm' else ImageOps.flip(image)

        # define enhance and rotate function using lambda expression
        # val is the rotation angle for the rotation operation,
        # or the enhancement value for the contrast operation
        rotateEnhanceImage = lambda image, val, ops: image.rotate(
            val, resample=0, expand=0
        ) if ops == 'r' else ImageEnhance.Contrast(image).enhance(val)
        ################################################################################

        ################################################################################
        # mirror, flip, rotate and enhance operation performed here

        # mirror the image-target pair in the list image_target_pair and store it thereafter
        imageMirror = tuple([transMirr(i, 'm') for i in image_target_pair])
        #print(skimage.img_as_float(imageMirror[0]).shape)
        ImageStore.append(imageMirror)
        # flip the image and target pair using transMirr; ops is now 'f', which means flip
        imgageTransposes = tuple(
            [transMirr(i, 'f') for i in image_target_pair])
        #print(skimage.img_as_float(imgageTransposes[0]).shape)
        # save
        ImageStore.append(imgageTransposes)
        #  to perform rotation
        # store angles to be used in a list; this is an arbitrary choice of angles (multiples of 3)
        angles = [i for i in range(2, 359) if i % 3 == 0]
        # rotate images using the angles in the list above, then store

        #-----------------------------------------------------------------
        ### release this later
        # image_targetPairRotated = [(rotateEnhanceImage(image,i,'r'), rotateEnhanceImage(target,i,'r')) for i in angles ]
        # ImageStore.extend(image_targetPairRotated)
        #-----------------------------------------------------------------

        # enhancement option
        # store the enhancement values in a list
        enhance = [random.uniform(1.2, 1.9) for i in range(enhance)]
        # enhance the image using the enhancement values in the list
        image_targetEnhanced = [(rotateEnhanceImage(image, i, 'e'),
                                 rotateEnhanceImage(target, i, 'e'))
                                for i in enhance]
        #image_targetEnhanced[0].show()
        #print(skimage.img_as_float(image_targetEnhanced[0][0]).shape)
        ImageStore.extend(image_targetEnhanced)
        ################################################################################

        ################################################################################
        # transpose flip mask   operation
        # this function has been defined  in this file
        # apply this function on image  and store
        image_targetTransposeFlipped = tuple([
            trapsoseFlipRotate(i, j) for i, j in enumerate(image_target_pair)
        ])
        #print(skimage.img_as_float(image_targetTransposeFlipped[0]).shape)
        ImageStore.append(image_targetTransposeFlipped)
    ################################################################################

    # ################################################################################
    # save the ImageStore list file as pickle
    # now we have the training-target pairs; one is ready to begin training
    filename = pickleFilename + '.pkl'
    with open(storepath + filename, 'wb') as f:
        pkl.dump(ImageStore, f)

    print('Data preprocessing and storage successful. \n Total data size : ',
          len(ImageStore))

    # clean up after the whole preprocessing. Saving some space
    del ImageStore
    gc.collect()
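A minimal, hypothetical call sketch for simpleImageAugmentation above. The folder layout, glob patterns, and pickle name are illustrative assumptions, not values taken from the snippet or its config.yml:

import glob

# pair up train and target files (assumes the two folders sort into matching order)
train_files = sorted(glob.glob('data/train/*.png'))
target_files = sorted(glob.glob('data/target/*.png'))
files = list(zip(train_files, target_files))

simpleImageAugmentation(files,
                        storepath='data/augmented/',
                        pickleFilename='train_target_pairs',
                        enhance=20)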
Example #12
    #SecretCode = raw_input('please enter the code: ')
    #----------------------------------------------------------------------
    '''

    #-------------------- image enhancement + automatic recognition of a simple captcha -----------------------------
    # time.sleep(3) guards against starting recognition before the image has finished saving (slow network)
    def image_file_to_string(file):
        cwd = os.getcwd()
        try :
            os.chdir("C:\Users\MrLevo\Anaconda2\Lib")
            return pytesser.image_file_to_string(file)
        finally:
            os.chdir(cwd)
    im=Image.open("E:\\image_code.jpg")
    imgry = im.convert('L')  # convert to grayscale before binarization
    sharpness = ImageEnhance.Contrast(imgry)  # contrast enhancement
    sharp_img = sharpness.enhance(2.0)
    sharp_img.save("E:\\image_code.jpg")
    #http://www.cnblogs.com/txw1958/archive/2012/02/21/2361330.html
    # imgry.show()  # only used for step-by-step testing; keep it commented out when running the whole program
    #imgry.save("E:\\image_code.jpg")

    code = pytesser.image_file_to_string("E:\\image_code.jpg")  # code is the recognized digit string from the image
    print(code)
    # print code to check whether recognition is correct


    #----------------------------------------------------------------------
    if i <= 2:  # depends on your login flow: here, one captcha failure means refilling everything, two failures mean refilling only the captcha
        elem_user.send_keys('S315080092')
        elem_psw.send_keys('xxxxxxxxxx')
Example #13
    def effect(self, effect):

        filepath, ext = os.path.splitext(self.pilimage)
        edit_path = filepath + 'edited' + ext

        if effect == 'brightness':

            img = Image.open(self.pilimage)
            enh = ImageEnhance.Brightness(img)
            img = enh.enhance(1.8)

        if effect == 'grayscale':

            img = Image.open(self.pilimage).convert('L')

        if effect == 'blackwhite':

            img = Image.open(self.pilimage).convert('1')

        if effect == 'sepia':

            serpia = make_linear_ramp((255, 240, 192))
            img = Image.open(self.pilimage).convert('L')
            img.putpalette(serpia)

        if effect == 'contrast':

            img = Image.open(self.pilimage)
            enh = ImageEnhance.Contrast(img)
            img = enh.enhance(2.0)

        # Filters here
        if effect == 'blur':

            img = Image.open(self.pilimage)
            img = img.filter(ImageFilter.BLUR)

        if effect == 'findedges':

            img = Image.open(self.pilimage)
            img = img.filter(ImageFilter.FIND_EDGES)

        if effect == 'bigenhance':

            img = Image.open(self.pilimage)
            img = img.filter(ImageFilter.EDGE_ENHANCE_MORE)

        if effect == 'enhance':

            img = Image.open(self.pilimage)
            img = img.filter(ImageFilter.EDGE_ENHANCE)

        if effect == 'smooth':

            img = Image.open(self.pilimage)
            img = img.filter(ImageFilter.SMOOTH_MORE)

        if effect == 'emboss':

            img = Image.open(self.pilimage)
            img = img.filter(ImageFilter.EMBOSS)

        if effect == 'contour':

            img = Image.open(self.pilimage)
            img = img.filter(ImageFilter.CONTOUR)

        if effect == 'sharpen':

            img = Image.open(self.pilimage)
            img = img.filter(ImageFilter.SHARPEN)

        img.save(edit_path, format='PNG', quality=100)
Example #14
    def process(self, image_width, image_height, image_translate_x,
                image_translate_y, image_flip_x, image_flip_y,
                image_rotate_degree, image_contrast, image_brightness,
                image_blur, image_saturate, image_opacity, frame_width,
                frame_height):
        """
        Run process operations under image and return edited image path.
        If USE_CACHE is enabled and cache exists, method returns cached image path.

        :Args:
          - self (:class:`BkImageEditor`): BkImageEditor instance.
          - image_width (:class:`int`): Resize image to selected width.
          - image_height (:class:`int`): Resize image to selected height.
          - image_translate_x (:class:`int`): Move image by X axis. 0:0 is frame left top corner.
          - image_translate_y (:class:`int`): Move image by Y axis. 0:0 is frame left top corner.
          - image_flip_x (:class:`bool`): Flip image X axis.
          - image_flip_y (:class:`bool`): Flip image Y axis.
          - image_rotate_degree (:class:`int`): Rotate image degree.
          - image_contrast (:class:`int`): Image contrast level.
          - image_brightness (:class:`int`): Image brightness level.
          - image_blur (:class:`int`): Image blur level.
          - image_saturate (:class:`int`): Image saturate level.
          - image_opacity (:class:`int`): Image opacity level.
          - frame_width (:class:`int`): Frame/crop area width.
          - frame_height (:class:`int`): Frame/crop area height.

        :Returns:
          Path to edited image
        """

        self._image_width = image_width
        self._image_height = image_height
        self._image_translate_x = image_translate_x
        self._image_translate_y = image_translate_y
        self._image_flip_x = image_flip_x
        self._image_flip_y = image_flip_y
        self._image_rotate_degree = image_rotate_degree
        self._image_contrast = image_contrast
        self._image_brightness = image_brightness
        self._image_blur = image_blur
        self._image_saturate = image_saturate
        self._image_opacity = image_opacity
        self._frame_width = frame_width
        self._frame_height = frame_height

        output_filepath = os.path.join(self._cache_folder,
                                       self.output_filename)

        # cache folder
        if not os.path.exists(self._cache_folder):
            # folder can be created by another process between os.path.exists and os.makedirs
            try:
                os.makedirs(self._cache_folder)
            except OSError:
                pass

        if self.USE_CACHE:
            if os.path.exists(output_filepath):
                return output_filepath

        try:

            # copy the source image via convert(), keeping its original mode
            pil_region = self._input_image_file.convert(
                self._input_image_file.mode)

            # try to get icc profile
            try:
                icc_profile = self._input_image_file.info.get("icc_profile")
            except:
                icc_profile = None

            # resize image (not frame)
            pil_region = pil_region.resize(
                (self._image_width, self._image_height), Image.ANTIALIAS)

            # scale image (flip)
            if self._image_flip_x:
                pil_region = pil_region.transpose(Image.FLIP_LEFT_RIGHT)

            if self._image_flip_y:
                pil_region = pil_region.transpose(Image.FLIP_TOP_BOTTOM)

            # rotate image
            if self._image_rotate_degree != 0:
                pil_region = pil_region.rotate(self._image_rotate_degree)

            # apply frame cropping
            if self._image_rotate_degree in (-270, -90, 90, 270):
                xsize, ysize = self._image_width, self._image_height

                # initial image left-top coordinates relatively to the frame
                x, y = self._image_translate_x, self._image_translate_y

                # image center coordinates
                xc = x + xsize / 2
                yc = y + ysize / 2

                # rotate degree
                rotate_deg = self._image_rotate_degree
                rotate_radians = math.radians(rotate_deg)

                # calculate left-top image coordinates (relatively to the frame) after rotation
                # used formula:
                # X = x0 + (x - x0) * cos(a) - (y - y0) * sin(a)
                # Y = y0 + (y - y0) * cos(a) + (x - x0) * sin(a)
                x1 = xc + (x - xc) * math.cos(rotate_radians) - (
                    y - yc) * math.sin(rotate_radians)
                y1 = yc + (y - yc) * math.cos(rotate_radians) + (
                    x - xc) * math.sin(rotate_radians)
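                # A small worked example of the formula above (values chosen purely for illustration):
                # with xsize=400, ysize=200, x=y=0 and rotate_deg=90 (cos=0, sin=1):
                #   xc=200, yc=100, so x1 = 200 - (0 - 100) = 300 and y1 = 100 + (0 - 200) = -100;
                #   the branch below then corrects x1 to 300 - ysize = 100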

                if rotate_deg in (-270, 90):
                    x1 -= ysize
                # -90, 270
                else:
                    y1 -= xsize

                frame = (int(x1) * (-1),
                         int(y1) * (-1), int(x1) * (-1) + self._frame_width,
                         int(y1) * (-1) + self._frame_height)
            else:
                frame = (self._image_translate_x * (-1),
                         self._image_translate_y * (-1),
                         self._image_translate_x * (-1) + self._frame_width,
                         self._image_translate_y * (-1) + self._frame_height)

            pil_region = pil_region.crop(frame)

            # contrast
            contr = ImageEnhance.Contrast(pil_region)
            pil_region = contr.enhance(self._image_contrast)

            # brightness
            brightness = ImageEnhance.Brightness(pil_region)
            pil_region = brightness.enhance(self._image_brightness)

            # saturate
            saturate = ImageEnhance.Color(pil_region)
            pil_region = saturate.enhance(self._image_saturate)

            # blur
            # TODO test this part one more time
            pil_region = pil_region.filter(
                ImageFilter.GaussianBlur(self._image_blur))

            if icc_profile:
                pil_region.save(output_filepath,
                                quality=self.QUALITY,
                                dpi=(self.DPI, self.DPI),
                                icc_profile=icc_profile)
            else:
                pil_region.save(output_filepath,
                                quality=self.QUALITY,
                                dpi=(self.DPI, self.DPI))

            return output_filepath

        except Exception as e:
            logger.exception("BkImageEditor: {}. Image: {}".format(
                e, self._input_image_file))
            return None
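A hedged call sketch for the process method above, built only from the parameters its docstring documents; the BkImageEditor constructor is not part of this excerpt, so its arguments are left as a placeholder:

editor = BkImageEditor(...)  # constructor arguments omitted; they are not shown in this excerpt

edited_path = editor.process(
    image_width=800, image_height=600,            # resize the image itself
    image_translate_x=-50, image_translate_y=0,   # image offset from the frame's top-left corner
    image_flip_x=False, image_flip_y=False,
    image_rotate_degree=90,
    image_contrast=1, image_brightness=1,         # a factor of 1 leaves the channel untouched
    image_blur=0, image_saturate=1, image_opacity=1,
    frame_width=640, frame_height=480)            # crop/frame area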
Example #15
import pytesseract
from PIL import Image, ImageEnhance, ImageFilter

im = Image.open('Plate6_Fotor.jpg')  # img is the path of the image
im = im.convert("RGBA")
newimdata = []
datas = im.getdata()

for item in datas:
    if item[0] < 112 or item[1] < 112 or item[2] < 112:
        newimdata.append(item)
    else:
        newimdata.append((255, 255, 255))
im.putdata(newimdata)

im = im.filter(ImageFilter.MedianFilter())
enhancer = ImageEnhance.Contrast(im)
im = enhancer.enhance(2)
im = im.convert('1')
im.save('temp2.jpg')
text = pytesseract.image_to_string(
    Image.open('temp2.jpg'),
    config=
    '-c tessedit_char_whitelist=0123456789abcdefghijklmnopqrstuvwxyz -psm 6',
    lang='eng')
print(text)
Example #16
filtered = filtered.resize((w*9, h*16))
width, height = filtered.size

if args.invert:
    filtered = ImageOps.invert(filtered)

if not args.raw:
    print("PRE-PROCESSING IMAGE")
    filtered = ImageOps.autocontrast(filtered)
    filtered = filtered.filter(ImageFilter.CONTOUR)
    filtered = ImageOps.invert(filtered)
    filtered = ImageChops.subtract(filtered, ImageChops.constant(filtered, 50).convert('RGB'))
    filtered = filtered.filter(ImageFilter.BLUR)
    filtered = ImageEnhance.Brightness(filtered).enhance(2)
    filtered = ImageEnhance.Contrast(filtered).enhance(2)
    filtered = filtered.filter(ImageFilter.BLUR)
    filtered = ImageEnhance.Brightness(filtered).enhance(2)
    filtered = ImageEnhance.Contrast(filtered).enhance(2)

#filtered.show()

s = ""

print("MATCHING")
for y in range(0, math.floor(height/16)):
    for x in range(0, math.floor(width/9)):
        bounding_box = (x * 9, y * 16, x * 9 + 9, y * 16 + 16)
        selection = filtered.crop(bounding_box)
        match = {}
        for bmp in character_bmps.keys():
Example #17
import numpy as np
from pathlib import Path
from PIL import Image, ImageEnhance

splattern_types = ['castoff', 'drops', 'projected', 'spatters']

jpg_files = [
    str(p) for splattern in splattern_types
    for p in Path(f'./images/raw/{splattern}/').glob('*.jpg')
]

for filename in jpg_files:
    print(f'Processing file {filename}')
    im = Image.open(filename)
    brightness = ImageEnhance.Brightness(im)
    im = brightness.enhance(2)
    contrast = ImageEnhance.Contrast(im)
    im = contrast.enhance(1.5)
    im = im.convert('RGBA')
    data = np.array(im)
    rgb = data[:, :, :3]
    color = [50, 0, 0]
    black = [0, 0, 0, 255]
    white = [255, 255, 255, 255]
    mask = np.all((rgb - color) > 50, axis=-1)
    data[mask] = white
    new_im = Image.fromarray(data)
    grayscale = new_im.convert("L")
    grayscale.save(
        filename.replace('/raw/', '/processed/').replace('.jpg', '') +
        '-processed.jpg')
 def __call__(self, img):
     alpha = random.uniform(1 - self.var, 1 + self.var)
     return ie.Contrast(img).enhance(alpha)
import cv2
import imutils
import numpy as np
from PIL import Image
from PIL import ImageEnhance

image = Image.open("./paper12.jpg")
enh_bri = ImageEnhance.Brightness(image)
brightness = 1
image_brightened = enh_bri.enhance(brightness)
#image_brightened.save("bright.jpg")

image = image_brightened
enh_col = ImageEnhance.Color(image)
color = 1.5
image_colored = enh_col.enhance(color)
#image_colored.save("color.jpg")

image = image_colored
enh_con = ImageEnhance.Contrast(image)
contrast = 1.5
image_contrasted = enh_con.enhance(contrast)
#image_contrasted.save("contrast.jpg")

image = image_contrasted
enh_sha = ImageEnhance.Sharpness(image)
sharpness = 1.0
image_sharped = enh_sha.enhance(sharpness)
image_sharped.save("/var/www/html/python/enhanced_img.jpg")
Example #20
    def verfyCode(self,
                  driver,
                  ID=None,
                  Class=None,
                  css=None,
                  link_text=None,
                  xpath=None):
        vCodePath = os.path.join(os.path.abspath(".."), 'verPic')
        if not os.path.exists(vCodePath):
            os.mkdir(vCodePath)
        # save the screenshot
        imagePath = vCodePath + '\\' + 'CreateCaptcha.png'
        driver.get_screenshot_as_file(imagePath)

        if ID != None:
            ID = str(ID)
            # get the captcha element's x, y position
            self.imageEle = driver.find_element_by_id(ID)
        if Class != None:
            Class = str(Class)
            # get the captcha element's x, y position
            self.imageEle = driver.find_element_by_class_name(Class)
        if css != None:
            css = str(css)
            # get the captcha element's x, y position
            self.imageEle = driver.find_element_by_css_selector(css)
        if link_text != None:
            link_text = str(link_text)
            # get the captcha element's x, y position
            self.imageEle = driver.find_element_by_link_text(link_text)

        if xpath != None:
            xpath = str(xpath)
            # get the captcha element's x, y position
            self.imageEle = driver.find_element_by_xpath(xpath)

        location = self.imageEle.location
        # get the element size
        size = self.imageEle.size
        self.codeRange = (int(location['x']), int(location['y']),
                          int(location['x'] + size['width']),
                          int(location['y'] + size['height']))
        # print(size)
        # open the PNG screenshot
        imageTemp = Image.open(imagePath)

        imageFrame = imageTemp.crop(self.codeRange)  # crop out the captcha region

        imageFrame.save(imagePath)
        time.sleep(2)

        image = Image.open(imagePath)

        image = image.convert(
            'L')  # convert to grayscale; PIL has nine modes (1, L, P, RGB, RGBA, CMYK, YCbCr, I, F) and L is grayscale

        image = ImageEnhance.Contrast(image).enhance(2.0)  # contrast enhancement (the original built the enhancer but never applied it; 2.0 is an illustrative factor)
        threshold = 80  # binarization threshold
        table = []
        for i in range(256):
            if i < threshold:
                table.append(0)
            else:
                table.append(1)
        # print(table)
        image = image.point(table, '1')  # apply the lookup table to binarize the image

        # image = image.convert('RGBA')
        # picData = image.load()
        # for y in range(image.size[1]):
        #     for x in range(image.size[0]):
        #         # loop over every pixel; each pixel is a length-4 tuple because the image was converted to RGBA, and A is the alpha channel
        #         if picData[x, y][0] > 80 and picData[x, y][1] > 80 and picData[x, y][2] > 80 and picData[x, y][3] > 80:
        #             picData[x, y] = (255, 255, 255, 0)
        #         else:
        #             picData[x, y] = (0, 0, 0, 0)
        # image.resize((500,400))
        #image.show()

        result = pytesser3.image_to_string(image).replace(' ', '').replace(
            '"', '').replace('-', '').replace('.',
                                              '').replace('`',
                                                          '').replace(';', '')
        print(u'captcha result: %s' % result)
        return result
        def __init__(self,
                     p1,
                     operation1,
                     magnitude_idx1,
                     p2,
                     operation2,
                     magnitude_idx2,
                     fillcolor=(128, 128, 128)):
            ranges = {
                "shearX": np.linspace(0, 0.3, 10),
                "shearY": np.linspace(0, 0.3, 10),
                "translateX": np.linspace(0, 150 / 331, 10),
                "translateY": np.linspace(0, 150 / 331, 10),
                "rotate": np.linspace(0, 30, 10),
                "color": np.linspace(0.0, 0.9, 10),
                "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
                "solarize": np.linspace(256, 0, 10),
                "contrast": np.linspace(0.0, 0.9, 10),
                "sharpness": np.linspace(0.0, 0.9, 10),
                "brightness": np.linspace(0.0, 0.9, 10),
                "autocontrast": [0] * 10,
                "equalize": [0] * 10,
                "invert": [0] * 10
            }

            # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
            def rotate_with_fill(img, magnitude):
                rot = img.convert("RGBA").rotate(magnitude)
                return Image.composite(
                    rot, Image.new("RGBA", rot.size, (128, ) * 4),
                    rot).convert(img.mode)

            func = {
                "shearX":
                lambda img, magnitude: img.
                transform(img.size,
                          Image.AFFINE,
                          (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
                          Image.BICUBIC,
                          fillcolor=fillcolor),
                "shearY":
                lambda img, magnitude: img.transform(
                    img.size,
                    Image.AFFINE,
                    (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
                    Image.BICUBIC,
                    fillcolor=fillcolor),
                "translateX":
                lambda img, magnitude: img.transform(
                    img.size,
                    Image.AFFINE, (1, 0, magnitude * img.size[0] * random.
                                   choice([-1, 1]), 0, 1, 0),
                    fillcolor=fillcolor),
                "translateY":
                lambda img, magnitude: img.transform(
                    img.size,
                    Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] *
                                   random.choice([-1, 1])),
                    fillcolor=fillcolor),
                "rotate":
                lambda img, magnitude: rotate_with_fill(img, magnitude),
                # "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
                "color":
                lambda img, magnitude: ImageEnhance.Color(img).enhance(
                    1 + magnitude * random.choice([-1, 1])),
                "posterize":
                lambda img, magnitude: ImageOps.posterize(img, magnitude),
                "solarize":
                lambda img, magnitude: ImageOps.solarize(img, magnitude),
                "contrast":
                lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
                    1 + magnitude * random.choice([-1, 1])),
                "sharpness":
                lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
                    1 + magnitude * random.choice([-1, 1])),
                "brightness":
                lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
                    1 + magnitude * random.choice([-1, 1])),
                "autocontrast":
                lambda img, magnitude: ImageOps.autocontrast(img),
                "equalize":
                lambda img, magnitude: ImageOps.equalize(img),
                "invert":
                lambda img, magnitude: ImageOps.invert(img)
            }

            # self.name = "{}_{:.2f}_and_{}_{:.2f}".format(
            #     operation1, ranges[operation1][magnitude_idx1],
            #     operation2, ranges[operation2][magnitude_idx2])
            self.p1 = p1
            self.operation1 = func[operation1]
            self.magnitude1 = ranges[operation1][magnitude_idx1]
            self.p2 = p2
            self.operation2 = func[operation2]
            self.magnitude2 = ranges[operation2][magnitude_idx2]
Example #22
# set up the Sharp Memory LCD
lcd = SMemLCD('/dev/spidev0.0')

# GPIO setup
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(7, GPIO.RISING)  # add rising edge detection on a channel

try:

    for foo in camera.capture_continuous(stream, format='jpeg', use_video_port=True, resize=(WIDTH, HEIGHT)):
        stream.seek(0) # "Rewind" the stream to the beginning so we can read its content

        image_source = Image.open(stream)
        imageEnancer = ImageEnhance.Contrast(image_source)
        imageContrasted = imageEnancer.enhance(2)
        imageInverted = PIL.ImageOps.invert(imageContrasted)
        imagedithered = imageInverted.convert('1') # convert image to black or white
        
        lcd.write(imagedithered.tobytes())
        
        stream.seek(0)
        
        if GPIO.event_detected(7):
            print('Button pressed')
            break
    
    camera.capture(stream, format='jpeg', use_video_port=True)
    stream.seek(0) # "Rewind" the stream to the beginning so we can read its content
    image_source = Image.open(stream)
Example #23
from PIL import Image, ImageEnhance
import cv2
import numpy as np

brightness_rate = 3.0
contrast_rate = 10.0
color_rate = 0.4
peak = Image.open("image.jpg")


# Change Brightness
enhancer = ImageEnhance.Brightness(peak)
bright = enhancer.enhance(brightness_rate)
bright.save("Brightness.jpg")

# Change contrast
contrast = ImageEnhance.Contrast(peak)
contrast = contrast.enhance(contrast_rate) # set FACTOR > 1 to enhance contrast, < 1 to decrease
contrast.save("Contrast.jpg")

#change color
color = ImageEnhance.Color(peak)
color = color.enhance(color_rate)
color.save("color.jpg")
Example #24
 def __init__(self, image: Image, contrast: float = 1):
     """
     Initializes the engine with PIL Image.
     """
     self.__image = ImageEnhance.Contrast(
         image.convert("L")).enhance(contrast)
Example #25
            size = browser.find_element_by_id('verify_img').size
        left = location['x']
        top = location['y']
        right = location['x'] + size['width']
        bottom = location['y'] + size['height']
        #print left,top,right,bottom
        # read the screenshot from the file, crop the captcha area, and save it again
        """
            注意:这块有个坑,获取的数值都是float,需要强制转化成int,否则会报错
        """
        img = Image.open(screenImg).crop(
            (int(left), int(top), int(right), int(bottom)))
        #img.show()
        #time.sleep(10)
        img = img.convert('L')  # conversion mode: L | RGB
        img = ImageEnhance.Contrast(img)  # build the contrast enhancer
        img = img.enhance(2.0)  # apply the contrast enhancement
        img.save(screenImg)
        """
            换别的OCR图像识别
        """
        code = getCode(screenImg)
        if (verifyType == 1):
            browser.find_element_by_id("seccodeInput").send_keys(code.strip())
        else:
            browser.find_element_by_id("input").send_keys(code.strip())
        # print the recognized code
        print(code.strip())
        time.sleep(5)

    # submit the data
def post_process_particle_layer(particle_layer):
    particle_layer = ImageEnhance.Contrast(particle_layer).enhance(1.5)
    particle_layer = ImageEnhance.Brightness(particle_layer).enhance(2.2)
    particle_layer = particle_layer.filter(
        ImageFilter.GaussianBlur(radius=1.5))
    return particle_layer
    session_login = requests.Session()
    url = "https://portal.nctu.edu.tw/portal/login.php"
    login_url = "https://portal.nctu.edu.tw/portal/chkpas.php?"
    t_login = session_login.get(url)

    obj = BeautifulSoup(t_login.text, "html.parser")
    img_ = obj.find("img", {"id": "captcha"})
    img_link = urllib.parse.urljoin(url, img_['src'])

    t = session_login.get(img_link)
    with open("captcha.jpg", "wb") as f:
        f.write(t.content)
        f.close()

    img_open = Image.open('captcha.jpg').convert('LA')
    en_img_c = ImageEnhance.Contrast(img_open).enhance(2)
    en_img_b = ImageEnhance.Brightness(en_img_c).enhance(2)
    text = pytesseract.image_to_string(en_img_b)

    username = '******'
    password = '******'

    payload = {
        'username': username,
        'password': password,
        'seccode': text,
        'Submit2': "登入(Login)",
        'pwdtype': "static"
    }

    login = session_login.post(login_url, data=payload)
Example #28
def contrast(image):
    enh = ImageEnhance.Contrast(image)
    contra = enh.enhance(3.0)
    return contra
Example #29
def main(lat, lon, sensor, level, start_date, end_date, buffer_size, res,
         bands, output, saveintermediary, max_images, singleenhancement,
         enhancement, percentiles, contrast_factor, brightness_factor,
         duration, taboo_index, stac_endpoint):
    """ Create animated GIF from CBERS 4 data"""

    rgb = bands.split(',')
    assert len(rgb) == 3, "Exactly 3 bands must be defined"

    percents = percentiles.split(',')
    assert len(percents) == 2, 'Two percentiles must be defined'
    p_min = int(percents[0])
    p_max = int(percents[1])

    taboo_list = list()
    if taboo_index:
        for item in taboo_index.split(','):
            taboo_list.append(int(item))

    scenes = utils.search(sensor=sensor,
                          mode='stac',
                          lon=lon,
                          lat=lat,
                          level=None if level == 'all' else level,
                          start_date=start_date,
                          end_date=end_date,
                          stac_endpoint=stac_endpoint)
    click.echo('{} scenes found'.format(len(scenes)))

    # Output transform
    aoi_wgs84 = utils.lonlat_to_geojson(lon, lat, buffer_size)
    aoi_bounds = utils.feat_to_bounds(aoi_wgs84)  # (minx, miny, maxx, maxy)
    width = int((aoi_bounds[2] - aoi_bounds[0]) / float(res))
    height = int((aoi_bounds[3] - aoi_bounds[1]) / float(res))
    #dst_affine = transform.from_bounds(*aoi_bounds, width, height)

    s3_bucket = 'cbers-pds'

    images = []

    p_min_value = [None] * 3
    p_max_value = [None] * 3

    for scene_no, scene in enumerate(scenes):

        if scene_no in taboo_list:
            print('Skipping scene {}'.format(scene_no))
            continue

        if scene_no >= max_images:
            break

        print(scene)
        s3_key = 's3://{bucket}/{dir}'.format(bucket=s3_bucket,
                                              dir=scene['key'])
        print(s3_key)

        out = np.zeros((3, height, width), dtype=np.uint8)

        for band_no, band in enumerate(rgb):

            matrix = utils.get_frame_matrix(s3_key, band, scene, aoi_bounds,
                                            width, height)

            if (scene_no == 0 or not singleenhancement) and enhancement:
                p_min_value[band_no], \
                    p_max_value[band_no] = np.\
                                           percentile(matrix[matrix > 0],
                                                      (p_min, p_max))
                print('{}, {}-{}, {}-{}'.format(band_no, p_min, p_max,
                                                p_min_value, p_max_value))

            if enhancement:
                matrix = np.where(matrix > 0,
                                  utils.\
                                  linear_rescale(matrix,
                                                 in_range=\
                                                 [int(p_min_value[band_no]),
                                                  int(p_max_value[band_no])],
                                                 out_range=[1, 255]),
                                  0)

            out[band_no] = matrix.astype(np.uint8)

        img = Image.fromarray(np.dstack(out))

        if saveintermediary:
            img.save('{}.bmp'.format(scene_no))

        contrast = ImageEnhance.Contrast(img)
        enh_image = contrast.enhance(contrast_factor)
        enh_image = ImageEnhance.Brightness(enh_image).enhance(brightness_factor)

        text_value = '%d, %s' % (scene_no, scene['acquisition_date'])
        draw = ImageDraw.Draw(enh_image)
        xst, yst = draw.textsize(text_value, font=FONT)
        draw.rectangle([(5, 5), (xst + 15, yst + 15)], fill=(255, 255, 255))
        draw.text((10, 10), text_value, (0, 0, 0), font=FONT)

        images.append(enh_image)

    if images:
        utils.save_animated_gif(output, images, duration=duration)
Example #30
 def __init__(self, Numbers=None, max_Magnitude=None):
     self.transforms = [
         'autocontrast', 'equalize', 'rotate', 'solarize', 'color',
         'posterize', 'contrast', 'brightness', 'sharpness', 'shearX',
         'shearY', 'translateX', 'translateY'
     ]
     if Numbers is None:
         self.Numbers = len(self.transforms) // 2
     else:
         self.Numbers = Numbers
     if max_Magnitude is None:
         self.max_Magnitude = 10
     else:
         self.max_Magnitude = max_Magnitude
     fillcolor = 128
     self.ranges = {
         # test these magnitude ranges yourself and see what happens after each operation;
         # there is no need to follow the values in autoaugment.py
         "shearX": np.linspace(0, 0.3, 10),
         "shearY": np.linspace(0, 0.3, 10),
         "translateX": np.linspace(0, 0.2, 10),
         "translateY": np.linspace(0, 0.2, 10),
         "rotate": np.linspace(0, 360, 10),
         "color": np.linspace(0.0, 0.9, 10),
         "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
         "solarize": np.linspace(256, 231, 10),
         "contrast": np.linspace(0.0, 0.5, 10),
         "sharpness": np.linspace(0.0, 0.9, 10),
         "brightness": np.linspace(0.0, 0.3, 10),
         "autocontrast": [0] * 10,
         "equalize": [0] * 10,
         "invert": [0] * 10
     }
     self.func = {
         "shearX":
         lambda img, magnitude: img.transform(img.size,
                                              Image.AFFINE,
                                              (1, magnitude * random.choice(
                                                  [-1, 1]), 0, 0, 1, 0),
                                              Image.BICUBIC,
                                              fillcolor=fillcolor),
         "shearY":
         lambda img, magnitude: img.transform(img.size,
                                              Image.AFFINE,
                                              (1, 0, 0, magnitude * random.
                                               choice([-1, 1]), 1, 0),
                                              Image.BICUBIC,
                                              fillcolor=fillcolor),
         "translateX":
         lambda img, magnitude: img.transform(
             img.size,
             Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice(
                 [-1, 1]), 0, 1, 0),
             fillcolor=fillcolor),
         "translateY":
         lambda img, magnitude: img.transform(
             img.size,
             Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.
                            choice([-1, 1])),
             fillcolor=fillcolor),
         "rotate":
         lambda img, magnitude: self.rotate_with_fill(img, magnitude),
         # "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
         "color":
         lambda img, magnitude: ImageEnhance.Color(img).enhance(
             1 + magnitude * random.choice([-1, 1])),
         "posterize":
         lambda img, magnitude: ImageOps.posterize(img, magnitude),
         "solarize":
         lambda img, magnitude: ImageOps.solarize(img, magnitude),
         "contrast":
         lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
             1 + magnitude * random.choice([-1, 1])),
         "sharpness":
         lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
             1 + magnitude * random.choice([-1, 1])),
         "brightness":
         lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
             1 + magnitude * random.choice([-1, 1])),
         "autocontrast":
         lambda img, magnitude: ImageOps.autocontrast(img),
         "equalize":
         lambda img, magnitude: ImageOps.equalize(img),
         "invert":
         lambda img, magnitude: ImageOps.invert(img)
     }