Example #1
def wand1():
    """This is Python Wand example 1"""
    the_time = t.asctime()

    print "Importing image ", IFILE
    img_1 = Image(filename=IFILE)

    print "Cropping and resizing the image"
    img_1.crop(300, 0, width=300, height=282)
    img_1.resize(width=600, height=564)

    print "Creating a drawing and overlaying on it"
    draw = Drawing()

    draw.circle((100, 100), (120, 120))

    draw.rectangle(left=img_1.width-300, top=img_1.height-45, width=230,
               height=40, radius=5)

    draw.font_size = 17
    draw.fill_color = Color('white')
    draw.text_color = Color('white')
    draw.text(img_1.width-290, img_1.height-20, the_time)
    draw(img_1)

    print "Displaying, close the XTERM when done"
    display(img_1)
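
wand1() relies on module-level imports and an IFILE constant that the snippet does not show; a minimal sketch of the assumed setup (the input path is hypothetical):

import time as t                      # t.asctime() supplies the caption text

from wand.color import Color
from wand.display import display     # opens the result in an external viewer
from wand.drawing import Drawing
from wand.image import Image

IFILE = 'input.jpg'                   # hypothetical input path
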
Example #2
    def compose_image_slide(self, image_path=None, text=None, slide_id=1):

        image_display_size = (300, 190)

        key = '%s-%s-%03d' % (self.emission.uuid, self.content_object.uuid, slide_id)
        path = os.path.join(SLIDE_BASE_DIR, key + '.{}'.format(IMAGE_OUTPUT_FORMAT))
        url = SLIDE_BASE_URL + key + '.{}'.format(IMAGE_OUTPUT_FORMAT)

        overlay_image = Image(filename=image_path)

        with Drawing() as draw:

            size = overlay_image.size

            if size[0] > size[1]:
                orientation = 'landscape'
                scale = float(image_display_size[1]) / float(size[1])
            else:
                orientation = 'portrait'
                scale = float(image_display_size[1]) / float(size[0])

            overlay_image.resize(int(size[0] * scale), int(size[1] * scale))

            #size = overlay_image.size

            width = 190
            height = 190
            overlay_image.crop(10, 0, width=width, height=height)

            draw.composite('over', left=int(width / 2) - 20, top=10, width=width, height=height, image=overlay_image)

            # text settings
            draw.font = SLIDE_BASE_FONT
            draw.font_size = 14
            draw.text_interline_spacing = 8
            draw.fill_color = Color('white')
            draw.text_antialias = True

            # draw text
            if text:
                draw.text(220, 10, text)

            # compose image
            with Image(filename=SLIDE_BASE_IMAGE) as image:
                draw(image)

                if IMAGE_OUTPUT_FORMAT == 'jpg':
                    image.compression_quality = 62
                    image.format = 'jpeg'

                image.save(filename=path)
                image.save(filename=os.path.join(SLIDE_BASE_DIR, 'debug-{}.{}'.format(slide_id, IMAGE_OUTPUT_FORMAT)))

        try:
            overlay_image.close()
        except Exception as e:
            # TODO: use narrowed exception(s)
            log.warning('unable to close magick/wand overlay image - {}'.format(e))

        return url
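
compose_image_slide() crops with overlay_image.crop(10, 0, width=width, height=height), i.e. Wand's left/top plus width/height form, which keeps a fixed-size window instead of naming a right/bottom edge. A minimal sketch of that form with a hypothetical file:

from wand.image import Image

# Hypothetical input; keep a 190x190 window starting 10px from the left edge.
with Image(filename='photo.jpg') as img:
    img.crop(10, 0, width=190, height=190)   # left, top, width=, height=
    img.save(filename='photo-cropped.jpg')
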
Example #3
def convert_image(pngfile, pf, outdir=".",
                  resize=1000, format="jpeg", rotate=0,
                  rows=':', cols=':', labelrows=None, labelcols=None):
    resizefile = op.join(outdir, pf + ".resize.jpg")
    mainfile = op.join(outdir, pf + ".main.jpg")
    labelfile = op.join(outdir, pf + ".label.jpg")
    img = Image(filename=pngfile)
    exif = dict((k, v) for k, v in img.metadata.items() if k.startswith('exif:'))

    # Rotation, slicing and cropping of main image
    if rotate:
        img.rotate(rotate)
    if resize:
        w, h = img.size
        if min(w, h) > resize:
            if w < h:
                nw, nh = resize, resize * h / w
            else:
                nw, nh = resize * w / h, resize
            img.resize(nw, nh)
            logging.debug("Image `{0}` resized from {1}px:{2}px to {3}px:{4}px".\
                            format(pngfile, w, h, nw, nh))
    img.format = format
    img.save(filename=resizefile)

    rimg = img.clone()
    if rows != ':' or cols != ':':
        w, h = img.size
        ra, rb = slice(rows, h)
        ca, cb = slice(cols, w)
        # left, top, right, bottom
        logging.debug("Crop image to {0}:{1} {2}:{3}".format(ra, rb, ca, cb))
        img.crop(ca, ra, cb, rb)
        img.format = format
        img.save(filename=mainfile)
    else:
        mainfile = resizefile

    # Extract text labels from image
    if labelrows or labelcols:
        w, h = rimg.size
        if labelrows and not labelcols:
            labelcols = ':'
        if labelcols and not labelrows:
            labelrows = ':'
        ra, rb = slice(labelrows, h)
        ca, cb = slice(labelcols, w)
        logging.debug("Extract label from {0}:{1} {2}:{3}".format(ra, rb, ca, cb))
        rimg.crop(ca, ra, cb, rb)
        rimg.format = format
        rimg.save(filename=labelfile)
    else:
        labelfile = None

    return resizefile, mainfile, labelfile, exif
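
Note that convert_image() passes the crop bounds positionally; in Wand the positional order is left, top, right, bottom, as the inline comment records. A small sketch with a hypothetical file:

from wand.image import Image

with Image(filename='scan.png') as img:    # hypothetical file
    img.crop(100, 50, 400, 350)            # positional order: left, top, right, bottom
    img.save(filename='scan-main.jpg')     # format follows the new extension
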
Example #4
def animate_slice(sliceviewer, name, start, end, filename, num_frames=10, font_size=24):
    """Generate an animated gif of a 2D slice moving through a third dimension.

    Args:
        sliceviewer (SliceViewer): A sliceviewer instance.
        name (str): The name of the third dimension to use.
        start (float): The starting value of the third dimension.
        end (float): The end value of the third dimension.
        filename (str): The file to save the gif to.

    Kwargs:
        num_frames (int): The number of frames the gif should contain.
        font_size: (int): The size of the caption.

    Example:
        ws = CreateMDWorkspace(3, Extents=[-10,10,-10,10,-10,10], Names=["X","Y","Z"], Units=["u","u","u"])
        FakeMDEventData(ws, PeakParams=[10000,0,0,0,1])
        sv = plotSlice(ws)
        #Resize and configure the slice viewer how you want the output to look
        sv.setNormalization(1) # We need to normalize by volume in this case, or the data won't show up
        #This will create a gif iterating from Z = -1 to Z = 1
        animate_slice(sv, "Z", -1, 1, "output.gif")
    """
    #Generate all the individual frames
    images = []
    for slice_point in numpy.linspace(start, end, num_frames):
        sliceviewer.setSlicePoint(name, slice_point)
        sliceviewer.refreshRebin()
        qimg = sliceviewer.getImage().toImage()
        data = QByteArray()
        buf = QBuffer(data)
        qimg.save(buf, "PNG")
        image = Image(blob=str(data))
        captionstrip_size = font_size + 10
        #To add whitespace to the top, we add a vertical border,
        #then crop out the bottom border
        image.border(Color("#fff"), 0, captionstrip_size)
        image.crop(0, 0, image.width, image.height - captionstrip_size)
        #Write the caption into the whitespace
        draw = Drawing()
        draw.font_size = font_size
        draw.text(5, font_size,"%s = %g" % (name,slice_point))
        draw(image)
        images.append(image)
    #Create a new image with the right dimensions
    animation = Image(width=images[0].width, height=images[0].height)
    #Copy in the frames from all the generated images
    for image in images:
        animation.sequence.append(image.sequence[0])
    #Drop the initial blank frame
    del animation.sequence[0]
    #Write the animation to disk
    animation.save(filename=filename)
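
animate_slice() assembles the GIF by appending each frame's first SingleImage onto a container image's sequence and then deleting the blank frame the container starts with. A stripped-down sketch of the same pattern with synthetic frames:

from wand.color import Color
from wand.image import Image

# Three synthetic solid-colour frames stand in for the slice images.
frames = [Image(width=100, height=100, background=Color(c))
          for c in ('red', 'green', 'blue')]

animation = Image(width=100, height=100)      # container starts with one blank frame
for frame in frames:
    animation.sequence.append(frame.sequence[0])
del animation.sequence[0]                     # drop the initial blank frame
animation.save(filename='demo.gif')
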
Example #5
def renderAnimation(animationName, numFrames):
    print("Rendering animation: " + animationName)
    dirToFramesMapping = dict()
    
    animationDirectory =  workingDirectory + animationName + "/"
 
    for key, value in directions.items():
        directory = animationDirectory + value + "/"
        frames = []
        
        for i in range(0, numFrames):
            frameBuffer = Image(filename=(directory + animationName + "_" + str(i + 1) + ".png"));
            
            if(frameBuffer.width > frameWidth):
                frameBuffer.crop(int((frameBuffer.width - frameWidth)/2), 0, width=frameWidth, height=frameBuffer.height)
        
            if(frameBuffer.height > frameHeight):
                frameBuffer.crop(0, int((frameBuffer.height - frameHeight)/2), frameBuffer.width, height=frameHeight)
        
            newBuffer = Image(width=frameWidth, height=frameHeight)
            newBuffer.composite(frameBuffer, int((newBuffer.width - frameBuffer.width) / 2), int((newBuffer.height - frameBuffer.height) / 2))
            frameBuffer = newBuffer
        
            frames.append(frameBuffer)
            
        dirToFramesMapping[key] = frames
    
    directionAnimations = dict()
    
    for key, value in dirToFramesMapping.items():
        directionAnimationBuffer = Image(width=frameWidth * numFrames, height=frameHeight)
        for i in range(0, len(value)):
            frameBuffer = value[i]
            directionAnimationBuffer.composite(frameBuffer, i * frameWidth, 0)
         
        directionAnimations[key] = directionAnimationBuffer
    
    animation = Image(width=frameWidth * numFrames, height=frameHeight * len(directions))
    animation.composite(directionAnimations["n"], 0, 0)
    animation.composite(directionAnimations["ne"], 0, frameHeight)
    animation.composite(directionAnimations["e"], 0, frameHeight * 2)
    animation.composite(directionAnimations["se"], 0, frameHeight * 3)
    animation.composite(directionAnimations["s"], 0, frameHeight * 4)
    animation.composite(directionAnimations["sw"], 0, frameHeight * 5)
    animation.composite(directionAnimations["w"], 0, frameHeight * 6)
    animation.composite(directionAnimations["nw"], 0, frameHeight * 7)
    
    animation.save(filename=animationDirectory + animationName + ".png")
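
renderAnimation() depends on module-level names that are not shown (workingDirectory, directions, frameWidth, frameHeight). A hypothetical sketch of that configuration; only the direction keys are taken from the code itself:

# Hypothetical module-level configuration assumed by renderAnimation() above.
workingDirectory = './renders/'
frameWidth, frameHeight = 64, 64
directions = {'n': 'north', 'ne': 'northeast', 'e': 'east', 'se': 'southeast',
              's': 'south', 'sw': 'southwest', 'w': 'west', 'nw': 'northwest'}
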
Example #6
 def magick_crop(self, image_url=None, dataX=0, dataY=0, dataWidth=0, dataHeight=0, dataRotate=0, dataScaleX=1, dataScaleY=1, **kw):
     if 'ir.attachment' in image_url:
         # binary -> decode -> wand.image -> imagemagick -> make_blob() -> encode -> binary
         img_attachment = request.env['ir.attachment'].browse(int(image_url.split('/')[4].split('_')[0]))
         wand_img = Image(blob=getattr(img_attachment, 'datas').decode('base64'))
         try:
             wand_img.crop(int(dataX), int(dataY), width=int(dataWidth), height=int(dataHeight))
             if dataScaleX and dataScaleY:
                 wand_img.resize(int(wand_img.width * float(dataScaleX)), int(wand_img.height * float(dataScaleY)))
             if dataRotate:
                 wand_img.rotate(int(dataRotate))
             img_attachment.write({ 'datas': wand_img.make_blob().encode('base64') })
         except Exception as e:
             return ': '.join(e)
         return 'Magic Crop Completed!'
     else:
         return 'Please using attachment as image!'
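
magick_crop() round-trips the attachment through the Python 2-only str.decode('base64') / encode('base64') idiom; on Python 3 the same conversion would go through the base64 module. A self-contained sketch of that round trip with synthetic data:

import base64

from wand.color import Color
from wand.image import Image

# Build a small base64-encoded PNG to stand in for the stored attachment data.
with Image(width=200, height=150, background=Color('gray')) as src:
    encoded = base64.b64encode(src.make_blob('png'))

wand_img = Image(blob=base64.b64decode(encoded))             # decode -> wand.image
wand_img.crop(10, 10, width=100, height=100)
encoded_back = base64.b64encode(wand_img.make_blob('png'))   # re-encode for storage
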
Example #7
File: pattern.py Project: arnfred/Knitwit
def open_image(path, colors, width = 60, crop = None, gauge = [40,40]) :
    # Find height ratio
    height_ratio = gauge[1] / float(gauge[0])

    # Open image, resize and posterize
    with open(path) as fp :
        # Open image
        image = Image(file=fp)
        # Crop
        if crop != None :
            image.crop(crop['x'], crop['y'], crop['w'] + crop['x'], crop['h'] + crop['y'])
            # Resize to width and height ratio
            resize(image, width, height_ratio)
            # Get data
            data = get_data(PImage.open(StringIO.StringIO(image.make_blob('ppm'))))
            # Posterize image to fewer colors
    return posterize(data, colors)
Example #8
 def magick_crop(self, image_url=None, dataX=0, dataY=0, dataWidth=0, dataHeight=0, dataRotate=0, dataScaleX=1, dataScaleY=1, **kw):
     if 'ir.attachment' in image_url:
         # binary -> decode -> wand.image -> imagemagick -> make_blob() -> encode -> binary
         img_attachment = request.env['ir.attachment'].browse(int(image_url.split('/')[4].split('_')[0]))
         wand_img = Image(blob=getattr(img_attachment, 'datas').decode('base64'))
         try:
             wand_img.crop(int(dataX), int(dataY), width=int(dataWidth), height=int(dataHeight))
             if dataScaleX and dataScaleY:
                 wand_img.resize(int(wand_img.width * float(dataScaleX)), int(wand_img.height * float(dataScaleY)))
             if dataRotate:
                 wand_img.rotate(int(dataRotate))
             img_attachment.write({ 'datas': wand_img.make_blob().encode('base64') })
         except Exception as e:
             return ': '.join(e)
         return 'Magic Crop Completed!'
     else:
         return 'Please using attachment as image!'
Example #9
def renderAnimation(animationName):
    print("Rendering animation: " + animationName)
    dirToFramesMapping = dict()

    animationDirectory = workingDirectory + animationName + "/"

    for key, value in directions.items():

        frameBuffer = Image(filename=(animationDirectory + animationName + "_" + value + ".png"))

        if frameBuffer.width > frameWidth:
            frameBuffer.crop(int((frameBuffer.width - frameWidth) / 2), 0, width=frameWidth, height=frameBuffer.height)

        if frameBuffer.height > frameHeight:
            frameBuffer.crop(0, int((frameBuffer.height - frameHeight) / 2), frameBuffer.width, height=frameHeight)

        newBuffer = Image(width=frameWidth, height=frameHeight)
        newBuffer.composite(
            frameBuffer,
            int((newBuffer.width - frameBuffer.width) / 2),
            int((newBuffer.height - frameBuffer.height) / 2),
        )
        frameBuffer = newBuffer

        dirToFramesMapping[key] = frameBuffer

    directionAnimations = dict()

    for key, value in dirToFramesMapping.items():
        directionAnimationBuffer = Image(width=frameWidth, height=frameHeight)
        directionAnimationBuffer.composite(value, 0, 0)

        directionAnimations[key] = directionAnimationBuffer

    animation = Image(width=frameWidth, height=frameHeight * len(directions))
    animation.composite(directionAnimations["n"], 0, 0)
    animation.composite(directionAnimations["ne"], 0, frameHeight)
    animation.composite(directionAnimations["e"], 0, frameHeight * 2)
    animation.composite(directionAnimations["se"], 0, frameHeight * 3)
    animation.composite(directionAnimations["s"], 0, frameHeight * 4)
    animation.composite(directionAnimations["sw"], 0, frameHeight * 5)
    animation.composite(directionAnimations["w"], 0, frameHeight * 6)
    animation.composite(directionAnimations["nw"], 0, frameHeight * 7)

    animation.save(filename=animationDirectory + animationName + ".png")
Example #10
def get_element_screenshot(element: WebElement) -> bytes:
    driver = element._parent
    ActionChains(driver).move_to_element(element).perform()  # focus
    src_base64 = driver.get_screenshot_as_base64()
    scr_png = b64decode(src_base64)
    scr_img = Image(blob=scr_png)

    x = element.location["x"] + 750
    y = element.location["y"] + 415
    w = element.size["width"]
    h = element.size["height"]
    scr_img.crop(
        left=math.floor(x),
        top=math.floor(y),
        width=math.ceil(w),
        height=math.ceil(h),
    )
    return scr_img.make_blob()
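
get_element_screenshot() relies on imports that sit outside the excerpt (math, base64.b64decode, wand.image.Image, and Selenium's ActionChains/WebElement), and the +750/+415 offsets look specific to the original environment. A hypothetical call site, assuming a running WebDriver session:

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Firefox()                 # any WebDriver works here
driver.get('https://example.com')            # hypothetical page
png_bytes = get_element_screenshot(driver.find_element(By.TAG_NAME, 'h1'))
with open('element.png', 'wb') as fh:
    fh.write(png_bytes)
driver.quit()
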
Example #11
def resizeImage(imagePath, fullsize):
    mon_aspect = fullsize.x / fullsize.y
    img = Image(filename=imagePath)
    img_aspect = img.width / img.height
    if img_aspect > mon_aspect:
        if img.height >= fullsize.y:
            img.transform(resize='x' + str(fullsize.y))
            leftc = (img.width - fullsize.x) / 2
            leftc = round(leftc)
            print("everything trueee")
            img.crop(leftc, 0, width=fullsize.x, height=fullsize.y)
        else:
            print("img.height < fullsize.y")
    else:
        img.transform(resize=str(fullsize.x) + 'x')
        topc = (img.height - fullsize.y) / 2
        topc = round(topc)
        img.crop(0, topc, width=fullsize.x, height=fullsize.y)
    return img
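
resizeImage() leans on ImageMagick geometry strings: transform(resize='x1080') scales so the height becomes 1080 while keeping the aspect ratio, and '1920x' would do the same for width. A minimal sketch with a hypothetical file, assuming the source is wider than the target after resizing:

from wand.image import Image

with Image(filename='wallpaper.jpg') as img:   # hypothetical file
    img.transform(resize='x1080')              # 'xN' fits the height, keeps aspect ratio
    left = round((img.width - 1920) / 2)       # centre a 1920-wide window
    img.crop(left, 0, width=1920, height=1080)
    img.save(filename='wallpaper-1920x1080.jpg')
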
Example #12
def open_image(path, colors, width=60, crop=None, gauge=[40, 40]):
    # Find height ratio
    height_ratio = gauge[1] / float(gauge[0])

    # Open image, resize and posterize
    with open(path) as fp:
        # Open image
        image = Image(file=fp)
        # Crop
        if crop != None:
            image.crop(crop['x'], crop['y'], crop['w'] + crop['x'],
                       crop['h'] + crop['y'])
            # Resize to width and height ratio
            resize(image, width, height_ratio)
            # Get data
            data = get_data(
                PImage.open(StringIO.StringIO(image.make_blob('ppm'))))
            # Posterize image to fewer colors
    return posterize(data, colors)
Example #13
File: browser.py Project: kmcintyre/scewpt
    def post_process(self, ig, incoming):
        from qt import qt5
        from qt.channels import InstagramPositioningAction
        from wand.image import Image

        location = '/tmp/instagram/temp_' + fixed.digest(
            ig[instagram_keys.instagram_url]) + '.png'
        qt5.app.toImage(location)

        instagram_action = InstagramPositioningAction()
        instagram_d = defer.Deferred()
        self.set_action(instagram_action, instagram_d)
        self.page().runJavaScript(instagram_action.js_script())
        instagram_positioning = yield instagram_d

        print('instagram positioning:', instagram_positioning)
        if instagram_positioning:
            instagram_im = Image(filename=location)
            instagram_im.crop(left=instagram_positioning[3],
                              top=instagram_positioning[0],
                              right=instagram_positioning[1],
                              bottom=instagram_positioning[2])
            instagram_clip = '/tmp/instagram/instagram_' + fixed.digest(
                ig[instagram_keys.instagram_url]) + '.png'
            instagram_im.save(filename=instagram_clip)

            print('send_image:', instagram_clip)
            try:
                io = StringIO.StringIO(open(instagram_clip,
                                            'r').read()).getvalue()
                fi = self.bundle_instagram(incoming._data, io)
                print('send mime:', len(io))
                self.protocol.sendMessage(fi, True)
            except Exception as e:
                print('send image exception:', e)
        if Tweet.self_instagrams in incoming:
            for i in incoming[Tweet.self_instagrams]:
                self.check_avatar(incoming, i)
        elif Tweet.known_instagrams in incoming:
            for i in incoming[Tweet.known_instagrams]:
                self.check_avatar(i, i)
Example #14
def load_one_bit_byte_array(image_filename):
    try:
        img = Image(filename=image_filename)

        # scale
        # option 1: seam carving
        #img.liquid_rescale(176,264)

        # option 2: scale with aspect ratio:
        img.transform(resize='176x264^')
        img.crop(width=176, height=264)

        # OrderedDitherImage would allow using 'h4x4a' dither but it's not in python API?
        img.type = 'bilevel'
        # screen is rotated
        img.rotate(90)

        # Write a binary array of b/w pixels
        return _image_to_one_bit_byte_array(img)
    except BlobError:
        img = None
        return None
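
load_one_bit_byte_array() catches a Wand-specific exception and uses the Wand Image type; a short sketch of the imports it assumes (the _image_to_one_bit_byte_array helper is project-specific and not shown):

# Imports assumed by load_one_bit_byte_array() above.
from wand.exceptions import BlobError   # raised when the blob/file cannot be decoded
from wand.image import Image
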
Example #15
    def resize(self, img: WandImage, mult: float) -> WandImage:
        # No need to waste our resources for resizing by the same exact multiplier (1:1)
        if mult == 1.0:
            return img
        # Or even invalid multiplier (raise exception instead)
        elif mult <= 0:
            raise ValueError("Image resize multiplier must not be less than 0!")

        # Resize the image using the multiplier
        new_width = round(img.width * mult)
        new_height = round(img.height * mult)
        img.adaptive_resize(new_width, new_height)

        # Then crop the image so it's centered
        crop_left = round((self.max_width - img.width) / 2)
        if crop_left < 0:
            crop_left = 0
        crop_right = crop_left + self.max_width
        if crop_right > img.width:
            crop_right = img.width
        img.crop(crop_left, 0, crop_right, self.max_height)

        return img
Example #16
def create_image(path, width, height):
    image = Image(filename=path)
    if (image.width >= width or image.height >= height):
        # width is the governing dimension
        h = height
        w = width
        if (image.width / width < image.height / height):
            h = int(round(image.height / (image.width / width)))
        # height is the governing dimension
        else:
            w = int(round(image.width / (image.height / height)))

        image.resize(w, h)
        image.crop(width=width, height=height, gravity='center')
        return image
    else:
        outerImg = Image(width=width, height=height, background=image[1][1])
        outerImg.format = image.format.lower()
        outerImg.composite(image,
                           left=int((width - image.width) / 2),
                           top=int((height - image.height) / 2))
        image.close()
        return outerImg
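
create_image() uses the gravity='center' form of crop, which in recent Wand versions trims the window out of the middle of the image. A minimal sketch with a hypothetical file:

from wand.image import Image

with Image(filename='portrait.jpg') as img:             # hypothetical file
    img.resize(800, 600)
    img.crop(width=400, height=400, gravity='center')   # centred 400x400 window
    img.save(filename='portrait-square.jpg')
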
Example #17
    def do_cube(img):
        with WandImage(blob=img) as image:

            def s(x):
                return int(x / 3)

            image.resize(s(1000), s(860))
            image.format = "png"
            image.alpha_channel = 'opaque'

            image1 = image
            image2 = WandImage(image1)

            out = WandImage(width=s(3000 - 450), height=s(860 - 100) * 3)
            out.format = "png"

            image1.shear(background=wand.color.Color("none"), x=-30)
            image1.rotate(-30)
            out.composite(image1, left=s(500 - 250), top=s(0 - 230) + s(118))
            image1.close()

            image2.shear(background=wand.color.Color("rgba(0,0,0,0)"), x=30)
            image2.rotate(-30)
            image3 = WandImage(image2)
            out.composite(image2, left=s(1000 - 250) - s(72), top=s(860 - 230))
            image2.close()

            image3.flip()
            out.composite(image3, left=s(0 - 250) + s(68), top=s(860 - 230))
            image3.close()

            out.crop(left=80, top=40, right=665, bottom=710)

            buffer = BytesIO()
            out.save(buffer)
            buffer.seek(0)
            return buffer
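
do_cube() references a few names defined outside the excerpt; a sketch of the imports it appears to assume:

# Imports assumed by do_cube() above.
import wand.color                          # wand.color.Color for the shear background
from io import BytesIO
from wand.image import Image as WandImage
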
Example #18
def convert_pdf_to_jpg(tablename, nrows):
    print("Converting")
    path = os.getcwd() + '\\figures\\'
    filename = path + tablename + '_OUTPUT.pdf'
    img = Image(filename=filename, resolution=500)
    imgname = path + tablename + '_OUTPUT.jpeg'
    img.convert('jpeg').save(filename=imgname)

    import PIL
    img = PIL.Image.open(imgname)  # open the image

    box = (360, 490, 3733, 510 + 95 * nrows)  # crop box sized according to nrows
    clip = img.crop(box)
    clip.save(imgname)
    os.remove(filename)
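
Rasterising a PDF with Wand goes through ImageMagick's Ghostscript delegate, and the resolution has to be given at load time as above. A minimal standalone sketch with hypothetical file names:

from wand.image import Image

with Image(filename='table_OUTPUT.pdf', resolution=500) as pdf:   # hypothetical file
    with pdf.convert('jpeg') as jpg:
        jpg.save(filename='table_OUTPUT.jpeg')
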
Example #19
File: grabseeds.py Project: fossabot/jcvi
def convert_image(
    pngfile,
    pf,
    outdir=".",
    resize=1000,
    format="jpeg",
    rotate=0,
    rows=":",
    cols=":",
    labelrows=None,
    labelcols=None,
):
    resizefile = op.join(outdir, pf + ".resize.jpg")
    mainfile = op.join(outdir, pf + ".main.jpg")
    labelfile = op.join(outdir, pf + ".label.jpg")
    img = Image(filename=pngfile)
    exif = dict(
        (k, v) for k, v in img.metadata.items() if k.startswith("exif:"))

    # Rotation, slicing and cropping of main image
    if rotate:
        img.rotate(rotate)
    if resize:
        w, h = img.size
        if min(w, h) > resize:
            if w < h:
                nw, nh = resize, resize * h / w
            else:
                nw, nh = resize * w / h, resize
            img.resize(nw, nh)
            logging.debug(
                "Image `{0}` resized from {1}px:{2}px to {3}px:{4}px".format(
                    pngfile, w, h, nw, nh))
    img.format = format
    img.save(filename=resizefile)

    rimg = img.clone()
    if rows != ":" or cols != ":":
        w, h = img.size
        ra, rb = slice(rows, h)
        ca, cb = slice(cols, w)
        # left, top, right, bottom
        logging.debug("Crop image to {0}:{1} {2}:{3}".format(ra, rb, ca, cb))
        img.crop(ca, ra, cb, rb)
        img.format = format
        img.save(filename=mainfile)
    else:
        mainfile = resizefile

    # Extract text labels from image
    if labelrows or labelcols:
        w, h = rimg.size
        if labelrows and not labelcols:
            labelcols = ":"
        if labelcols and not labelrows:
            labelrows = ":"
        ra, rb = slice(labelrows, h)
        ca, cb = slice(labelcols, w)
        logging.debug("Extract label from {0}:{1} {2}:{3}".format(
            ra, rb, ca, cb))
        rimg.crop(ca, ra, cb, rb)
        rimg.format = format
        rimg.save(filename=labelfile)
    else:
        labelfile = None

    return resizefile, mainfile, labelfile, exif
Example #20
    color2 = color[3:6]
    color3 = color[6:9]
    # --------------------------------------------------------------

    # ----------get the base layer texture--------------------------
    Scenes = pathwalk('.\\SceneData\\')
    randomScene = random.choice(Scenes)
    randomScene = randomScene[0] + randomScene[1]
    # print(randomScene)
    randomSceneImage = Image(filename=randomScene)

    widthRange = randomSceneImage.size[0] - 100
    heightRange = randomSceneImage.size[1] - 32

    randomSceneImage.crop(left=random.randint(0, widthRange),
                          top=random.randint(0, heightRange),
                          width=100,
                          height=32)
    # randomSceneImage.save(filename='.\\photoWand\\'+str(j+1) + '_texture.jpg')
    # --------------------------------------------------------------

    # ----------create the base layer, base texture +base color-----
    baseImage = Image(
        width=100,
        height=32,
        background=Color('rgb(' + str(color1[0]) + ',' + str(color1[1]) + ',' +
                         str(color1[2]) + ')'))

    # print('base_color = ' + 'rgb('+str(color1[0])+','+str(color1[1])+','+str(color1[2])+')')
    baseImage.composite_channel(channel='undefined',
                                image=randomSceneImage,
                                operator='blend',
Example #21
    def compose_image_slide(self, image_path=None, text=None, slide_id=1):

        image_display_size = (300, 190)

        key = '%s-%s-%03d' % (self.emission.uuid, self.content_object.uuid,
                              slide_id)
        path = os.path.join(SLIDE_BASE_DIR,
                            key + '.{}'.format(IMAGE_OUTPUT_FORMAT))
        url = SLIDE_BASE_URL + key + '.{}'.format(IMAGE_OUTPUT_FORMAT)

        overlay_image = Image(filename=image_path)

        with Drawing() as draw:

            size = overlay_image.size

            if size[0] > size[1]:
                orientation = 'landscape'
                scale = float(image_display_size[1]) / float(size[1])
            else:
                orientation = 'portrait'
                scale = float(image_display_size[1]) / float(size[0])

            overlay_image.resize(int(size[0] * scale), int(size[1] * scale))

            #size = overlay_image.size

            width = 190
            height = 190
            overlay_image.crop(10, 0, width=width, height=height)

            draw.composite('over',
                           left=int(width / 2) - 20,
                           top=10,
                           width=width,
                           height=height,
                           image=overlay_image)

            # text settings
            draw.font = SLIDE_BASE_FONT
            draw.font_size = 14
            draw.text_interline_spacing = 8
            draw.fill_color = Color('white')
            draw.text_antialias = True

            # draw text
            if text:
                draw.text(220, 10, text)

            # compose image
            with Image(filename=SLIDE_BASE_IMAGE) as image:
                draw(image)

                if IMAGE_OUTPUT_FORMAT == 'jpg':
                    image.compression_quality = 62
                    image.format = 'jpeg'

                image.save(filename=path)
                image.save(filename=os.path.join(
                    SLIDE_BASE_DIR, 'debug-{}.{}'.format(
                        slide_id, IMAGE_OUTPUT_FORMAT)))

        try:
            overlay_image.close()
        except Exception as e:
            # TODO: use narrowed exception(s)
            log.warning(
                'unable to close magick/wand overlay image - {}'.format(e))

        return url
Example #22
def benchmark(source):
    """
    run through all tests, timing along the way

    timeit gets mentioned many times:
    http://docs.python.org/3.4/library/timeit.html
    """

    print("Testing moments: ") 
    start_time = time.time()

    print("making path:")
    step_time = time.time()    
    pic = Path(source)
    print(time.time() - step_time, "seconds")
    print("")

    
    print("loading path:")
    step_time = time.time()    
    img = pic.load()
    print(time.time() - step_time, "seconds")
    print("")

    #13.3232491016 seconds:
    ## print "make thumbs: (all)"
    ## step_time = time.time()    
    ## img.make_thumbs()
    ## print time.time() - step_time, "seconds"
    ## print ""

    ## print "removing thumbs: (all)"
    ## step_time = time.time()    
    ## shutil.rmtree("sized")
    ## print time.time() - step_time, "seconds"
    ## print ""

    #3.2377550602 seconds:
    ## print "make thumbs: (tiny)"
    ## step_time = time.time()    
    ## img.make_thumbs(["tiny"])
    ## print time.time() - step_time, "seconds"
    ## print ""

    ## print "removing thumbs: (tiny)"
    ## step_time = time.time()    
    ## shutil.rmtree("sized")
    ## print time.time() - step_time, "seconds"
    ## print ""

    #now break it down to steps:

    #0.000612020492554 seconds
    ## print "make thumbs dirs()"
    ## step_time = time.time()    
    ## img.make_thumb_dirs()
    ## print time.time() - step_time, "seconds"
    ## print ""

    ## print "removing thumbs dirs"
    ## step_time = time.time()    
    ## shutil.rmtree("sized")
    ## print time.time() - step_time, "seconds"
    ## print ""

    #0.00586199760437 seconds
    print("Open image with PIL: ") 
    step_time = time.time()
    image = PILImage.open(source)
    print(time.time() - step_time, "seconds")
    print("")

    print("show sizes: ") 
    step_time = time.time()
    print(image.size)
    print(time.time() - step_time, "seconds")
    print("")

    #0.56491112709 seconds
    print("Copy image buffer in PIL: ") 
    step_time = time.time()
    square = image.copy()
    print(time.time() - step_time, "seconds")
    print("")

    print("resize max: ") 
    step_time = time.time()
    pre_crop = resize_max(400, image.size[0], image.size[1])
    print(pre_crop)
    print(time.time() - step_time, "seconds")
    print("")
    
    #0.108213186264 seconds
    print("Square image (includes copy)") 
    step_time = time.time()
    square = img._square_image(square)    
    print(time.time() - step_time, "seconds")
    print("")

    #xl = 2880
    #3.53499007225 seconds
    print("thumbnail image (2880, ANTIALIAS)") 
    step_time = time.time()
    #image.thumbnail((xl,xl), PILImage.ANTIALIAS)                
    square.thumbnail((400,400), PILImage.ANTIALIAS)                
    print(time.time() - step_time, "seconds")
    print("")

    print("After thumbnail: ", image.size)
    print("")
    
    print("save thumb") 
    step_time = time.time()
    #image.save("test.jpg", "JPEG")
    square.save("test1.jpg", "JPEG")
    print(time.time() - step_time, "seconds")
    print("")

    print("PIL alternative (total)") 
    step_time = time.time()
    #img = resize_pil_orig(image, (400, 400))
    img = resize_pil(image, 400)
    print(time.time() - step_time, "seconds")
    print("")

    img.save("test.jpg", "JPEG")
    
    #shutil.rmtree("sized")

    #os.remove("test.jpg")

    from wand.image import Image

    #0.572014093399 seconds
    print("wand open file") 
    step_time = time.time()
    img = Image(filename=source) 
    print(time.time() - step_time, "seconds")
    print("")

    print("wand clone image") 
    step_time = time.time()
    clone = img.clone()
    print(time.time() - step_time, "seconds")
    print("")
    
    print("wand get dimensions") 
    step_time = time.time()
    width = img.width
    height = img.height
    ## with Image(filename=source) as img:
    ##     width = img.width
    ##     height = img.height
    print(width, height)
    print(time.time() - step_time, "seconds")
    print("")
        
    print("wand crop image") 
    step_time = time.time()
    box = calculate_box(img.width, img.height)
    img.crop(*box)
    print(time.time() - step_time, "seconds")
    print("")

    print("wand save image") 
    step_time = time.time()
    img.save(filename='temp3.jpg')
    print(time.time() - step_time, "seconds")
    print("")

    #THIS TAKES A *LONG* TIME!!!!
    #273.595574856 seconds
    #and will only make a difference for certain types of images
    #made no difference for one test image
    ## print "wand liquid rescale" 
    ## step_time = time.time()
    ## liquid = clone.liquid_rescale(400, 400)
    ## print time.time() - step_time, "seconds"
    ## print ""

    ## print "wand save liquid" 
    ## step_time = time.time()
    ## img.save(filename='temp-liquid.jpg')
    ## print time.time() - step_time, "seconds"
    ## print ""

    #0.238882064819 seconds
    print("epeg resize") 
    step_time = time.time()
    #TODO
    #consider using subprocess...
    #any benefits?
    #os.system("jhead -cmd \"jpegtran -progressive -rotate %s &i > &o\" %s" % (degrees, self.path))
    os.system("epeg -m %s %s temp-epeg.jpg" % (pre_crop, source)) 
    print(time.time() - step_time, "seconds")
    print("")


    ## print "Testing: " 
    ## step_time = time.time()    
    ## print source
    ## print time.time() - step_time, "seconds"
    ## print ""

    print("TOTAL TIME:") 
    print(time.time() - start_time, "seconds")
Example #23
def download_image(request, datafile_id, region, size, rotation,
                   quality, format=None): #@ReservedAssignment
    # Get datafile (and return 404 if absent)
    try:
        datafile = DataFile.objects.get(pk=datafile_id)
    except DataFile.DoesNotExist:
        return HttpResponseNotFound()

    is_public = datafile.is_public()
    if not is_public:
        # Check users has access to datafile
        if not has_datafile_download_access(request=request,
                                            datafile_id=datafile.id):
            return HttpResponseNotFound()

    buf = StringIO()
    try:
        file_obj = datafile.get_image_data()
        if file_obj is None:
            return HttpResponseNotFound()
        from contextlib import closing
        with closing(file_obj) as f:
            with Image(file=f) as img:
                if len(img.sequence) > 1:
                    img = Image(img.sequence[0])
                # Handle region
                if region != 'full':
                    x, y, w, h = map(int, region.split(','))
                    img.crop(x, y, width=w, height=h)
                # Handle size
                if size != 'full':
                    # Check the image isn't empty
                    if 0 in (img.height, img.width):
                        return _bad_request('size', 'Cannot resize empty image')
                    # Attempt resize
                    if not _do_resize(img, size):
                        return _bad_request('size',
                                            'Invalid size argument: %s' % size)
                # Handle rotation
                if rotation:
                    img.rotate(float(rotation))
                # Handle quality (mostly by rejecting it)
                if quality not in ['native', 'color']:
                    return _get_iiif_error('quality',
                    'This server does not support greyscale or bitonal quality.')
                # Handle format
                if format:
                    mimetype = mimetypes.types_map['.%s' % format.lower()]
                    img.format = format
                    if mimetype not in ALLOWED_MIMETYPES:
                        return _invalid_media_response()
                else:
                    mimetype = datafile.get_mimetype()
                    # If the native format is not allowed, pretend it doesn't exist.
                    if mimetype not in ALLOWED_MIMETYPES:
                        return HttpResponseNotFound()
                img.save(file=buf)
                response = HttpResponse(buf.getvalue(), content_type=mimetype)
                response['Content-Disposition'] = \
                    'inline; filename="%s.%s"' % (datafile.filename, format)
                # Set Cache
                if is_public:
                    patch_cache_control(response, public=True, max_age=MAX_AGE)
                else:
                    patch_cache_control(response, private=True, max_age=MAX_AGE)
                return response
    except WandException:
        return HttpResponseNotFound()
    except ValueError:
        return HttpResponseNotFound()
    except IOError:
        return HttpResponseNotFound()
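
download_image() writes binary image data into buf with img.save(file=buf), so under Python 3 the StringIO would need to be an io.BytesIO. A small sketch of just that round trip, with a hypothetical input file:

import io

from wand.image import Image

buf = io.BytesIO()                          # binary buffer for img.save(file=...)
with Image(filename='page.png') as img:     # hypothetical file
    img.format = 'jpeg'
    img.save(file=buf)
jpeg_bytes = buf.getvalue()
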
Example #24
File: card.py Project: Ja-vi/pnp
class Card(object):
	"""Individual object containing an image and actions to manipulate it.
	Possible kwargs to __init__ are filename, file, image, blob; it will load the image from there."""
	def __init__(self, *args, **kwargs):
		"""Init a new cards with *img* being a wand.image.Image object"""
		self.img = Image(*args, **kwargs)
		self.border = None
		self.changed = True
		self.pixmap()

	def __del__(self):
		self.img.destroy()

	def format(self, fmt=None):
		if fmt is None:
			return self.img.format.lower()
		else:
			self.img.format = fmt

	@set_changed
	def resize(self, width, height, newres=300):
		"""Resize this card to (*width*, *height*) inches, with a resolution of *newres*"""
		self.img.transform(resize=str(int(width*newres)) + "x" + str(int(height*newres)) + "!")
		self.img.reset_coords()
		self.img.resolution = (newres, newres)
		return newres

	def width(self):
		return self.img.size[0]

	def height(self):
		return self.img.size[1]

	def reset_coords(self):
		self.img.reset_coords()

	@set_changed
	def set_border(self, border):
		"""Set a new *border* for this card"""
		if self.border is not None:
			self.del_border()
		self.border = border
		with Color(self.border.colour) as colour:
			self.img.border(colour, self.border.wide, self.border.wide)

	@set_changed
	def crop(self, *args, **kwargs):
		"""Crop this card *top*, *bottom*, *left* and *right* pixels"""
		w, h = self.img.size
		if "right" in kwargs:
			kwargs["right"] = w - kwargs["right"]
		if "bottom" in kwargs:
			kwargs["bottom"] = h - kwargs["bottom"]
		self.img.crop(*args, **kwargs)
		self.reset_coords()

	def del_border(self):
		"""Remove the border of this card"""
		if self.border is not None:
			w = self.border.wide
			self.crop(top=w, bottom=w, right=w, left=w)
			self.border = None
			self.changed = True

	@set_changed
	def trim(self, fuzz=13):
		self.img.trim(fuzz=fuzz)
		self.reset_coords()

	def save_as(self, filename):
		"""Save this card in a file named *filename*"""
		self.img.save(filename = filename)

	def split(self, rows, cols, separation=0):
		"""Divide this cards in *rows* by *cols* cards, and returns a list"""
		width, hight = self.img.size
		width, hight = (int(width), int(hight))
		cardWidth = (width - separation * (cols-1)) / cols
		cardHight = (hight - separation * (rows-1)) / rows
		res = []
		for i in range(rows):
			for j in range(cols):
				with self.img.clone() as clon:
					clon.crop(top=i*cardHight+i*separation, width=cardWidth, left=j*cardWidth+j*separation, height=cardHight)
					clon.reset_coords()
					res.append(Card(image=clon))
		return res

	@set_changed
	def round_corners(self):
		"""Round the corners of the card (setting them to alpha)"""
		pass

	def clone(self):
		c = Card(image=self.img.clone())
		c.border = self.border
		return c

	def pixmap(self):
		"""Update and returns the pixmap (QPixmap) of the contained image"""
		if self.changed:
			self._pixmap = QPixmap(QImage.fromData(self.img.make_blob("jpg"), "jpg"))#self.img.format))
			self.changed = False
		return self._pixmap
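
The Card class assumes a set_changed decorator and several imports that are not part of the excerpt. A hypothetical sketch of that surrounding setup (the Qt binding may differ in the real project):

# Hypothetical surrounding setup for the Card class above.
from functools import wraps

from wand.color import Color
from wand.image import Image
from PyQt5.QtGui import QImage, QPixmap   # the project may use PyQt4/PySide instead

def set_changed(method):
    """Assumed decorator: flag the card so pixmap() rebuilds its QPixmap."""
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        result = method(self, *args, **kwargs)
        self.changed = True
        return result
    return wrapper
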
Example #25
class Panel:
    def __init__(self,
                 background=None,
                 chars=None,
                 scene=None,
                 doublewide=False):
        if background is None:
            background = randomize_background()
        self.background = Image(filename=background)

        self.doublewide = doublewide
        if self.doublewide is True:
            self.background.crop(0, 0, self.background.height,
                                 self.background.height)
            self.background.transform(resize='1000x500^')
            self.background.transform('1000x500')
        else:
            self.background.crop(0, 0, self.background.height,
                                 self.background.height)
            self.background.transform(resize='500')
            self.background.transform(resize='500x500')

        self.chars = chars
        self.scene = scene
        draw.font = 'plugins/py_toon/fonts/DejaVuSansMono.ttf'
        draw.font_size = 15
        draw.text_kerning = 1
        draw.text_alignment = 'left'

    def setup(self):
        self.add_characters()
        self.speech_bubbles()
        self.background = self.render()
        return self.background

    def speech_bubbles(self):
        curx = 15
        cury = 15

        for action in self.scene[1]:

            actor = action[0]
            line = action[1]
            if not line:
                continue
            line = textwrap.fill(line, 20)

            metrics = draw.get_font_metrics(self.background, line, True)

            ctext = int(metrics.text_width / 2.0)
            draw.fill_color = Color('white')
            draw.stroke_color = Color('black')
            draw.stroke_width = 1.0

            char_center = actor.img.x + int(actor.img.width / 2.0)
            text_center = int(metrics.text_width / 2.0)

            if len(self.scene[1]) == 1:
                cury = randrange(50, 125 + 20)
            else:
                max_y = cury + 20
                if max_y < 1: max_y = 245
                cury = randrange(cury, max_y)
            curx = char_center - text_center
            if curx < 25: curx = 25
            if curx > self.background.width - int(metrics.text_width):
                curx = self.background.width - int(metrics.text_width) - 15

            curx = int(curx)
            cury = int(cury)

            if line.strip() != '':
                draw.round_rectangle(curx - 10, cury,
                                     curx + metrics.text_width + 10,
                                     cury + metrics.text_height + 5, 5, 5)

                draw.fill_color = Color('black')

                draw.text(curx, cury + 15, line)
                curx += metrics.text_width + 10
                cury += int(metrics.text_height + 10)

    def add_characters(self):
        parts = self.background.width / len(self.chars.keys())

        curx = 0
        cury = 0

        char_count = 0
        for i in self.chars.items():
            char = i[1]

            if self.doublewide is True:
                #char.img.resize(175, 175)
                char.img.transform(resize='x150')
            else:
                char.img.resize(125, 125)

            ### contain the character in this "box"
            char_pos = curx + parts - char.img.width
            print('char_pos:', char_pos)
            if char_pos < 1:
                return 'Not enough space to fit everybody.'
            curx = randrange(curx, char_pos)

            cury = self.background.height - char.img.height

            char.img.x = curx
            char.img.y = cury

            char_count += 1

            curx = parts * char_count

            if char_count == 2:
                char.flip()
            self.background.composite(char.img, char.img.x, char.img.y)
            draw(self.background)
            if char_count == 2:
                ### unflip
                char.flip()

    def render(self):
        if self.doublewide is False:
            self.background.border(Color('white'), 5, 5)
        else:
            self.background.border(Color('white'), 10, 10)
        draw(self.background)
        draw.clear()
        return self.background
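
Panel draws through a module-level Drawing instance named draw and calls a randomize_background() helper, neither of which appears in the excerpt. A hypothetical sketch of those globals (the backgrounds path is a guess modelled on the font path above):

# Hypothetical module-level objects assumed by Panel above.
import glob
import random

from wand.color import Color
from wand.drawing import Drawing
from wand.image import Image

draw = Drawing()                    # shared drawing context used throughout Panel

def randomize_background():
    return random.choice(glob.glob('plugins/py_toon/backgrounds/*.png'))
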
Example #26
File: pytoon.py Project: Senso/Donginger
class Panel:
    def __init__(self, background=None, chars=None, scene=None, doublewide=False):
        if background is None:
            background = randomize_background()
        self.background = Image(filename=background)

        self.doublewide = doublewide
        if self.doublewide is True:
            self.background.crop(0, 0, self.background.height, self.background.height)
            self.background.transform(resize='1000x500^')
            self.background.transform('1000x500')
        else:
            self.background.crop(0, 0, self.background.height, self.background.height)
            self.background.transform(resize='500')
            self.background.transform(resize='500x500')

        self.chars = chars
        self.scene = scene
        draw.font = 'plugins/py_toon/fonts/DejaVuSansMono.ttf'
        draw.font_size = 15
        draw.text_kerning = 1
        draw.text_alignment = 'left'

    def setup(self):
        self.add_characters()
        self.speech_bubbles()
        self.background = self.render()
        return self.background
        
    def speech_bubbles(self):
        curx = 15
        cury = 15

        for action in self.scene[1]:

            actor = action[0]
            line = action[1]
            if not line:
                continue
            line = textwrap.fill(line, 20)

            metrics = draw.get_font_metrics(self.background, line, True)

            ctext = int(metrics.text_width / 2.0)
            draw.fill_color = Color('white')
            draw.stroke_color = Color('black')
            draw.stroke_width = 1.0

            char_center = actor.img.x + int(actor.img.width / 2.0)
            text_center = int(metrics.text_width / 2.0)

            if len(self.scene[1]) == 1:
                cury = randrange(50, 125 + 20)
            else:
                max_y = cury + 20
                if max_y < 1: max_y = 245
                cury = randrange(cury, max_y)
            curx = char_center - text_center
            if curx < 25: curx = 25
            if curx > self.background.width - int(metrics.text_width):
                curx = self.background.width - int(metrics.text_width) - 15

            curx = int(curx)
            cury = int(cury)

            if line.strip() != '':
                draw.round_rectangle(curx - 10, cury, curx + metrics.text_width + 10, cury + metrics.text_height + 5, 5, 5)

                draw.fill_color = Color('black')

                draw.text(curx, cury + 15, line)
                curx += metrics.text_width + 10
                cury += int(metrics.text_height + 10)

    def add_characters(self):
        parts = self.background.width / len(self.chars.keys())

        curx = 0
        cury = 0

        char_count = 0
        for i in self.chars.items():
            char = i[1]

            if self.doublewide is True:
                #char.img.resize(175, 175)
                char.img.transform(resize='x150')
            else:
                char.img.resize(125, 125)

            ### contain the character in this "box"
            char_pos = curx + parts - char.img.width
            print('char_pos:', char_pos)
            if char_pos < 1:
                return 'Not enough space to fit everybody.'
            curx = randrange(curx, char_pos)

            cury = self.background.height - char.img.height

            char.img.x = curx
            char.img.y = cury

            char_count += 1

            curx = parts * char_count

            if char_count == 2:
                char.flip()
            self.background.composite(char.img, char.img.x, char.img.y)
            draw(self.background)
            if char_count == 2:
                ### unflip
                char.flip()

    def render(self):
        if self.doublewide is False:
            self.background.border(Color('white'), 5, 5)
        else:
            self.background.border(Color('white'), 10, 10)
        draw(self.background)
        draw.clear()
        return self.background
Example #27
user_document = input("Input document name in quotes (include .pdf): ")

image_pdf = Image(filename=user_document, resolution=230)
image_jpeg = image_pdf.convert('JPEG')

print(image_jpeg.size)
i = 1
first_page = image_jpeg.sequence[0]

#Crop first page to get first name, date, patriarch name, etc. ?

#FIGURE OUT CROP ISSUES    ----   Creates a new paragraph at each new line on the PDF...
img_page = Image(image=image_jpeg.sequence[0])
regular_page_height = int(0.9122734 * img_page.height)
img_page.crop(int(0.0525909 * img_page.width), int(0.2575078 * img_page.height), width=int(0.8957759 * img_page.width), height=int(0.6813658 * img_page.height))
img_page.save(filename='PB-raw0.jpeg')
top = Image(filename='PB-raw0.jpeg')

for img in image_jpeg.sequence[1:]:
    img_page = Image(image=img)
    #img_page.crop(95,145,1845,2355) #LTRB
    img_page.crop(int(0.0525909 * img_page.width), int(0.05031 * img_page.height), width=int(0.8957759 * img_page.width), height=int(0.9122734 * img_page.height))
    #req_image.append(img_page.make_blob('JPEG'))
#print image_jpeg.crop(10, 20, width=45, height=220)
    img_page.save(filename='PB-raw' + str(i) + '.jpeg')
    i += 1

#with Image(filename='PB-raw0.jpeg') as top:
with Image(width=top.width,
           height=(int(0.75 * top.height) + (regular_page_height * (i-1))))  as stitch:
Example #28
    def Recursar(self, numero_cupom_fiscal, ean_enviado, descricao_recurso):
        try:
            time.sleep(1)
            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                lambda browser: self.browser.execute_script(
                    'return jQuery.active == 0'))
            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                EC.presence_of_element_located(
                    (By.XPATH, '//*[@id="edtNumeroGuia"]')))
            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                EC.element_to_be_clickable(
                    (By.XPATH, '//*[@id="edtNumeroGuia"]')))
            guia = self.browser.find_element_by_xpath(
                '//*[@id="edtNumeroGuia"]')
            guia.clear()
            clipboard.copy(str(numero_cupom_fiscal).zfill(11).strip())
            guia.send_keys(Keys.CONTROL, 'v')

            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                lambda browser: self.browser.execute_script(
                    'return jQuery.active == 0'))
            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                EC.presence_of_element_located(
                    (By.XPATH,
                     '//*[@id="divGuias"]/div/fieldset/div[1]/div/div/a')))
            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                EC.element_to_be_clickable(
                    (By.XPATH,
                     '//*[@id="divGuias"]/div/fieldset/div[1]/div/div/a')))
            self.browser.find_element_by_xpath(
                '//*[@id="divGuias"]/div/fieldset/div[1]/div/div/a').click()
        except TimeoutException as e:
            return {
                'status_recurso': 'E',
                'log_recurso':
                u'Não foi possível informar Cupom Fiscal para recursar!',
                'print_recurso': None
            }

        try:
            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                lambda browser: self.browser.execute_script(
                    'return jQuery.active == 0'))
            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                EC.presence_of_element_located(
                    (By.XPATH, '//*[@id="tabelaGuia"]/tbody/tr/td[1]')))
            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                EC.element_to_be_clickable(
                    (By.XPATH, '//*[@id="tabelaGuia"]/tbody/tr/td[1]')))
            self.browser.find_element_by_xpath(
                '//*[@id="tabelaGuia"]/tbody/tr/td[1]').click()
        except TimeoutException as e:
            return {
                'status_recurso': 'E',
                'log_recurso': u'Cupom Fiscal não localizado para recursar!',
                'print_recurso': None
            }

        try:
            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                lambda browser: self.browser.execute_script(
                    'return jQuery.active == 0'))
            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                EC.presence_of_element_located(
                    (By.XPATH,
                     '//*[@id="divEventos"]/div[2]/fieldset/div/table/tbody/tr'
                     )))
            wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                EC.element_to_be_clickable(
                    (By.XPATH,
                     '//*[@id="divEventos"]/div[2]/fieldset/div/table/tbody/tr'
                     )))
            linhas_tabela = self.browser.find_elements_by_xpath(
                '//*[@id="divEventos"]/div[2]/fieldset/div/table/tbody/tr')
        except TimeoutException as e:
            return {
                'status_recurso': 'E',
                'log_recurso': u'Cupom Fiscal não possui itens para recursar!',
                'print_recurso': None
            }

        numero_linha = 1
        for linha_tabela in linhas_tabela:
            ean_linha = self.browser.find_element_by_xpath(
                '//*[@id="divEventos"]/div[2]/fieldset/div/table/tbody/tr[' +
                str(numero_linha) + ']/td[1]').text

            if ean_linha == str(ean_enviado).zfill(13).strip():
                try:
                    wait = WebDriverWait(
                        self.browser, timeout_requisicoes).until(
                            lambda browser: self.browser.execute_script(
                                'return jQuery.active == 0'))
                    wait = WebDriverWait(
                        self.browser, timeout_requisicoes
                    ).until(
                        EC.presence_of_element_located((
                            By.XPATH,
                            '//*[@id="divEventos"]/div[2]/fieldset/div/table/tbody/tr['
                            + str(numero_linha) + ']/td[7]/a[2]')))
                    wait = WebDriverWait(
                        self.browser, timeout_requisicoes
                    ).until(
                        EC.element_to_be_clickable((
                            By.XPATH,
                            '//*[@id="divEventos"]/div[2]/fieldset/div/table/tbody/tr['
                            + str(numero_linha) + ']/td[7]/a[2]')))
                    self.browser.find_element_by_xpath(
                        '//*[@id="divEventos"]/div[2]/fieldset/div/table/tbody/tr['
                        + str(numero_linha) + ']/td[7]/a[2]').click()

                    wait = WebDriverWait(
                        self.browser, timeout_requisicoes).until(
                            lambda browser: self.browser.execute_script(
                                'return jQuery.active == 0'))
                    wait = WebDriverWait(
                        self.browser, timeout_requisicoes).until(
                            EC.presence_of_all_elements_located(
                                (By.XPATH,
                                 '//div[starts-with(@id,"heading")]/h4/a')))
                    wait = WebDriverWait(
                        self.browser, timeout_requisicoes).until(
                            EC.element_to_be_clickable(
                                (By.XPATH,
                                 '//div[starts-with(@id,"heading")]/h4/a')))

                    wait = WebDriverWait(
                        self.browser, timeout_requisicoes).until(
                            lambda browser: self.browser.execute_script(
                                'return jQuery.active == 0'))
                    wait = WebDriverWait(
                        self.browser, timeout_requisicoes).until(
                            EC.element_to_be_clickable(
                                (By.XPATH,
                                 '//div[starts-with(@id,"heading")]/h4/a')))
                    glosas = self.browser.find_elements_by_xpath(
                        '//div[starts-with(@id,"heading")]/h4/a')
                except TimeoutException as e:
                    return {
                        'status_recurso': 'E',
                        'log_recurso':
                        u'Não foi possível encontrar motivos de glosas para recursar!',
                        'print_recurso': None
                    }

                codigo_justificativa = None
                for glosa in glosas:
                    try:
                        if codigo_justificativa != glosa.get_attribute(
                                'href').replace(
                                    self.browser.current_url + 'justificativa',
                                    ''):
                            time.sleep(1)
                            codigo_justificativa = glosa.get_attribute(
                                'href').replace(
                                    self.browser.current_url + 'justificativa',
                                    '')
                            glosa.click()

                            wait = WebDriverWait(
                                self.browser, timeout_requisicoes).until(
                                    lambda browser: self.browser.
                                    execute_script('return jQuery.active == 0'
                                                   ))
                            wait = WebDriverWait(
                                self.browser, timeout_requisicoes
                            ).until(
                                EC.presence_of_element_located((
                                    By.XPATH,
                                    '//*[@id="myModal"]/div/div/div/div/div/button'
                                )))
                            wait = WebDriverWait(
                                self.browser, timeout_requisicoes
                            ).until(
                                EC.element_to_be_clickable((
                                    By.XPATH,
                                    '//*[@id="myModal"]/div/div/div/div/div/button'
                                )))
                            self.browser.find_element_by_xpath(
                                '//*[@id="myModal"]/div/div/div/div/div/button'
                            ).click()

                            wait = WebDriverWait(
                                self.browser, timeout_requisicoes).until(
                                    lambda browser: self.browser.
                                    execute_script('return jQuery.active == 0'
                                                   ))
                            wait = WebDriverWait(
                                self.browser, timeout_requisicoes).until(
                                    EC.presence_of_element_located(
                                        (By.XPATH, '//textarea[@id="j' +
                                         codigo_justificativa + '"]')))
                            wait = WebDriverWait(
                                self.browser, timeout_requisicoes).until(
                                    EC.element_to_be_clickable(
                                        (By.XPATH, '//textarea[@id="j' +
                                         codigo_justificativa + '"]')))
                            justificativa = self.browser.find_element_by_xpath(
                                '//textarea[@id="j' + codigo_justificativa +
                                '"]')
                            justificativa.clear()
                            justificativa.send_keys(
                                unicode(descricao_recurso, 'iso-8859-1'))
                    except TimeoutException as e:
                        return {
                            'status_recurso': 'E',
                            'log_recurso':
                            u'Não foi possível informar a justificativa para recursar!',
                            'print_recurso': None
                        }

                wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                    lambda browser: self.browser.execute_script(
                        'return jQuery.active == 0'))
                tela_recurso = self.browser.find_element_by_xpath(
                    '//div[@id="divJustificativaEvento"]/div/div')
                posicao = tela_recurso.location
                tamanho = tela_recurso.size
                print_screen = self.browser.get_screenshot_as_base64()
                print_screen = Image(blob=b64decode(print_screen))
                print_screen.crop(left=int(posicao["x"]),
                                  top=int(posicao["y"]),
                                  width=int(tamanho["width"]),
                                  height=int(tamanho["height"]))
                # serialize the cropped screenshot so a blob is returned below
                print_screen = print_screen.make_blob()

                try:
                    wait = WebDriverWait(
                        self.browser, timeout_requisicoes).until(
                            lambda browser: self.browser.execute_script(
                                'return jQuery.active == 0'))
                    wait = WebDriverWait(
                        self.browser, timeout_requisicoes).until(
                            EC.presence_of_element_located(
                                (By.XPATH,
                                 '//*[@id="btnSalvarJustificativaEvento"]')))
                    wait = WebDriverWait(
                        self.browser, timeout_requisicoes).until(
                            EC.element_to_be_clickable(
                                (By.XPATH,
                                 '//*[@id="btnSalvarJustificativaEvento"]')))
                    self.browser.find_element_by_xpath(
                        '//*[@id="btnSalvarJustificativaEvento"]').click()
                except TimeoutException as e:
                    return {
                        'status_recurso': 'E',
                        'log_recurso': u'Não foi possível confirmar o recurso!',
                        'print_recurso': None
                    }

                wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                    lambda browser: self.browser.execute_script(
                        'return jQuery.active == 0'))
                wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                    EC.presence_of_element_located(
                        (By.XPATH, '//*[@id="btnFecharJustificativaEvento"]')))
                wait = WebDriverWait(self.browser, timeout_requisicoes).until(
                    EC.element_to_be_clickable(
                        (By.XPATH, '//*[@id="btnFecharJustificativaEvento"]')))
                self.browser.find_element_by_xpath(
                    '//*[@id="btnFecharJustificativaEvento"]').click()

                return {
                    'status_recurso': 'F',
                    'log_recurso': u'Recurso efetuado com sucesso!',
                    'print_recurso': print_screen
                }

            numero_linha = numero_linha + 1

        return {
            'status_recurso': 'E',
            'log_recurso': u'EAN enviado não encontrado para recursar!',
            'print_recurso': None
        }
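
The block above repeats the same three-step wait (jQuery idle, element present, element clickable) before every click. A minimal helper sketch of that pattern; the name aguardar_e_clicar is illustrative and not part of the original source:

from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


def aguardar_e_clicar(browser, xpath, timeout):
    """Wait for pending jQuery requests and for the element, then click it."""
    # wait until all AJAX requests issued through jQuery have finished
    WebDriverWait(browser, timeout).until(
        lambda b: b.execute_script('return jQuery.active == 0'))
    # wait until the element exists in the DOM
    WebDriverWait(browser, timeout).until(
        EC.presence_of_element_located((By.XPATH, xpath)))
    # wait until it is visible and enabled, then click it
    WebDriverWait(browser, timeout).until(
        EC.element_to_be_clickable((By.XPATH, xpath))).click()
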
Example #29
0
def create_box(filepdf,
               page_num,
               page_width,
               page_height,
               top,
               left,
               bottom,
               right,
               quality=200,
               margin=0.1):
    #extract filename:
    filebasename = os.path.basename(pdf_path + filepdf)

    print("extracting box from %s..." % filepdf)

    #used to generate temp file name.
    uuid_set = str(uuid.uuid4().fields[-1])[:5]
    img = Image(filename=pdf_path + filepdf, resolution=200)
    #resize image
    img.resize(page_width, page_height)
    #set output compression quality
    img.compression_quality = int(quality)
    #save all rendered pages as temporary JPEGs (one file per PDF page)
    img.save(filename=path_temp + "%s.jpg" % (uuid_set))
    print("temp pages saved.")

    #search all images in temp path; file names start with the uuid_set value
    list_im = glob.glob(path_temp + "%s*.jpg" % uuid_set)

    img_page = Image(filename=path_temp + "%s-%s.jpg" %
                     (uuid_set, str(page_num - 1)),
                     resolution=200)

    width, height = img_page.size

    #define zone to crop
    left = round(left * width / page_width)
    right = round(right * width / page_width)
    top = round(top * height / page_height)
    bottom = round(bottom * height / page_height)

    box_width = right - left
    box_height = bottom - top

    #add a margin around box
    left = round(left -
                 margin * box_width) if round(left -
                                              margin * box_width) > 1 else 1
    right = round(right + margin * box_width) if round(
        right + margin * box_width) < width - 1 else width - 1
    top = round(top -
                margin * box_height) if round(top -
                                              margin * box_height) > 1 else 1
    bottom = round(bottom + margin * box_height) if round(
        bottom + margin * box_height) < height - 1 else height - 1

    #crop the file
    img_page.crop(left, top, right, bottom)

    print("saving the image...")
    img_page.compression_quality = quality
    img_page.save(filename=path_boxs + filepdf +
                  "-%s_pn-%s_pw-%s_ph-%s_t-%s_l-%s_b-%s_r-%s.jpg" %
                  (uuid_set, str(page_num), str(page_width), str(page_height),
                   str(top), str(left), str(bottom), str(right)))

    for i in list_im:
        os.remove(i)
    return
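
A hypothetical call of create_box, assuming pdf_path, path_temp and path_boxs are module-level directory settings (they are referenced above but not defined in this snippet); the file name and coordinates below are illustrative only:

# extract the region between the given page coordinates from page 3 of "report.pdf",
# rendered at 200 DPI with a 10% margin around the box
create_box("report.pdf", page_num=3,
           page_width=595, page_height=842,
           top=100, left=50, bottom=300, right=545,
           quality=200, margin=0.1)
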
Example #30
0
    def draw_lip(self, top=False, bottom=False):
        if "back" not in self.faces:
            return None

        if not (top ^ bottom): # 1 and only 1 of top or bottom should be true
            return None

        # First draw a full mask with the lip shape
        lip_full_mask_image = Image(width=math.ceil(self.tuckbox['width'] * POINT_PER_MM),
                                    height=math.ceil(self.lip_size() * POINT_PER_MM))
        lip_full_draw = Drawing()

        lip_full_draw.scale(POINT_PER_MM, POINT_PER_MM)

        # This cannot be too "thin" or the floodfill later would spill over
        lip_full_draw.stroke_width = max(2 / POINT_PER_MM, RESOLUTION / (200 * POINT_PER_MM))

        lip_full_draw.fill_color = Color('white')
        lip_full_draw.color(0, 0, 'reset')
        lip_full_draw.draw(lip_full_mask_image)

        lip_full_draw.stroke_color = Color('black')

        # 1/2 left of lip
        lip_full_draw.bezier([(0, self.lip_size()),
                              (0, self.lip_size() - .75*self.lip_size()),
                              (.2 * self.tuckbox['width'],
                               self.lip_size() - self.lip_size()),
                              (.5 * self.tuckbox['width'], self.lip_size() - self.lip_size())])

        # 1/2 right of lip
        lip_full_draw.bezier([(self.tuckbox['width'], self.lip_size()),
                              (self.tuckbox['width'],
                               self.lip_size() - .75*self.lip_size()),
                              (.8 * self.tuckbox['width'],
                               self.lip_size() - self.lip_size()),
                              (.5 * self.tuckbox['width'], self.lip_size() - self.lip_size())])

        lip_full_draw.draw(lip_full_mask_image)

        lip_full_draw.fill_color = Color('black')
        lip_full_draw.border_color = Color('black')
        lip_full_draw.color(.5 * self.tuckbox['width'],
                            0.8*self.lip_size(), 'filltoborder')

        lip_full_draw.draw(lip_full_mask_image)

        if self.faces['back'][:1] == "#":
            lip_image = Image(width = lip_full_mask_image.width, height = lip_full_mask_image.height,
                            background = Color(self.faces['back']))
        else:
            # Prepare the front image
            angle = 180 if "back_angle" not in self.options else (self.options["back_angle"]+2)*90

            if bottom:
                angle = (angle + 180) % 360

            _, file_extension = os.path.splitext(self.faces['back'])
            tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=file_extension)
            self.resize_rotate_image(self.faces['back'], tmp_file.name, angle, math.ceil(self.tuckbox['width'] * POINT_PER_MM),
                            math.ceil(self.tuckbox['height'] * POINT_PER_MM))
            lip_image = Image(filename=tmp_file.name)
            lip_image.crop(top=lip_image.height - lip_full_mask_image.height)

        # In the fx expression below: u = pixel value, j = current row index.
        # The radius of the finger hole is self.tuckbox['width'] * 0.1.
        # A pixel value of 1 (or more) is white.
        # The top is the tip of the lip; the bottom is attached to the box.
        # finger_hold_size_save is the row below which no attenuation is applied.
        finger_hold_size_save = str(
            int(lip_image.height - math.ceil(self.tuckbox['width'] * POINT_PER_MM * 0.1)))
        lip_image = lip_image.fx(
            "j>"+finger_hold_size_save+"?u:1+(u-1)*(j/"+finger_hold_size_save+")")

        lip_image.composite(operator='lighten', image=lip_full_mask_image)

        if bottom:
            lip_image.rotate(180)

        return lip_image
Example #31
0
def generate(count):
    # ---------------get three colors-------------------------------
    colorString = random.choice(result)
    color = []
    for i in colorString:
        color += [int(i)]
    # for i in range(len(color)):
    #     color[i] = math.floor(color[i]/255*65535)
    color1 = color[0:3]
    color2 = color[3:6]
    color3 = color[6:9]
    # --------------------------------------------------------------

    # ----------get the base layer texture--------------------------
    Scenes = pathwalk('./SceneData')

    randomScene = random.choice(Scenes)
    randomScene = randomScene[0] + '/' + randomScene[1]
    print(randomScene)
    randomSceneImage = Image(filename=randomScene)

    widthRange = randomSceneImage.size[0] - 100
    heightRange = randomSceneImage.size[1] - 32

    randomSceneImage.crop(left=random.randint(0, widthRange), top=random.randint(0, heightRange), width=100, height=32)
    # randomSceneImage.save(filename='.\\photoWand\\'+str(j+1) + '_texture.jpg')
    # --------------------------------------------------------------

    # ----------create the base layer, base texture +base color-----

    baseImage = Image(width=100, height=32, background=Color('rgb('+str(color1[0])+','+str(color1[1])+','+str(color1[2])+')'))

    # print('base_color = ' + 'rgb('+str(color1[0])+','+str(color1[1])+','+str(color1[2])+')')
    baseImage.composite_channel(channel='undefined', image=randomSceneImage, operator='blend', left=0, top=0)
    baseImage.gaussian_blur(4, 10)
    baseImage.resolution = (96, 96)
    # --------------------------------------------------------------

    # -----generate font--------------------------------------------
    word = randomWords()
    fonts = pathwalk('./fonts/font_en/')
    randomFont = random.choice(fonts)
    randomFont = randomFont[0] + randomFont[1]

    initialPointsize = 45

    draw = Drawing()
    draw.font = randomFont

    tmp = int(math.floor(abs(random.gauss(0, 1))*6))
    if random.randint(1, 2) == 1:
        rotateX = random.randint(0, tmp)
    else:
        rotateX = random.randint(360-tmp, 360)

    draw.rotate(rotateX)
    # --------------------------------------------------------------
    # --------get suitable FontPointSize----------------------------
    draw.font_size = initialPointsize
    metric = draw.get_font_metrics(image=baseImage, text=word)

    while metric.text_width > 100 or metric.text_height > 36:
        initialPointsize -= 5
        draw.font_size = initialPointsize
        metric = draw.get_font_metrics(image=baseImage, text=word)
    # --------------------------------------------------------------

    # ----------italic----------------------------------------------
    if random.random() > 0.5:
        draw.font_style = 'italic'
    # --------------------------------------------------------------

    # ----------underline-------------------------------------------
    if random.random() > 0.5:
        draw.text_decoration = 'underline'
    # --------------------------------------------------------------

    # ----------gravity---------------------------------------------
    draw.gravity = 'center'
    # --------------------------------------------------------------

    # --------------shadow/border-----------------------------------
    if random.random() < 0.5:
        # shadow
        addx = math.ceil(random.gauss(0, 2))
        addy = math.ceil(random.gauss(0, 2))
        draw.fill_color = Color('black')
        draw.text(x=abs(int(addx)), y=abs(int(addy)), body=word)

    else:
        # border
        draw.stroke_color = Color('rgb('+str(color3[0])+','+str(color3[1])+','+str(color3[2])+')')
        draw.stroke_width = math.ceil(initialPointsize/10)-1
    # --------------------------------------------------------------

    # ----------print word------------------------------------------
    draw.fill_color = Color('rgb('+str(color2[0])+','+str(color2[1])+','+str(color2[2])+')')
    draw.text(x=0, y=0, body=word)
    draw.draw(baseImage)
    # --------------------------------------------------------------

    # ------------gray----------------------------------------------
    baseImage.colorspace = 'gray'
    # --------------------------------------------------------------

    print(word)
    baseImage.save(filename='./photo_en/'+str(count+1)+'_'+word+'.jpg')
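
A hypothetical driver for the function above, assuming result, pathwalk and randomWords are provided elsewhere in the project and that the ./photo_en/ directory exists:

if __name__ == '__main__':
    # render 1000 synthetic word images; count only controls the output file name
    for count in range(1000):
        generate(count)
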
Example #32
0
def process_image(raw_queue, ready_queue, global_config):
    from wand.image import Image

    # get an image from the queue
    try:
        image_config = raw_queue.get()
    except Queue.Empty:
        return

    image = Image(filename=image_config['filename'])
    raw_areas = []
    all_areas = []
    done = []

    temp_areas = [i for i in image_config.keys() if i[:4].lower() == 'area']
    for area in temp_areas:
        area = image_config[area]
        raw_areas.append(tuple([float(i) for i in area.split(',')]))

    temp_positions = [
        i for i in image_config.keys() if i[:8].lower() == 'position'
    ]
    for pos in temp_positions:
        pos = image_config[pos]
        if global_config['units'] == 'pixels':
            x, y = [int(i) for i in pos.split(',')]
            w, h = image.size
        else:
            x, y = [int(int(i) * global_config['dpi']) for i in pos.split(',')]
            w, h = [int(int(i) / global_config['dpi']) for i in image.size]

        this_config = dict(image_config)
        this_config['area'] = (x, y, w, h)
        ready_queue.put(this_config)

        #raw_areas.append((x,y,w,h))

    # REWRITE THIS!
    for area in raw_areas:
        if global_config['units'] == 'pixels':
            x, y, w, h = [int(i) for i in area]
        else:
            x, y, w, h = [int(float((i)) * global_config['dpi']) for i in area]
        all_areas.append((x, y, w, h))

    # process each image
    for x, y, w, h in all_areas:
        if (w, h) not in done:
            # A U T O C R O P
            autocrop = image_config.get('autocrop', None)
            if autocrop:
                r0 = float(w) / h
                r1 = float(image.width) / image.height

                if r1 > r0:
                    scale = float(h) / image.height
                    sw = int(image.width * scale)
                    cx = int((sw - w) / 2)
                    image.resize(sw, h)
                    # center-crop horizontally to the target width w (not the scaled width sw)
                    image.crop(left=cx, width=w, height=h)

            # S C A L E
            scale = image_config.get('scale', None)
            if scale:
                image.resize(w, h)

            # F I L T E R S
            """
            filters make heavy use of subprocess, so we need to make a temporary file
            for it to manipulate.
            """
            filter = image_config.get('filter', None)
            if filter:
                scratch = 'prefilter-{}.miff'.format(image_config['filename'])
                image.filename = image_config['filename']
                image.format = os.path.splitext(scratch)[1][1:]
                image.save(filename=scratch)
                image.close()
                image = None

                if filter.lower() == 'toaster':
                    scratch = toaster(scratch, sw, h)

                # create a new Image object for the queue
                with Image(filename=scratch) as temp_image:
                    image = Image(image=temp_image)
                del temp_image

                # delete our temporary image file
                os.unlink(scratch)

            image.format = 'png'
            image.save(filename=image_config['filename'])
            image.close()

        done.append((w, h))
        this_config = dict(image_config)
        this_config['area'] = (x, y, w, h)
        ready_queue.put(this_config)

    ready_queue.close()
    raw_queue.task_done()
Example #33
0
    color1 = color[0:3]
    color2 = color[3:6]
    color3 = color[6:9]
    # --------------------------------------------------------------

    # ----------get the base layer texture--------------------------
    Scenes = pathwalk('.\\SceneData\\')
    randomScene = random.choice(Scenes)
    randomScene = randomScene[0] + randomScene[1]
    # print(randomScene)
    randomSceneImage = Image(filename=randomScene)

    widthRange = randomSceneImage.size[0] - 100
    heightRange = randomSceneImage.size[1] - 32

    randomSceneImage.crop(left=random.randint(0, widthRange), top=random.randint(0, heightRange), width=100, height=32)
    # randomSceneImage.save(filename='.\\photoWand\\'+str(j+1) + '_texture.jpg')
    # --------------------------------------------------------------

    # ----------create the base layer, base texture +base color-----
    baseImage = Image(width=100, height=32, background=Color('rgb('+str(color1[0])+','+str(color1[1])+','+str(color1[2])+')'))

    # print('base_color = ' + 'rgb('+str(color1[0])+','+str(color1[1])+','+str(color1[2])+')')
    baseImage.composite_channel(channel='undefined', image=randomSceneImage, operator='blend', left=0, top=0)
    baseImage.gaussian_blur(4, 10)
    baseImage.resolution = (96, 96)
    # --------------------------------------------------------------

    # -----generate font--------------------------------------------
    word = python2access.randomWords()
    fonts = pathwalk('.\\googleFonts\\')
Example #34
0
def crop(jpeg, width=128, height=171):
    """Returns a cropped version of an image"""
    img = Image(blob=jpeg)
    img.crop(left=3, top=3, width=width, height=height)
    return img.make_blob()
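
A hypothetical round trip through the helper above: read JPEG bytes from disk, crop them, and write the returned blob back out.

with open('photo.jpg', 'rb') as src:
    cropped_bytes = crop(src.read(), width=128, height=171)
with open('photo_cropped.jpg', 'wb') as dst:
    dst.write(cropped_bytes)
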
Example #35
0
class Render:
	def __init__(self, geojson, width=1024, height=1024):
		self.minxtile = 0
		self.minytile = 0

		self.number_of_rows = 0
		self.number_of_cols = 0

		self.rendering_zoom = 13
		self.tile_provider = 'OCM'
		self.square_rendering = False
		self.center = 0
		self.stroke_width = 3

		self.render_width = int(width)
		self.render_height = int(height)

		self.width_in_pixel = 0
		self.height_in_pixel = 0

		self.bounds = ''
		self.rendering_bounds = ''

		self.tile_cache_path = './cache'
		self.cache_path = ''

		self.img = ''

		self.debug = False

		self.render_quality = 90

		self.geojson = json.loads(geojson)

		self.prepare()

	def init_cache(self):
		self.cache_path = self.get_cache_path()

		if not os.path.isdir(self.cache_path):
			os.makedirs(self.cache_path)

	def get_cache_path(self):
		return "%s/%s/%s" % (self.tile_cache_path, self.tile_provider, self.rendering_zoom)

	def get_tile(self, tile):
		tile_path = "%s/%s/%s.png" % (self.cache_path, tile[0], tile[1])

		if not os.path.exists(tile_path) :

			tile_dir = os.path.dirname(tile_path)

			if not os.path.isdir(tile_dir):
				os.makedirs(tile_dir)

			url = self.get_tile_url(tile)
			response = urlopen(url)
			# tiles are binary PNG data, so write and read them in binary mode
			f = open(tile_path, 'wb')
			f.write(response.read())
			f.close()

		f = open(tile_path, 'rb')
		return f


	def prepare(self):
		self.get_bounds()
		self.define_zoom_level()
		self.get_rendering_bounds()
		self.init_cache()

	def process(self):
		tiles = self.get_tiles_for_bounds()
		self.generate_background(tiles)
		return self.generate_track()

	def define_zoom_level(self) :
		"""
		Define the best zoom level giving the specified size
		for the image. Starting from the higher zoom level (18)
		then decrementing to get the best level.
		"""

		self.rendering_zoom = 18
		self.get_size_from_bounds_and_zoom_level()

		while (self.width_in_pixel > self.render_width or self.height_in_pixel > self.render_height) and self.rendering_zoom > 1 :
			self.rendering_zoom = self.rendering_zoom - 1
			self.get_size_from_bounds_and_zoom_level()
			print "define_zoom_level w: %s, h: %s, z: %s" % (self.width_in_pixel, self.height_in_pixel, self.rendering_zoom)


	def get_bounds(self):

		max = np.max(self.geojson['coordinates'], axis=0)
		max_lon = max[0]
		max_lat = max[1]
		max_ele = max[2]

		min = np.min(self.geojson['coordinates'], axis=0)
		min_lon = min[0]
		min_lat = min[1]
		min_ele = min[2]

		self.bounds = Bounds(min_lon, max_lon, min_lat, max_lat)

	def get_rendering_bounds(self):

		center_lat = (self.bounds.se.lat + self.bounds.nw.lat) / 2.0
		center_lon = (self.bounds.se.lon + self.bounds.nw.lon) / 2.0
		center = Point(center_lon, center_lat)
		center.project(self.rendering_zoom)
		self.center = center

		top_left_x = center.x - (self.render_width / 2.0)
		top_left_y = center.y - (self.render_height / 2.0)
		top_left = Point.from_xy(top_left_x, top_left_y)
		top_left.unproject(self.rendering_zoom)

		bottom_x = center.x + (self.render_width / 2.0)
		bottom_y = center.y + (self.render_height / 2.0)
		bottom_right = Point.from_xy(bottom_x, bottom_y)
		bottom_right.unproject(self.rendering_zoom)

		self.rendering_bounds = Bounds(top_left.lon, bottom_right.lon, bottom_right.lat, top_left.lat)

		print self.rendering_bounds

	def get_size_from_bounds_and_zoom_level(self):

		# top left point
		top_left = Point(self.bounds.nw.lon, self.bounds.nw.lat)
		top_left.project(self.rendering_zoom)

		# top right point
		top_right = Point(self.bounds.se.lon, self.bounds.nw.lat)
		top_right.project(self.rendering_zoom)

		# calculate width in px
		width = math.fabs(top_left.x-top_right.x)

		# bottom left point
		bottom_left = Point(self.bounds.nw.lon, self.bounds.se.lat)
		bottom_left.project(self.rendering_zoom)

		# calculate height in px
		height = math.fabs(top_left.y-bottom_left.y)

		self.width_in_pixel = width
		self.height_in_pixel = height

	def get_tiles_for_bounds(self):
		"""
			Returns a matrix of tile corresponding to the bounds
		"""
		self.rendering_bounds.nw.project(self.rendering_zoom)
		self.rendering_bounds.se.project(self.rendering_zoom)

		nw_tile_x, nw_tile_y = self.rendering_bounds.nw.get_tile()
		se_tile_x, se_tile_y = self.rendering_bounds.se.get_tile()

		x = [int(nw_tile_x), int(se_tile_x)]
		x.sort()
		y = [int(nw_tile_y), int(se_tile_y)]
		y.sort()

		# Create the range of the tiles
		tile_x_range = range(x[0], x[1]+1)
		tile_y_range = range(y[0], y[1]+1)

		self.number_of_cols = len(tile_x_range)
		self.number_of_rows = len(tile_y_range)
		self.minxtile = tile_x_range[0]
		self.minytile = tile_y_range[0]

		i = 0

		# Create a Matrix of tiles
		matrix = [[0 for x in range(self.number_of_cols)] for y in range(self.number_of_rows)]

		# Loop over the rows (y tiles)
		for y_tile in tile_y_range:
			j = 0
			# Loop over the columns (x tiles)
			for x_tile in tile_x_range:
				matrix[i][j] = [x_tile, y_tile]
				# increment the columns
				j += 1
			# increment lines
			i += 1

		return matrix

	def generate_background(self, tiles) :
		"""
			Displays the tiles on the background
		"""

		self.img = Image(width=self.number_of_cols*256, height=self.number_of_rows*256)

		current_row = 0

		for row in tiles :
			current_col = 0
			for tile in row:
				response = self.get_tile(tile)
				try:
					with Image(file=response) as tile_img:
						draw = Drawing()
						draw.composite(operator='add', left=current_col*256, top=current_row*256, width=tile_img.width, height=tile_img.height, image=tile_img)
						draw(self.img)
				finally:
					response.close()

				current_col += 1

			current_row += 1

	def get_tile_url(self, tile) :
		"""
			Returns the url for a specified tile
		"""
		return "http://tile.openstreetmap.org/%s/%s/%s.png" % (self.rendering_zoom, tile[0], tile[1])

	def generate_track(self) :

		draw = Drawing()
		draw.stroke_width = 2
		draw.stroke_color = Color('red')
		draw.fill_color = Color('transparent')

		points = []

		# Loop over the coordinates to create a list of tuples
		for coords in self.geojson['coordinates'] :
			pt = Point(coords[0], coords[1])
			pt.project(self.rendering_zoom)
			x, y = pt.get_xy()
			x = round(x - (self.minxtile * 256))
			y = round(y - (self.minytile * 256))
			points.append((x, y))

		# draw the polyline
		draw.polyline(points)

		# apply to the image
		draw(self.img)

		# self.rendering_bounds.nw.project(self.rendering_zoom)
		x = int(self.rendering_bounds.nw.tile_x - self.minxtile)
		y = int(self.rendering_bounds.nw.tile_y - self.minytile)

		self.crop(x, y)
		self.img.format = 'jpeg'
		# self.img.save(filename='image.jpg')
		return self.img.make_blob('jpeg')

	def crop(self, x, y):
		x = self.rendering_bounds.nw.x - (self.minxtile * 256)
		y = self.rendering_bounds.nw.y - (self.minytile * 256)
		self.img.crop(int(x), int(y), width=self.render_width, height=self.render_height)
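
A hypothetical driver for the Render class above, assuming the GeoJSON is a LineString whose coordinates carry [lon, lat, ele] triples (get_bounds reads a third component) and that the tile server is reachable:

track_geojson = (
    '{"type": "LineString",'
    ' "coordinates": [[2.350, 48.850, 35], [2.355, 48.853, 38], [2.360, 48.856, 40]]}'
)

renderer = Render(track_geojson, width=800, height=600)  # prepare() runs in __init__
jpeg_bytes = renderer.process()                          # fetch tiles, draw track, crop
with open('track.jpg', 'wb') as out:
    out.write(jpeg_bytes)
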
Example #36
0
    def _extract_qp(self, file_name):
        mark_sum = 0
        print "Extracting QP"

        # Load pdf
        laparams = LAParams()
        rsrcmgr = PDFResourceManager()
        document = file(file_name, 'rb')
        device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        interpreter = PDFPageInterpreter(rsrcmgr, device)

        q_num = 0
        for i, page in enumerate(PDFPage.get_pages(document)):
            # Get page layout
            interpreter.process_page(page)
            layout = device.get_result()

            # Extract metadata
            textboxes = [
                r for r in layout._objs if type(r) is LTTextBoxHorizontal
            ]
            work_out_y = 0
            answer_header = 0
            marks = []

            for t in textboxes:
                text = t.get_text()
                if "Answer space for question" in text:
                    work_out_y = int(t.y0)
                elif "marks]" in text:
                    marks.extend(find_between(text, "[", " marks]"))
                elif "mark]" in text:
                    marks.extend(find_between(text, "[", " mark]"))
                elif "......" in text:
                    # TODO: Find the correct amount of dots
                    pass
                elif text in ["QUESTION\n", "PART\n", "REFERENCE\n"]:
                    pass
                elif text in [
                        "Do not write\noutside the\n", "box\n", "Turn over s\n"
                ]:
                    pass
                elif text == "Answer all questions.\n":
                    answer_header = 74
                else:
                    pass
                    # print repr(text)

            marks = [int(m) for m in marks]
            mark_sum += sum(marks)

            # Convert page into image
            img_path = "{}[{}]".format(file_name, i)
            img = Image(filename=img_path, resolution=int(72 * Paper.QUALITY))

            # Set crop positions
            x = 46 * Paper.QUALITY
            y = (66 + answer_header) * Paper.QUALITY
            width = 489 * Paper.QUALITY
            height = (761 - answer_header - work_out_y) * Paper.QUALITY

            # Check for blank pages
            if height <= Paper.QUALITY or work_out_y <= 0:
                continue

            # Crop and save the image
            q_num += 1
            img.crop(x, y, width=width, height=height)
            img_path = os.path.join(self._folder, "q{}.jpg".format(q_num))
            img.save(filename=img_path)

            # Add question to questions
            self._questions.append(Question(img_path, q_num, marks))

        print "Marks: {}".format(mark_sum)
Example #37
0
    def process(self, descriptor: StreamDescriptor, context: dict):

        # Ensuring the wand package is installed.
        ensure_wand()
        # noinspection PyPackageRequirements
        from wand.image import Image as WandImage

        # Copy the original info
        # generating thumbnail and storing in buffer
        # noinspection PyTypeChecker
        img = WandImage(file=descriptor)

        if self.crop is None and (
                self.format is None or img.format == self.format) and (
                    (self.width is None or img.width == self.width) and
                    (self.height is None or img.height == self.height)):
            img.close()
            descriptor.prepare_to_read(backend='memory')
            return

        if 'length' in context:
            del context['length']

        # opening the original file
        output_buffer = io.BytesIO()
        with img:
            # Changing format if required.
            if self.format and img.format != self.format:
                img.format = self.format

            # Changing dimension if required.
            if self.width or self.height:
                width, height, _ = validate_width_height_ratio(
                    self.width, self.height, None)
                img.resize(
                    width(img.size) if callable(width) else width,
                    height(img.size) if callable(height) else height)

            # Cropping
            if self.crop:
                img.crop(
                    **{
                        key: int(
                            int(value[:-1]) / 100 *
                            (img.width if key in ('width', 'left',
                                                  'right') else img.height)
                        ) if key in ('left', 'top', 'right', 'bottom', 'width',
                                     'height') and isinstance(value, str)
                        and '%' in value else value
                        for key, value in self.crop.items()
                    })

            img.save(file=output_buffer)

            context.update(content_type=img.mimetype,
                           width=img.width,
                           height=img.height,
                           extension=guess_extension(img.mimetype))

        output_buffer.seek(0)
        descriptor.replace(output_buffer, position=0, **context)
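
The dict comprehension feeding img.crop above converts percentage strings into pixel values. An equivalent standalone sketch (resolve_crop is a hypothetical helper name, same assumed semantics):

def resolve_crop(crop, img_width, img_height):
    """Turn values such as '25%' into absolute pixels against the image size."""
    resolved = {}
    for key, value in crop.items():
        if key in ('left', 'top', 'right', 'bottom', 'width', 'height') \
                and isinstance(value, str) and '%' in value:
            # left/right/width scale against the width, the rest against the height
            base = img_width if key in ('width', 'left', 'right') else img_height
            resolved[key] = int(int(value[:-1]) / 100 * base)
        else:
            resolved[key] = value
    return resolved

# resolve_crop({'left': '10%', 'top': '10%', 'width': '50%', 'height': '50%'}, 800, 600)
# -> {'left': 80, 'top': 60, 'width': 400, 'height': 300}
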
Example #38
0
File: akari.py Project: wodim/akari
    def compose(self, image):
        utils.logger.info('Starting to compose Akari...')

        if 'akari:frames' not in cache:
            # cache miss
            utils.logger.warning('Akari frames were not warmed up!')
            self.warmup()

        akari_frames = cache.get('akari:frames')
        self.width = cache.get('akari:width')
        self.height = cache.get('akari:height')

        if self.type == 'animation' and len(akari_frames) < 2:
            # if we were asked to generate an animation but there's only one
            # mask, then we're generating a still image
            self.type = 'still'
        elif self.type == 'still' and len(akari_frames) > 1:
            # if we were asked to generate a still image and there are several
            # masks, use only the first one
            akari_frames = [akari_frames[0]]

        # now, get the background image
        filename = image.filename
        with Image(filename=filename) as original:
            # if it's an animation, take only the first frame
            if original.animation:
                bg_img = Image(original.sequence[0])
            else:
                bg_img = Image(original)
        # remove the alpha channel, if any
        bg_img.alpha_channel = False
        # resize it
        bg_img.transform(resize='{}x{}^'.format(self.width, self.height))
        bg_img.crop(width=self.width, height=self.height, gravity='center')

        if self.text:
            # generate the drawing to be applied to each frame
            if self.caption_type == 'seinfeld':
                caption, drawing = self._caption_seinfeld()
            elif self.caption_type == 'sanandreas':
                caption, drawing = self._caption_sanandreas()
            else:
                caption, drawing = self._caption_akari()
        else:
            caption, drawing = '', None

        result = Image()  # this will be the resulting image
        for akari_frame in akari_frames:
            # take the background image
            this_frame = Image(bg_img)

            # put akari on top of it
            this_frame.composite(akari_frame, left=0, top=0)

            if drawing:
                # draw the caption on this frame
                drawing(this_frame)

            if len(akari_frames) == 1:
                # we are done already
                result = Image(this_frame)
            else:
                # add the frame to the result image
                result.sequence.append(this_frame)
                with result.sequence[-1]:
                    result.sequence[-1].delay = 10
            # remove this frame from memory (it's in the sequence already)
            this_frame.close()
        if not akari_frames:
            # shortcut in case there's no mask
            this_frame = Image(bg_img)
            if drawing:
                drawing(this_frame)
            result = Image(this_frame)
            this_frame.close()

        # save the result
        filename = image.get_path(self.type)
        result.compression_quality = 100
        result.save(filename=filename)

        # destroy everything
        if drawing:
            drawing.destroy()
        for frame in result.sequence:
            frame.destroy()
        result.close()
        bg_img.close()

        try:
            # if the gif is too big, it has to be discarded. a new one
            # will be generated using a different image this time.
            if os.path.getsize(filename) > 3072 * 1024:
                raise AkariTooBigError('Composed an animation that is too big')
        except FileNotFoundError:
            # sometimes Wand fails to save the animation, and does not even
            # raise an exception. retry in this case.
            raise AkariWandIsRetardedError('Wand failed to save the animation')

        utils.logger.info('Akari composed and saved as "%s"', filename)
        self.filename = filename
        self.caption = caption
Example #39
0
    lines[2] = min(difference_to_middle_bottom)

    return lines


#For all the png in the folder
for filename in glob.glob("*-t.png"):
    im = Image(filename=filename)
    #Save the width and height of the thumbnail
    th = im.height
    tw = im.width
    #Crop the image
    l = crop_image(im)
    top = l[1]
    bottom = l[2]
    im.crop(0,l[1],im.width,l[2])
    #We rotate so we can use the same code!
    im.rotate(90)
    print im.height, im.width
    l = crop_image(im)
    im.crop(0,l[1],im.width,l[2])
    left = l[1]
    right = l[2]
    im.rotate(-90)
    #We open the original image so we can crop it too
    original = filename.replace('-t','')
    oim = Image(filename=original)
    ow = oim.width
    oh = oim.height
    oim.crop((left * ow) /tw,(top * oh) / th,(right * ow) / tw,(bottom * oh) / th)
    im.save(filename=filename+'crop.png')
Example #40
0
'''

    End of configuration

'''

if DEBUG_DIR:
    os.mkdir(DEBUG_DIR)

pair_file = open(PAIR_FILE, 'a')
view_file = open(VIEW_FILE, 'a')

mask_input = None
if MASK_IMAGE_FILE:
    mask_input = Image(filename=MASK_IMAGE_FILE)
    mask_input.crop(CROP_LEFT, CROP_TOP, mask_input.width - CROP_RIGHT,
                    mask_input.height - CROP_BOTTOM)
    mask_input.rotate(ROTATE)

background = None
background_cv = None

for led_index in range(START_LED, TOTAL_LEDS):
    # resolve background
    potential_background = "%s/%05d-background.jpg" % (PHOTO_DIR, led_index)
    if os.path.isfile(potential_background):
        background = Image(filename=potential_background)
        background.crop(CROP_LEFT, CROP_TOP, background.width - CROP_RIGHT,
                        background.height - CROP_BOTTOM)
        background.rotate(ROTATE)
        if mask_input:
            background.composite(mask_input)
Example #41
0
File: drishti.py Project: neohex/Drishti
        print(img+" bad image")
        continue # If image is broken we continue our loop ignoring it
    else:
        orIm = Image(filename=img)
 
 
    # Read the image
    image = cv2.imread(img)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # OpenCV needs a grayscale image for detection
 
    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray, # image for detection
        scaleFactor=1.1, # you can adjust this params to increase detection quality
        minNeighbors=5,  # and
        minSize=(30, 30) # this too
        #flags = cv2.CV_HAAR_SCALE_IMAGE
    )
 
 
    i = 1  # counter
 
    # Edit image and save it to data folder
    for (x, y, w, h) in faces:
        imgg=Image(filename=img)
        orIm.crop(x,y,x+w,y+h) # cropping out the face
        orIm.resize(256,256) # face image resizing
        orIm.save(filename=img.replace('images/','exit_data/'+str(i))) # save it to data folder
        orIm = Image(imgg) # if picture has more than 1 face reload it
        i+=1
Example #42
0
def download_image(request,
                   datafile_id,
                   region,
                   size,
                   rotation,
                   quality,
                   format=None):  #@ReservedAssignment
    # Get datafile (and return 404 if absent)
    try:
        datafile = DataFile.objects.get(pk=datafile_id)
    except DataFile.DoesNotExist:
        return HttpResponseNotFound()

    is_public = datafile.is_public()
    if not is_public:
        # Check users has access to datafile
        if not has_datafile_download_access(request=request,
                                            datafile_id=datafile.id):
            return HttpResponseNotFound()

    buf = StringIO()
    try:
        file_obj = datafile.get_image_data()
        if file_obj is None:
            return HttpResponseNotFound()
        from contextlib import closing
        with closing(file_obj) as f:
            with Image(file=f) as img:
                if len(img.sequence) > 1:
                    img = Image(img.sequence[0])
                # Handle region
                if region != 'full':
                    x, y, w, h = map(int, region.split(','))
                    img.crop(x, y, width=w, height=h)
                # Handle size
                if size != 'full':
                    # Check the image isn't empty
                    if 0 in (img.height, img.width):
                        return _bad_request('size',
                                            'Cannot resize empty image')
                    # Attempt resize
                    if not _do_resize(img, size):
                        return _bad_request('size',
                                            'Invalid size argument: %s' % size)
                # Handle rotation
                if rotation:
                    img.rotate(float(rotation))
                # Handle quality (mostly by rejecting it)
                if quality not in ['native', 'color']:
                    return _get_iiif_error(
                        'quality',
                        'This server does not support greyscale or bitonal quality.'
                    )
                # Handle format
                if format:
                    mimetype = mimetypes.types_map['.%s' % format.lower()]
                    img.format = format
                    if mimetype not in ALLOWED_MIMETYPES:
                        return _invalid_media_response()
                else:
                    mimetype = datafile.get_mimetype()
                    # If the native format is not allowed, pretend it doesn't exist.
                    if mimetype not in ALLOWED_MIMETYPES:
                        return HttpResponseNotFound()
                img.save(file=buf)
                response = HttpResponse(buf.getvalue(), content_type=mimetype)
                response['Content-Disposition'] = \
                    'inline; filename="%s.%s"' % (datafile.filename, format)
                # Set Cache
                if is_public:
                    patch_cache_control(response, public=True, max_age=MAX_AGE)
                else:
                    patch_cache_control(response,
                                        private=True,
                                        max_age=MAX_AGE)
                return response
    except WandException:
        return HttpResponseNotFound()
    except ValueError:
        return HttpResponseNotFound()
    except IOError:
        return HttpResponseNotFound()
Example #43
0
class Composer:
    def __init__(self, filename):
        self.filename = filename
        self.bg_img = None

    def load_image(self):
        with Image(filename=self.filename) as original:
            self.bg_img = Image(original)
        # remove the alpha channel, if any
        self.bg_img.alpha_channel = False

    def save_image(self, filename):
        self.bg_img.compression_quality = 100
        self.bg_img.save(filename=filename)
        return filename

    @staticmethod
    def generate_filename(filename, i=None):
        filename = filename.replace('/', '_')
        if i is not None:
            return 'tmp/masked_%s_%03d.jpg' % (filename, i)
        return 'tmp/masked_%s' % filename

    def compose_photo_visitelche(self):
        def clamp(number, minn, maxn):
            return max(min(maxn, number), minn)

        self.load_image()
        mask_img = Image(filename='assets/elche.png')
        mask_w = (self.bg_img.width / 2) * (self.bg_img.height /
                                            self.bg_img.width)
        mask_w = clamp(mask_w, self.bg_img.width / 2.5,
                       self.bg_img.width / 1.5)
        mask_w = int(mask_w)
        mask_h = self.bg_img.height
        mask_img.transform(resize='%dx%d' % (mask_w, mask_h))
        self.bg_img.composite(mask_img, left=self.bg_img.width - mask_w, top=0)

        return self.save_image(self.generate_filename(self.filename))

    def compose_photo_bulo(self):
        return self._compose_photo_simple(
            ['assets/bulo%d.png' % random.randint(1, 3)])

    def compose_photo_superbulo(self):
        return self._compose_photo_simple(
            ['assets/bulo%d.png' % i for i in range(1, 4)])

    def _compose_photo_simple(self, filenames):
        self.load_image()

        for filename in filenames:
            mask_img = Image(filename=filename)
            mask_img.transform(resize='%dx%d' %
                               (self.bg_img.width, self.bg_img.height))
            self.bg_img.composite(mask_img, gravity='center')

        return self.save_image(self.generate_filename(self.filename))

    def compose_file_visitelche(self):
        return self._compose_file_simple(FFMPEG_CMD_VISITELCHE)

    def compose_file_pescanova(self):
        return self._compose_file_simple(FFMPEG_CMD_PESCANOVA)

    def _compose_file_simple(self, cmd):
        file_dest = self.generate_filename(self.filename)

        subprocess.call(cmd.format(source=self.filename, dest=file_dest),
                        shell=True)
        if not os.path.exists(file_dest):
            raise ValueError('ffmpeg did not output anything')

        return file_dest

    async def compose_photo_alvise(self, text=None, **kwargs):
        self.load_image()

        frame = await self._compose_alvise(self.bg_img, text=text, count=1)
        return frame[0]

    async def compose_file_megaalvise(self, text=None, **kwargs):
        self.load_image()
        file_dest = self.generate_filename(self.filename) + '.mp4'
        glob = self.generate_filename(self.filename) + '_*.jpg'

        invalid_w, invalid_h = self.bg_img.width % 2 == 1, self.bg_img.height % 2 == 1
        if invalid_w or invalid_h:
            self.bg_img.crop(0,
                             0,
                             width=self.bg_img.width -
                             1 if invalid_w else self.bg_img.width,
                             height=self.bg_img.height -
                             1 if invalid_h else self.bg_img.height)

        await self._compose_alvise(self.bg_img,
                                   text=text,
                                   count=MEGAALVISE_FRAME_COUNT,
                                   callback=kwargs.get('callback'),
                                   callback_args=kwargs.get('callback_args'))

        if kwargs.get('callback') and kwargs.get('callback_args'):
            await kwargs.get('callback')(kwargs.get('callback_args')[0],
                                         MEGAALVISE_FRAME_COUNT,
                                         MEGAALVISE_FRAME_COUNT)

        cmd = FFMPEG_CMD_MEGAALVISE.format(source=glob, dest=file_dest)
        subprocess.call(cmd, shell=True)
        if not os.path.exists(file_dest):
            raise ValueError('ffmpeg did not output anything')

        return file_dest

    async def _compose_alvise(self, bg_img, text=None, count=1, **kwargs):
        with Drawing() as drawing:
            # fill the drawing primitives
            drawing.font = 'assets/HelveticaNeueLTCom-Md.ttf'
            drawing.gravity = 'north_west'
            drawing.fill_color = Color('#56fdb4')
            text = text if text else '@Alvisepf'

            # try to determine what a good font size would be
            string_list = text.split('\n')
            longest_string = len(max(string_list, key=len))
            line_count = len(string_list)
            drawing.font_size = max(
                min(bg_img.width / longest_string * 1.5,
                    bg_img.height / line_count * 1.5), 4)

            # the drawing has some padding so ascenders and descenders do not get truncated
            metrics = drawing.get_font_metrics(bg_img, text, '\n' in text)
            mask_w_orig, mask_h_orig = metrics.text_width, metrics.text_height + metrics.descender
            mask_w, mask_h = int(mask_w_orig * 1.02), int(mask_h_orig * 1.1)
            drawing.text(int((mask_w - mask_w_orig) / 2),
                         int((mask_h - mask_h_orig) / 2), text)

            # create a mask image to draw the text on to, and...
            with Image(width=mask_w, height=mask_h) as mask_img:
                # draw the text into the mask image
                drawing.draw(mask_img)
                original_mask_img = Image(mask_img)

                frames = []
                for i in range(count):
                    mask_img = Image(original_mask_img)
                    # rotate the mask
                    mask_img.rotate(random.uniform(-35, -5))
                    # calculate what a smaller background image would look like
                    scaling_factor = random.uniform(.5, .7)
                    bg_img_scaled_w = bg_img.width * scaling_factor
                    bg_img_scaled_h = bg_img.height * scaling_factor
                    # scale the mask to fit into that smaller background image
                    mask_img.transform(resize='%dx%d' %
                                       (bg_img_scaled_w, bg_img_scaled_h))
                    # calculate a random position inside the background image for it
                    offset_left = random.randint(0,
                                                 bg_img.width - mask_img.width)
                    offset_top = random.randint(
                        0, bg_img.height - mask_img.height)
                    # and put the mask in the image
                    bg_img.composite(mask_img,
                                     left=offset_left,
                                     top=offset_top)

                    frames.append(
                        self.save_image(
                            self.generate_filename(self.filename, i)))

                    if kwargs.get('callback') and kwargs.get('callback_args'):
                        await kwargs.get('callback')(
                            kwargs.get('callback_args')[0], i, count)

                original_mask_img.destroy()

        return frames
Example #44
0
def generate(count):
    # ---------------get three colors-------------------------------
    colorString = random.choice(result)
    color = []
    for i in colorString:
        color += [int(i)]
    # for i in range(len(color)):
    #     color[i] = math.floor(color[i]/255*65535)
    color1 = color[0:3]
    color2 = color[3:6]
    color3 = color[6:9]
    # --------------------------------------------------------------

    # ----------get the base layer texture--------------------------
    Scenes = pathwalk('./SceneData')

    randomScene = random.choice(Scenes)
    randomScene = randomScene[0] + '/' + randomScene[1]
    print(randomScene)
    randomSceneImage = Image(filename=randomScene)

    widthRange = randomSceneImage.size[0] - 100
    heightRange = randomSceneImage.size[1] - 32

    randomSceneImage.crop(left=random.randint(0, widthRange),
                          top=random.randint(0, heightRange),
                          width=100,
                          height=32)
    # randomSceneImage.save(filename='.\\photoWand\\'+str(j+1) + '_texture.jpg')
    # --------------------------------------------------------------

    # ----------create the base layer, base texture +base color-----

    baseImage = Image(
        width=100,
        height=32,
        background=Color('rgb(' + str(color1[0]) + ',' + str(color1[1]) + ',' +
                         str(color1[2]) + ')'))

    # print('base_color = ' + 'rgb('+str(color1[0])+','+str(color1[1])+','+str(color1[2])+')')
    baseImage.composite_channel(channel='undefined',
                                image=randomSceneImage,
                                operator='blend',
                                left=0,
                                top=0)
    baseImage.gaussian_blur(4, 10)
    baseImage.resolution = (96, 96)
    # --------------------------------------------------------------

    # -----generate font--------------------------------------------
    word = randomWords()
    fonts = pathwalk('./fonts/font_en/')
    randomFont = random.choice(fonts)
    randomFont = randomFont[0] + randomFont[1]

    initialPointsize = 45

    draw = Drawing()
    draw.font = randomFont

    tmp = int(math.floor(abs(random.gauss(0, 1)) * 6))
    if random.randint(1, 2) == 1:
        rotateX = random.randint(0, tmp)
    else:
        rotateX = random.randint(360 - tmp, 360)

    draw.rotate(rotateX)
    # --------------------------------------------------------------
    # --------get suitable FontPointSize----------------------------
    draw.font_size = initialPointsize
    metric = draw.get_font_metrics(image=baseImage, text=word)

    while metric.text_width > 100 or metric.text_height > 36:
        initialPointsize -= 5
        draw.font_size = initialPointsize
        metric = draw.get_font_metrics(image=baseImage, text=word)
    # --------------------------------------------------------------

    # ----------italic----------------------------------------------
    if random.random() > 0.5:
        draw.font_style = 'italic'
    # --------------------------------------------------------------

    # ----------underline-------------------------------------------
    if random.random() > 0.5:
        draw.text_decoration = 'underline'
    # --------------------------------------------------------------

    # ----------gravity---------------------------------------------
    draw.gravity = 'center'
    # --------------------------------------------------------------

    # --------------shadow/border-----------------------------------
    if random.random() < 0.5:
        # shadow
        addx = math.ceil(random.gauss(0, 2))
        addy = math.ceil(random.gauss(0, 2))
        draw.fill_color = Color('black')
        draw.text(x=abs(int(addx)), y=abs(int(addy)), body=word)

    else:
        # border
        draw.stroke_color = Color('rgb(' + str(color3[0]) + ',' +
                                  str(color3[1]) + ',' + str(color3[2]) + ')')
        draw.stroke_width = math.ceil(initialPointsize / 10) - 1
    # --------------------------------------------------------------

    # ----------print word------------------------------------------
    draw.fill_color = Color('rgb(' + str(color2[0]) + ',' + str(color2[1]) +
                            ',' + str(color2[2]) + ')')
    draw.text(x=0, y=0, body=word)
    draw.draw(baseImage)
    # --------------------------------------------------------------

    # ------------gray----------------------------------------------
    baseImage.colorspace = 'gray'
    # --------------------------------------------------------------

    print(word)
    baseImage.save(filename='./photo_en/' + str(count + 1) + '_' + word +
                   '.jpg')
Example #45
0
def resize_image(path=None, blob=None, img=None, name=None,
                 fmt='jpeg', auto_orient=True, upscale=False):
    """A coroutine that resizes a single image multiple times

    Note that the same image buffer is used across multiple operations so
    operations should be ordered from highest-quality to lowest.

    Parameters:
     - path/blob/img: Source data  (img is a Wand Image object)
     - name: The name of the file (for logging purposes only)
     - fmt: The image format of the resulting images (default: 'jpeg')
     - auto_orient: Automatically orient image before processing (default: true)
     - upscale: Upscale images to fit the desired resolution if they're too small
                (default: False)

    Receives a 4-tuple of (size, quality, square, fp)
     - size: The image will be resized so that the longest edge is this many pixels.
             If None the image will not be resized.
     - quality: The JPEG quality level
     - square: If true, the images will be cropped to square before being resized
     - fp: A file-like object to write() the result into
    """
    if sum([bool(path), bool(blob), bool(img)]) != 1:
        raise ValueError("One of 'path', 'blob', or 'img' is required")
    if path:
        img = Image(filename=path)
    elif blob:
        img = Image(blob=blob)

    with img:
        # If there are multiple frames only use the first one (this
        # prevents GIFs and ICOs from exploding their frames into
        # individual images)
        if img.sequence:
            for _ in range(1, len(img.sequence)):
                img.sequence.pop()

        # Rotation and conversion to desired output format
        if auto_orient:
            img.auto_orient()
        img.format = fmt

        while True:
            size, quality, square, fp = yield

            __log__.debug("[resizing] %s -> %s%s",
                name or "<img data>",
                "{}px".format(size) if size else "fullsize",
                ", square" if square else ""
            )

            if square:
                crop = min(img.size)
                img.crop(width=crop, height=crop, gravity='center')
                if size is not None and (upscale or size < crop):
                    img.resize(size, size)
            elif size is not None:
                # Work around a bug in Wand's image transformation by
                # manually calculating the scaled dimensions and resizing
                ratio = size/max(img.size)
                if upscale or ratio < 1:
                    img.resize(*[round(x*ratio) for x in img.size])

            img.compression_quality = quality

            try:
                img.save(file=fp)
            except IOError as e:
                __log__.error("[error] Failed to write image: %s", e, exc_info=True)
                raise
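
A hypothetical way to drive the coroutine above: prime it with next(), then send() one (size, quality, square, fp) tuple per rendition, ordered from highest quality to lowest as the docstring recommends.

import io

resizer = resize_image(path='input.jpg', name='input.jpg')
next(resizer)                            # run the setup code up to the first yield

full = io.BytesIO()
thumb = io.BytesIO()
resizer.send((1024, 85, False, full))    # long edge <= 1024 px, JPEG quality 85
resizer.send((200, 70, True, thumb))     # 200 px center-cropped square thumbnail
resizer.close()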