Code Example #1
    def __parse_image_data__(self, data):
        """
        Load the image from data with PIL
        """

        try:
            parser = ImageFile.Parser()
            parser.feed(data)
            self.__image__ = parser.close()
        except IOError:
            raise AlbumArtError('Error parsing albumart image data')

        try:
            self.__mimetype__ = PIL_MIME_MAP[self.__image__.format]
            if self.__mimetype__ is None:
                raise AlbumArtError('Error detecting image format')
        except KeyError:
            # Capture the format before discarding the image so the error
            # message below does not dereference None.
            image_format = self.__image__.format
            self.__image__ = None
            raise AlbumArtError('Unsupported PIL image format: {}'.format(
                image_format))

        if self.__image__.mode != 'RGB':
            self.__image__ = self.__image__.convert('RGB')
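
Note: the method above relies on PIL_MIME_MAP and AlbumArtError, which are defined elsewhere in that project. A minimal sketch of plausible stand-ins (assumptions added here for readability, not the project's actual definitions):

class AlbumArtError(Exception):
    """Raised when album art image data cannot be parsed."""

# Map PIL format names to MIME types (hypothetical subset; extend as needed).
PIL_MIME_MAP = {
    'JPEG': 'image/jpeg',
    'PNG': 'image/png',
    'GIF': 'image/gif',
    'BMP': 'image/bmp',
}
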
Code Example #2
    def merge(self):
        # This function will merge every layer together

        # Create a batch list
        batch = sorted(self.result.keys())
        # Convert bytes to image objects
        images = []
        for i in batch:
            # Create parser
            p = ImageFile.Parser()
            # Feed data to parser
            self.result[i].seek(0)
            p.feed(self.result[i].read())
            # Turn the parsed data into an image object
            images.append(p.close())
        # Merge each layer together
        bottomImage = images[0].convert("RGBA")

        for i in range(1, len(images)):
            # merge
            bottomImage = Image.alpha_composite(bottomImage,
                                                images[i].convert("RGBA"))
        return bottomImage
Code Example #3
File: utils.py  Project: SantiagoYoung/aidingwang
def upload_image(_file, path="/business_licence/%Y/%m/%d/"):
    """
    图片上传函数
    :param _file:
    :param path: eg: "/business_licence/%Y/%m/%d/"
    :return:
    """
    if _file:
        path = os.path.join(settings.MEDIA_ROOT + path, 'upload')
        file_name = str(uuid.uuid1()) + ".jpg"
        path_file = os.path.join(path, file_name)
        parser = ImageFile.Parser()
        for chunk in _file.chunks():
            parser.feed(chunk)
        img = parser.close()
        try:
            if img.mode != "RGB":
                img = img.convert("RGB")
            img.save(path_file, 'jpeg', quality=100)
        except Exception as e:
            return False
        return True
    return False
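
The function above assumes the usual module-level imports for a Django upload helper; a sketch of what the top of utils.py presumably contains, inferred from the names used (an assumption, not the project's actual header):

import os
import uuid

from django.conf import settings
from PIL import ImageFile
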
Code Example #4
    def handleResponse(self,request,data):
        try:
            isImage = getattr(request,'isImage')
        except AttributeError:
            isImage = False

        if isImage:
            try:
                image_type=request.imageType
                #For some reason more images get parsed using the parser
                #rather than a file...PIL still needs some work I guess
                p = ImageFile.Parser()
                p.feed(data)
                im = p.close()
                im=im.transpose(Image.ROTATE_180)
                output = StringIO()
                im.save(output,format=image_type)
                data=output.getvalue()
                output.close()
                logging.info("Flipped image")
            except Exception as e:
                print "Error: %s" % e
        return {'request':request,'data':data}
Code Example #5
File: thumber.py  Project: jeffisabelle/reading.pub
def get_sizes(uri):
    """
    http://stackoverflow.com/questions/8915296/python-image-library-fails-with-message-decoder-jpeg-not-available-pil

    needs libjpeg etc. installed system-wide
    """
    # get file size *and* image size (None if not known)
    file = urllib.urlopen(uri)
    size = file.headers.get("content-length")
    if size:
        size = int(size)

    p = ImageFile.Parser()
    while 1:
        data = file.read(1024)
        if not data:
            break
        p.feed(data)
        if p.image:
            return size, p.image.size
    file.close()
    return size, None
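
The same chunked-parsing idea ported to Python 3's urllib.request, as a self-contained sketch (the 1024-byte chunk size simply mirrors the examples here):

from urllib.request import urlopen

from PIL import ImageFile


def get_remote_image_size(uri, chunk_size=1024):
    """Return (file size in bytes or None, (width, height) or None) without
    downloading the whole image."""
    with urlopen(uri) as resp:
        size = resp.headers.get("Content-Length")
        size = int(size) if size else None
        parser = ImageFile.Parser()
        while True:
            data = resp.read(chunk_size)
            if not data:
                break
            parser.feed(data)
            if parser.image:
                # Enough of the header has been parsed to know the dimensions.
                return size, parser.image.size
    return size, None
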
Code Example #6
File: urlAnalyzer.py  Project: winstonll/barima
def getsizes(url):
    req = Request(url)
    try:
        file = urlopen(req, timeout=10)
    except:
        return (0)
    else:
        size = file.headers.get("content-length")
        if size: size = int(size)
        p = ImageFile.Parser()
        while 1:
            data = file.read(1024)
            if not data:
                break
            try:
                p.feed(data)
            except:
                return (0)
            if p.image:
                return p.image.size[0] * p.image.size[1]
        file.close()
        return 0
Code Example #7
def __compress_illust(data: bytes) -> bytes:
    """
    压缩图片(图片以bytes形式传递)
    :param data: 图片
    """
    p = ImageFile.Parser()
    p.feed(data)
    img = p.close()

    w, h = img.size
    if w > compress_size or h > compress_size:
        ratio = min(compress_size / w, compress_size / h)
        img_cp = img.resize((int(ratio * w), int(ratio * h)), Image.ANTIALIAS)
    else:
        img_cp = img.copy()
    img_cp = img_cp.convert("RGB")

    with BytesIO() as bio:
        img_cp.save(bio,
                    format="JPEG",
                    optimize=True,
                    quality=compress_quantity)  # Pillow's JPEG writer expects 'quality'
        return bio.getvalue()
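
compress_size and compress_quantity are module-level settings assumed to be defined elsewhere in that file. A hedged sketch of how the helper might be configured and called (values and file names are illustrative only):

from io import BytesIO

from PIL import Image, ImageFile

compress_size = 1200      # longest edge in pixels (illustrative)
compress_quantity = 80    # JPEG quality passed to save() (illustrative)

# Example usage: compress raw bytes read from disk and write the JPEG back out.
with open("illust_in.png", "rb") as f:         # hypothetical input file
    jpeg_bytes = __compress_illust(f.read())
with open("illust_out.jpg", "wb") as f:        # hypothetical output file
    f.write(jpeg_bytes)
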
Code Example #8
def getsizes(uri):
    # get file size *and* image size (None if not known)

    file = urllib.urlopen(uri)
    size = file.headers.get("content-length")
    if size: size = int(size)
    p = ImageFile.Parser()
    while 1:
        data = file.read(1024)
        if not data:
            break
        p.feed(data)
        if p.image:
            return size, p.image.size
    file.close()
    return size, None


#parse('http://www.huffingtonpost.com/entry/david-cameron-dodgy_us_570bf446e4b0885fb50dc004')
#parse('http://www.huffingtonpost.com/entry/ted-cruz-gold-standard-republican_us_571196bfe4b06f35cb6fbac6?cps=gravity_2425_-8385480002285021224')

#parse('http://www.theblaze.com/stories/2016/04/12/trump-blasts-rnc-chairman-reince-priebus-should-be-ashamed-of-himself/')
Code Example #9
def get_image_size(url):
    """
    Gets the dimensions of a web image. url must be a direct link to the image,
    currently little support around this. Will timeout if 
    
    Args:
        url (string): The url of the hosted image
    
    Returns:
        tuple(float, float): (image width, image height). 
        on failure: (None, None).
    """
    width = height = None
    try:
        file = urllib2.urlopen(url)
    except:
        print("urllib2.urlopen failed.", end='')
        return width, height
    try:
        p = ImageFile.Parser()
    except:
        print("ImageFile.Parser failed.", end='')
        return width, height

    while 1:
        data = file.read(1024)
        if not data:
            print('EOF reached.', end='')
            break
        p.feed(data)
        if p.image:
            w, h = p.image.size
            width = float(w)
            height = float(h)
            break
    file.close()
    return width, height
Code Example #10
File: functions.py  Project: wynick27/calibre
def getimagesize(url):
    """
    Attempts to determine an image's width and height, and returns a string
    suitable for use in an <img> tag, or None in case of failure.
    Requires that PIL is installed.

    >>> getimagesize("http://www.google.com/intl/en_ALL/images/logo.gif")
    ... #doctest: +ELLIPSIS, +SKIP
    'width="..." height="..."'

    """

    try:
        from PIL import ImageFile
    except ImportError:
        try:
            import ImageFile
        except ImportError:
            return None

    try:
        import urllib2
    except ImportError:
        return None

    try:
        p = ImageFile.Parser()
        f = urllib2.urlopen(url)
        while True:
            s = f.read(1024)
            if not s:
                break
            p.feed(s)
            if p.image:
                return 'width="%i" height="%i"' % p.image.size
    except (IOError, ValueError):
        return None
Code Example #11
File: helpers.py  Project: zhaoshiling1017/flaskbb
def get_image_info(url):
    """Returns the content-type, image size (kb), height and width of
    an image without fully downloading it.

    :param url: The URL of the image.
    """
    r = requests.get(url, stream=True)
    image_size = r.headers.get("content-length")
    image_size = float(image_size) / 1000  # in kilobyte
    image_max_size = 10000
    image_data = {
        "content_type": "",
        "size": image_size,
        "width": 0,
        "height": 0
    }

    # let's set a hard limit of 10 MB
    if image_size > image_max_size:
        return image_data

    data = None
    parser = ImageFile.Parser()

    while True:
        data = r.raw.read(1024)
        if not data:
            break

        parser.feed(data)
        if parser.image:
            image_data["content_type"] = parser.image.format
            image_data["width"] = parser.image.size[0]
            image_data["height"] = parser.image.size[1]
            break

    return image_data
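
A usage sketch with a placeholder URL (the snippet assumes requests and PIL.ImageFile are already imported at module level):

info = get_image_info("https://example.com/banner.png")   # placeholder URL
print(info["content_type"], info["width"], info["height"])
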
Code Example #12
    def response(self, response, request, data):
        try:
            isImage = getattr(request, 'isImage')
        except AttributeError:
            isImage = False

        if isImage:
            try:
                #For some reason more images get parsed using the parser
                #rather than a file...PIL still needs some work I guess
                p = ImageFile.Parser()
                p.feed(data)
                im = p.close()
                im = im.transpose(Image.ROTATE_180)
                output = StringIO()
                im.save(output, format=self.imageType)
                data = output.getvalue()
                output.close()
                self.clientlog.info("Flipped image", extra=request.clientInfo)
            except Exception as e:
                self.clientlog.info("Error: {}".format(e),
                                    extra=request.clientInfo)

        return {'response': response, 'request': request, 'data': data}
Code Example #13
def listdata(request):
    """展示文章列表,更改前端展示图片"""
    articleall = models.Article.objects.order_by("-Article_time")
    banner = models.banners.objects.all()
    VisitNumber = models.VisitNumber.objects.all()
    Userip = models.Userip.objects.all()
    DayNumber = models.DayNumber.objects.all()
    try:
        res = request.GET.get("del")
        print "res--->{}".format(res)
        article = models.Article.objects.get(id=res).delete()
    except:
        pass

    if request.method == "POST":
        f1 = request.FILES['testimg']
        fid = request.POST.get("id")
        parser = ImageFile.Parser()
        file_suffix = f1.name.split('.')[-1]
        f1.name = str(uuid.uuid1()) + '.' + file_suffix
        for chunk in f1.chunks():
            parser.feed(chunk)
        img = parser.close()
        name = '%s%s' % (settings.MEDIA_ROOT, f1.name)  ## full save path: directory plus file name

        sqlNamePhone = "/static/media/" + f1.name
        img.save(name)
        p = models.Article.objects.get(id=fid)
        p.Article_images = sqlNamePhone
        p.save()

        return render(request, "listdata.html", locals())

    else:

        return render(request, "listdata.html", locals())
Code Example #14
File: view.py  Project: torymac1/facetimes
def _upload(file):
    '''Image Uploading and Storing'''
    if file:
        path = os.path.join(settings.MEDIA_ROOT, 'static/upload')
        file_name = str(uuid.uuid1()) + ".jpg"
        path_file = os.path.join(path, file_name)

        parser = ImageFile.Parser()
        for chunk in file.chunks():
            parser.feed(chunk)
        try:
            img = parser.close()
        except OSError:
            return 2, 'Not an image.'

        try:
            if img.mode != "RGB":
                img = img.convert("RGB")
            img.save(path_file, 'jpeg', quality=100)
        except Exception as e:
            print(str(e))
            return 3, 'Cannot save as jpg file.'
        return 1, path_file
    return 4, 'No file attached.'
Code Example #15
def is_hd(url, min_width, min_height):
    """
    Returns False if the image at the URL is not HD (as specified by min_width/min_height).
    """
    file = urllib.request.urlopen(url)
    size = file.headers.get("content-length")
    if size:
        size = int(size)
    parser = ImageFile.Parser()

    while 1:
        data = file.read(1024)
        if not data:
            break
        parser.feed(data)
        if parser.image:
            # return p.image.size
            if parser.image.size[0] >= min_width and parser.image.size[
                    1] >= min_height:
                return True
            return False

    file.close()
    return False
Code Example #16
File: search_engine.py  Project: ipazc/oculus-crawl
    def _get_url_size(self, url):
        """
        Request the size (width and height) of a URL image.
        :param url:
        :return: [width, height]
        """
        size = [0, 0]

        with urllib.request.urlopen(url) as file:
            image_parser = ImageFile.Parser()

            while True:
                data = file.read(1024)

                if not data:
                    break

                image_parser.feed(data)

                if image_parser.image:
                    size = image_parser.image.size
                    break

        return size
Code Example #17
File: views.py  Project: plutokamin/pujia8
def make_avatar(file_obj):
    import time, random
    fn = time.strftime('%Y%m%d%H%M%S')
    fn = fn + '_%d' % random.randint(0, 100)

    url = 'static/avatars/%s.jpg' % fn
    from PIL import Image, ImageFile
    parser = ImageFile.Parser()
    for chunk in file_obj.chunks():
        parser.feed(chunk)
    img = parser.close()
    w, h = img.size
    if w != 80 or h != 80:
        if w > h:
            diff = (w - h) // 2
            img = img.crop((diff, 0, w - diff, h))
        else:
            diff = (h - w) // 2
            img = img.crop((0, diff, w, h - diff))
        img = img.resize((80, 80), Image.ANTIALIAS)
    # Convert outside the resize branch so non-RGB uploads that are already
    # 80x80 can still be saved as JPEG below.
    if img.mode != "RGB":
        img = img.convert("RGB")
    img.save(url, quality=90)
    return url[7:]
Code Example #18
File: test_imagefile.py  Project: chris34/pillow
    def test_raise_typeerror(self):
        with pytest.raises(TypeError):
            parser = ImageFile.Parser()
            parser.feed(1)
Code Example #19
def volunteer_twitter(request, urlname, token):
    try:
        conference = Conference.objects.select_related('series').get(
            urlname=urlname)
    except Conference.DoesNotExist:
        raise Http404()

    if not conference.has_social_broadcast:
        raise Http404()

    reg = get_object_or_404(ConferenceRegistration,
                            conference=conference,
                            regtoken=token)
    if conference.administrators.filter(pk=reg.attendee_id).exists(
    ) or conference.series.administrators.filter(pk=reg.attendee_id).exists():
        is_admin = True
        canpost = conference.twitter_postpolicy != 0
        canpostdirect = conference.twitter_postpolicy != 0
        canmoderate = conference.twitter_postpolicy in (2, 3)
    elif not conference.volunteers.filter(pk=reg.pk).exists():
        raise Http404()
    else:
        is_admin = False
        canpost = conference.twitter_postpolicy >= 2
        canpostdirect = conference.twitter_postpolicy == 4
        canmoderate = conference.twitter_postpolicy == 3

    providers = ProviderCache()

    if request.method == 'POST':
        if request.POST.get('op', '') == 'post':
            approved = False
            approvedby = None
            if is_admin:
                if conference.twitter_postpolicy == 0:
                    raise PermissionDenied()

                # Admins can use the bypass parameter to, well, bypass
                if request.POST.get('bypass', '0') == '1':
                    approved = True
                    approvedby = reg.attendee
            else:
                if conference.twitter_postpolicy in (0, 1):
                    raise PermissionDenied()

                if conference.twitter_postpolicy == 4:
                    # Post without approval for volunteers
                    approved = True
                    approvedby = reg.attendee

            # Check if we have *exactly the same tweet* in the queue already from the past
            # 5 minutes, in which case it's most likely a case of clicking too many times.
            if ConferenceTweetQueue.objects.filter(
                    conference=conference,
                    contents=request.POST['txt'],
                    author=reg.attendee,
                    datetime__gt=timezone.now() -
                    datetime.timedelta(minutes=5)):
                return _json_response({'error': 'Duplicate post detected'})

            # Now insert it in the queue, bypassing time validation since it's not an automatically
            # generated tweet.
            t = ConferenceTweetQueue(
                conference=conference,
                contents=request.POST['txt'][:280],
                approved=approved,
                approvedby=approvedby,
                author=reg.attendee,
                replytotweetid=request.POST.get('replyid', None),
            )
            if 'image' in request.FILES:
                t.image = request.FILES['image'].read()
                # Actually validate that it loads as PNG or JPG
                try:
                    p = ImageFile.Parser()
                    p.feed(t.image)
                    p.close()
                    image = p.image
                    if image.format not in ('PNG', 'JPEG'):
                        return _json_response({
                            'error':
                            'Image must be PNG or JPEG, not {}'.format(
                                image.format)
                        })
                except Exception as e:
                    return _json_response({'error': 'Failed to parse image'})

                MAXIMAGESIZE = 1 * 1024 * 1024
                if len(t.image) > MAXIMAGESIZE:
                    # Image is bigger than 1MB, but it is a valid image, so try to rescale it
                    # We can't know exactly how to resize it to get it to the right size, but most
                    # likely if we cut the resolution by n% the filesize goes down by > n% (usually
                    # an order of magnitude), so we base it on that and just fail again if that didn't
                    # work.
                    rescalefactor = MAXIMAGESIZE / len(t.image)
                    newimg = image.resize((int(image.size[0] * rescalefactor),
                                           int(image.size[1] * rescalefactor)),
                                          Image.ANTIALIAS)
                    b = io.BytesIO()
                    newimg.save(b, image.format)
                    t.image = b.getvalue()
                    if len(t.image) > MAXIMAGESIZE:
                        return _json_response({
                            'error':
                            'Image file too big and automatic resize failed'
                        })

            t.save()
            if request.POST.get('replyid', None):
                orig = ConferenceIncomingTweet.objects.select_related(
                    'provider').get(conference=conference,
                                    statusid=get_int_or_error(
                                        request.POST, 'replyid'))
                orig.processedat = timezone.now()
                orig.processedby = reg.attendee
                orig.save()
                # When replying to a tweet, it goes to the original provider *only*
                t.remainingtosend.set([orig.provider])

            return _json_response({})
        elif request.POST.get('op', None) in ('approve', 'discard'):
            if not is_admin:
                # Admins can always approve, but volunteers only if policy allows
                if conference.twitter_postpolicy != 3:
                    raise PermissionDenied()

            try:
                t = ConferenceTweetQueue.objects.get(conference=conference,
                                                     approved=False,
                                                     pk=get_int_or_error(
                                                         request.POST, 'id'))
            except ConferenceTweetQueue.DoesNotExist:
                return _json_response({'error': 'Tweet already discarded'})
            if t.approved:
                return _json_response(
                    {'error': 'Tweet has already been approved'})

            if request.POST.get('op') == 'approve':
                if t.author == reg.attendee:
                    return _json_response(
                        {'error': "Can't approve your own tweets"})

                t.approved = True
                t.approvedby = reg.attendee
                t.save()
                trigger_immediate_job_run('twitter_post')
            else:
                t.delete()
            return _json_response({})
        elif request.POST.get('op', None) in ('dismissincoming', 'retweet'):
            if not is_admin:
                # Admins can always approve, but volunteers only if policy allows
                if conference.twitter_postpolicy != 3:
                    raise PermissionDenied()

            try:
                t = ConferenceIncomingTweet.objects.get(
                    conference=conference,
                    statusid=get_int_or_error(request.POST, 'id'))
            except ConferenceIncomingTweet.DoesNotExist:
                return _json_response({'error': 'Tweet does not exist'})

            if request.POST.get('op', None) == 'dismissincoming':
                if t.processedat:
                    return _json_response(
                        {'error': 'Tweet is already dismissed or replied'})

                t.processedby = reg.attendee
                t.processedat = timezone.now()
                t.save(update_fields=['processedby', 'processedat'])
            else:
                if t.retweetstate > 0:
                    return _json_response({'error': 'Tweet is already retweeted'})
                t.retweetstate = 1
                t.save(update_fields=['retweetstate'])
                trigger_immediate_job_run('twitter_post')

            return _json_response({})
        else:
            # Unknown op
            raise Http404()

    # GET request here
    if request.GET.get('op', None) == 'queue':
        # We show the queue to everybody, but non-moderators don't get to approve

        # Return the approval queue
        queue = ConferenceTweetQueue.objects.defer(
            'image',
            'imagethumb').filter(conference=conference, approved=False).extra(
                select={
                    'hasimage': "image is not null and image != ''"
                }).order_by('datetime')

        # Return the latest ones approved
        latest = ConferenceTweetQueue.objects.defer(
            'image',
            'imagethumb').filter(conference=conference, approved=True).extra(
                select={
                    'hasimage': "image is not null and image != ''"
                }).order_by('-datetime')[:5]

        def _postdata(objs):
            return [{
                'id': t.id,
                'txt': t.contents,
                'author': t.author and t.author.username or '',
                'time': t.datetime,
                'hasimage': t.hasimage,
                'delivered': t.sent,
            } for t in objs]

        return _json_response({
            'queue': _postdata(queue),
            'latest': _postdata(latest),
        })
    elif request.GET.get('op', None) == 'incoming':
        incoming = ConferenceIncomingTweet.objects.select_related(
            'provider').filter(conference=conference,
                               processedat__isnull=True).order_by('created')
        latest = ConferenceIncomingTweet.objects.select_related(
            'provider').filter(
                conference=conference,
                processedat__isnull=False).order_by('-processedat')[:5]

        def _postdata(objs):
            return [{
                'id':
                str(t.statusid),
                'txt':
                t.text,
                'author':
                t.author_screenname,
                'authorfullname':
                t.author_name,
                'time':
                t.created,
                'rt':
                t.retweetstate,
                'provider':
                t.provider.publicname,
                'media': [m for m in t.media if m is not None],
                'url':
                providers.get(t.provider).get_public_url(t),
                'replymaxlength':
                providers.get(t.provider).max_post_length,
            } for t in objs.annotate(
                media=ArrayAgg('conferenceincomingtweetmedia__mediaurl'))]

        return _json_response({
            'incoming': _postdata(incoming),
            'incominglatest': _postdata(latest),
        })
    elif request.GET.get('op', None) == 'hasqueue':
        return _json_response({
            'hasqueue':
            ConferenceTweetQueue.objects.filter(
                conference=conference,
                approved=False).exclude(author=reg.attendee_id).exists(),
            'hasincoming':
            ConferenceIncomingTweet.objects.filter(
                conference=conference, processedat__isnull=True).exists(),
        })
    elif request.GET.get('op', None) == 'thumb':
        # Get a thumbnail -- or make one if it's not there
        t = get_object_or_404(ConferenceTweetQueue,
                              conference=conference,
                              pk=get_int_or_error(request.GET, 'id'))
        if not t.imagethumb:
            # Need to generate a thumbnail here. Thumbnails are always made in PNG!
            p = ImageFile.Parser()
            p.feed(bytes(t.image))
            p.close()
            im = p.image
            im.thumbnail((256, 256))
            b = io.BytesIO()
            im.save(b, "png")
            t.imagethumb = b.getvalue()
            t.save()

        resp = HttpResponse(content_type='image/png')
        resp.write(bytes(t.imagethumb))
        return resp

    # Maximum length from any of the configured providers
    providermaxlength = {
        m.provider.publicname: providers.get(m.provider).max_post_length
        for m in ConferenceMessaging.objects.select_related('provider').filter(
            conference=conference, broadcast=True, provider__active=True)
    }

    return render(
        request, 'confreg/twitter.html', {
            'conference':
            conference,
            'reg':
            reg,
            'poster':
            canpost and 1 or 0,
            'directposter':
            canpostdirect and 1 or 0,
            'moderator':
            canmoderate and 1 or 0,
            'providerlengths':
            ", ".join(
                ["{}: {}".format(k, v) for k, v in providermaxlength.items()]),
            'maxlength':
            max((v for k, v in providermaxlength.items())),
        })
Code Example #20
    def LocalMapTileHandler(self, handler, is_imagery, layer_id):
        """Handle requests for map imagery or vector jpeg tiles."""
        if not handler.IsValidRequest():
            raise tornado.web.HTTPError(404)

        x = int(handler.request.arguments["x"][0])
        y = int(handler.request.arguments["y"][0])
        z = int(handler.request.arguments["z"][0])
        if tornado.web.globe_.IsMbtiles() and is_imagery:
            handler.write(tornado.web.globe_.ReadMapImageryTile(x, y, z))
            return

        qtnode = self.ConvertToQtNode(x, y, z)
        channel = int(handler.request.arguments["channel"][0])
        try:
            if is_imagery:
                handler.write(
                    tornado.web.globe_.ReadMapImageryPacket(
                        qtnode, channel, layer_id))
                return
            else:
                handler.write(
                    tornado.web.globe_.ReadMapVectorPacket(
                        qtnode, channel, layer_id))
                return
        except:
            # Super-sample tiles where there is missing imagery data.
            if (not is_imagery
                    or not tornado.web.globe_.IsBaseLayer(layer_id, channel)):
                return

            if (not pil_enabled
                    or not tornado.web.globe_.config_.FillMissingMapTiles()):
                if tornado.web.globe_.IsBaseLayer(layer_id, channel):
                    handler.write(self.empty_tile_)
                return

            length = len(qtnode)
            max_index = min(
                length, tornado.web.globe_.config_.MaxMissingMapTileAncestor())
            index = 1
            size = 128
            xoff = 0
            yoff = 0
            # Use shift since we are moving right to left off address.
            shift = 1
            while index < max_index:
                try:
                    if size > 0:
                        last_node = qtnode[length - index]
                        if last_node == "0":
                            yoff += shift
                        elif last_node == "1":
                            yoff += shift
                            xoff += shift
                        elif last_node == "2":
                            xoff += shift
                        elif last_node == "3":
                            pass
                        else:
                            raise (Exception("Unexpected qtnode: %s" %
                                             qtnode[:length - index]))

                    parser = ImageFile.Parser()
                    parser.feed(
                        tornado.web.globe_.ReadMapImageryPacket(
                            qtnode[:length - index], channel, layer_id))
                    image = parser.close()
                    # Skip 1 x 1 placeholders
                    if image.size[0] == 1:
                        if tornado.web.globe_.IsBaseLayer(layer_id, channel):
                            handler.write(self.empty_tile_)
                        return

                    xoffset = xoff * size
                    yoffset = yoff * size
                    image = image.crop((xoffset, yoffset, xoffset + size,
                                        yoffset + size)).resize((256, 256))
                    output = StringIO.StringIO()
                    # If has palette, use PNG.
                    if image.mode == "P":
                        image.save(output, "PNG")
                    else:
                        image.save(output, "JPEG")

                    content = output.getvalue()
                    output.close()
                    handler.write(content)
                    return
                except portable_globe.UnableToFindException, e:
                    index += 1
                    if size > 1:
                        size >>= 1  # divide by 2
                        shift <<= 1  # multiply by 2
                    else:
                        # If we get down to a single pixel,
                        # keep cutting the resolution of the
                        # offset in half to grab the "best"
                        # pixel.
                        xoff >>= 1  # divide by 2
                        yoff >>= 1  # divide by 2

            # Unable to find a near enough ancestor; show "no data".
            if tornado.web.globe_.IsBaseLayer(layer_id, channel):
                handler.write(self.empty_tile_)
Code Example #21
def photo_handler():
    """
    Main function for photo handling.

    :return:
    """
    # Get directory names
    source_dirname = app.config.get("SOURCE_FOLDER")
    original_dirname = app.config["ORIGINAL_FOLDER"]
    medium_dirname = app.config["MEDIUM_FOLDER"]
    small_dirname = app.config["SMALL_FOLDER"]
    # Connect to pcloud and get directory structure
    pcloud = pcloud_handler.PcloudHandler()
    public_cloud_id = pcloud.get_public_cloud_id()
    # Get folders from Public Folder
    subdirs, _ = pcloud.folder_contents(public_cloud_id)
    # Directory names to folder IDs - remove trailing slashes from directory names
    if source_dirname:
        source_folderid = subdirs[source_dirname[:-1]]["folderid"]
    else:
        source_folderid = public_cloud_id
    original_folderid = subdirs[original_dirname[:-1]]["folderid"]
    medium_folderid = subdirs[medium_dirname[:-1]]["folderid"]
    small_folderid = subdirs[small_dirname[:-1]]["folderid"]
    # Collect files from source directory
    _, files = pcloud.folder_contents(source_folderid)
    # Only handle accepted file types
    accepted_types = [".JPG", ".jpg"]
    files = [files[file] for file in files if Path(file).suffix in accepted_types]
    for filedata in files:
        file = filedata["name"]
        fileid = filedata["fileid"]
        app.logger.debug("Working on file {}".format(file))
        # Get file contents and convert to an image - also required to get exif for date and time of picture taken.
        content = pcloud.get_content(filedata)
        app.logger.debug("File {} length: {} (expected: {})".format(file, len(content), filedata["size"]))
        parser = ImageFile.Parser()
        parser.feed(content)
        img = parser.close()
        # Get exif information from picture
        exif = get_labeled_exif(file, img)
        app.logger.debug("EXIF: {}".format(exif))
        # Calculate new filename including date/time picture taken
        created_dt = get_created_datetime(filedata, exif)
        fn = get_filename(file, created_dt)
        create_node(fn, file, created_dt)
        # Move file to Original directory
        pcloud.movefile(fileid, original_folderid, fn)
        # Create medium image
        medium_img = to_medium(img)
        if isinstance(exif, dict):
            try:
                medium_img = rotate_image(medium_img, exif["Orientation"])
            except KeyError:
                app.logger.info("{} ({}) no Orientation in exif data".format(file, fn))
        medium_ffn = os.path.join(os.getenv('LOGDIR'), fn)
        medium_img.save(medium_ffn)
        res = pcloud.upload_file(fn, medium_ffn, medium_folderid)
        app.logger.info("File {} medium format loaded, result: {}".format(fn, res["result"]))
        os.remove(medium_ffn)
        # Create small image
        small_img = to_small(medium_img)
        small_ffn = os.path.join(os.getenv('LOGDIR'), fn)
        small_img.save(small_ffn)
        res = pcloud.upload_file(fn, small_ffn, small_folderid)
        app.logger.info("File {} small format loaded, result: {}".format(fn, res["result"]))
        os.remove(small_ffn)
    pcloud.close_connection()
    nr_files = len(files)
    app.logger.info("{} pictures have been processed.".format(nr_files))
    return nr_files
Code Example #22
def get_image_format(data):
    p = PILImageFile.Parser()
    p.feed(data)

    return p.close().format
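
Here PILImageFile is presumably just an import alias (from PIL import ImageFile as PILImageFile). A minimal usage sketch with a placeholder path:

with open("photo.jpg", "rb") as f:        # placeholder path
    print(get_image_format(f.read()))     # e.g. 'JPEG'
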
Code Example #23
def make_thumb(item,
               collection,
               interrupt_fn=None,
               force=False,
               cache=None,
               use_embedded=False,
               write_to_cache=True):
    '''
    create a thumbnail from the original image using either PIL or dcraw
    interrupt_fn = callback that returns False if routine should cancel (not implemented)
    force = True if thumbnail should be recreated even if already present
    affects thumb, thumburi members of item
    '''
    itemfile = collection.get_path(item)
    thumb_pb = None
    if cache == None and thumb_factory.has_valid_failed_thumbnail(
            itemfile, int(item.mtime)):
        if not force:
            item.thumb = False
            return False
        print 'Forcing thumbnail creation'
        uri = io.get_uri(itemfile)
        thumb_uri = thumb_factory.lookup(uri, int(item.mtime))
        if write_to_cache and thumb_uri:
            os.remove(thumb_uri)
    if not force and item.thumb == False:
        return False
    delete_thumb(item)
    ##todo: could also try extracting the thumb from the image (essential for raw files)
    ## would not need to make the thumb in that case
    print 'Creating thumbnail for', item.uid, itemfile
    t = time.time()
    try:
        uri = io.get_uri(itemfile)
        mimetype = io.get_mime_type(itemfile)
        thumb_pb = None
        if mimetype.lower().startswith('video'):
            cmd = settings.video_thumbnailer % (itemfile, )
            imdata = os.popen(cmd).read()
            image = Image.open(StringIO.StringIO(imdata))
            image.thumbnail(
                (128, 128),
                Image.ANTIALIAS)  ##TODO: this is INSANELY slow -- find out why
        else:
            try:
                mime = io.get_mime_type(itemfile)
                if use_embedded and load_embedded_thumb(item, collection):
                    thumb_pb = item.thumb
                    image = None
                    print 'Used embedded thumb'
                elif not settings.is_windows and mime in gdk_mime_types:  #todo: this is completely broken on windows
                    thumb_pb = gtk.gdk.pixbuf_new_from_file_at_size(
                        itemfile, 128, 128)
                    thumb_pb = orient_pixbuf(thumb_pb, item.meta)
                    image = None
                    print 'Opened with GDK'
                else:
                    image = Image.open(itemfile)
                    image.thumbnail((128, 128), Image.ANTIALIAS)
                    print 'Opened with PIL'
            except:
                cmd = settings.dcraw_cmd % (itemfile, )
                imdata = os.popen(cmd).read()
                if not imdata or len(imdata) < 100:
                    cmd = settings.dcraw_backup_cmd % (itemfile, )
                    imdata = os.popen(cmd).read()


#                pipe = subprocess.Popen(cmd, shell=True,
#                        stdout=PIPE) ##, close_fds=True
#                print pipe
#                pipe=pipe.stdout
#                print 'pipe opened'
#                imdata=pipe.read()
#                print 'pipe read'
                p = ImageFile.Parser()
                p.feed(imdata)
                image = p.close()
                image.thumbnail((128, 128), Image.ANTIALIAS)
                image = orient_image(image, item.meta)
                print 'Opened with DCRAW'
        if image is not None:
            thumb_pb = image_to_pixbuf(image)
        if thumb_pb is None:
            raise TypeError
    except:
        item.thumb = False
        item.thumburi = None
        if write_to_cache and cache == None:
            thumb_factory.create_failed_thumbnail(itemfile, int(item.mtime))
        print 'Error creating thumbnail for', item
        import sys
        import traceback
        tb_text = traceback.format_exc(sys.exc_info()[2])
        print tb_text
        return False
    width = thumb_pb.get_width()
    height = thumb_pb.get_height()
    uri = io.get_uri(itemfile)
    #save the new thumbnail
    try:
        if write_to_cache:
            if cache == None:
                thumb_factory.save_thumbnail(thumb_pb, uri, int(item.mtime))
                item.thumburi = thumb_factory.lookup(uri, int(item.mtime))
            else:
                if not os.path.exists(cache):
                    os.makedirs(cache)
                item.thumburi = os.path.join(
                    cache, muuid(item.uid + str(int(item.mtime)))) + '.png'
                thumb_pb.save(item.thumburi, "png")
            print 'cached at', item.thumburi
    except:
        print 'Error caching thumbnail for', item
        import sys
        import traceback
        tb_text = traceback.format_exc(sys.exc_info()[2])
        print tb_text
        item.thumb = False
        item.thumburi = None
        if write_to_cache and cache == None:
            thumb_factory.create_failed_thumbnail(itemfile, int(item.mtime))
        return False
    item.thumb = thumb_pb
    cache_thumb_in_memory(item)
    return True
Code Example #24
def pil_from_CompressedImage(msg):
    from PIL import ImageFile  # @UnresolvedImport
    parser = ImageFile.Parser()
    parser.feed(msg.data)
    res = parser.close()
    return res
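
A sketch of how such a converter is typically wired into a ROS node; the topic name is an assumption, and rospy/sensor_msgs must be available:

import rospy
from sensor_msgs.msg import CompressedImage


def _on_image(msg):
    img = pil_from_CompressedImage(msg)   # returns a PIL.Image.Image
    rospy.loginfo("received %dx%d frame", img.size[0], img.size[1])


rospy.init_node("compressed_image_listener")
rospy.Subscriber("/camera/image/compressed", CompressedImage, _on_image)  # assumed topic
rospy.spin()
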
Code Example #25
    root = '/data_new/zxbsmk/moire/trainData'
    # you need to clean the training set
    input_path = os.path.join(root, 'source')
    gt_path = os.path.join(root, 'target')
    input_imgs = [
        os.path.join(input_path, img) for img in os.listdir(input_path)
    ]
    gt_imgs = [os.path.join(gt_path, img) for img in os.listdir(gt_path)]
    input_imgs.sort()
    gt_imgs.sort()

    cot = 0
    loop = tqdm(enumerate(input_imgs), total=len(input_imgs), leave=False)
    for idx, img in loop:
        with open(img, "rb") as f:
            ImPar = ImageFile.Parser()
            chunk = f.read(2048)
            count = 2048
            while chunk != "":
                ImPar.feed(chunk)
                if ImPar.image:
                    break
                chunk = f.read(2048)
                count += 2048
            M, N = ImPar.image.size[0], ImPar.image.size[1]

        if M < 260 or N < 260:
            os.remove(input_imgs[idx])
            os.remove(gt_imgs[idx])
            cot += 1
Code Example #26
File: test_imagefile.py  Project: wikiped/Pillow
    def test_ico(self):
        with open('Tests/images/python.ico', 'rb') as f:
            data = f.read()
        with ImageFile.Parser() as p:
            p.feed(data)
            self.assertEqual((48, 48), p.image.size)
Code Example #27
def fetch_url(url, useragent, referer=None, retries=1, dimension=False):
    cur_try = 0
    nothing = None if dimension else (None, None)
    url = clean_url(url)
    if not url.startswith(('http://', 'https://')):
        return nothing

    response = None
    while True:
        try:
            response = requests.get(url,
                                    stream=True,
                                    timeout=5,
                                    headers={
                                        'User-Agent': useragent,
                                        'Referer': referer,
                                    })

            # if we only need the dimension of the image, we may not
            # need to download the entire thing
            if dimension:
                content = response.raw.read(chunk_size)
            else:
                content = response.raw.read()

            content_type = response.headers.get('Content-Type')

            if not content_type:
                return nothing

            if 'image' in content_type:
                p = ImageFile.Parser()
                new_data = content
                while not p.image and new_data:
                    try:
                        p.feed(new_data)
                    except IOError:
                        traceback.print_exc()
                        p = None
                        break
                    except ValueError:
                        traceback.print_exc()
                        p = None
                        break
                    except Exception as e:
                        # For some favicon.ico images, the image is so small
                        # that our PIL feed() method fails a length test.
                        is_favicon = (urls.url_to_filetype(url) == 'ico')
                        if not is_favicon:
                            raise e
                        p = None
                        break
                    new_data = response.raw.read(chunk_size)
                    content += new_data

                if p is None:
                    return nothing
                # return the size, or return the data
                if dimension and p.image:
                    return p.image.size
                elif dimension:
                    return nothing
            elif dimension:
                # expected an image, but didn't get one
                return nothing

            return content_type, content

        except requests.exceptions.RequestException as e:
            cur_try += 1
            if cur_try >= retries:
                log.debug('error while fetching: %s refer: %s' %
                          (url, referer))
                return nothing
        finally:
            if response is not None:
                response.raw.close()
                if response.raw._connection:
                    response.raw._connection.close()
Code Example #28
File: test_imagefile.py  Project: chris34/pillow
    def test_ico(self):
        with open("Tests/images/python.ico", "rb") as f:
            data = f.read()
        with ImageFile.Parser() as p:
            p.feed(data)
            assert (48, 48) == p.image.size
Code Example #29
File: PdftoImg.py  Project: anandasmurthy74/MagicLamp
from PIL import ImageFile

fp = open("Test1.pdf", "rb")

p = ImageFile.Parser()

while 1:
    s = fp.read(1024)
    if not s:
        break
    else:
        p.feed(s)
im = p.close()
im.save("00985413-1_img.jpg")
Code Example #30
def load_image(item,
               collection,
               interrupt_fn,
               draft_mode=False,
               apply_transforms=True,
               itemfile=None):
    '''
    load a PIL image and store it in item.image
    if transform_handlers are specified and the image has transforms, they will be applied
    '''
    if itemfile is None:
        itemfile = collection.get_path(item)
    mimetype = io.get_mime_type(itemfile)
    oriented = False
    try:
        ##todo: load by mimetype (after porting to gio)
        #        non-parsed version
        if 'original_image' in item.__dict__:
            image = item.original_image.copy()
            oriented = True
        else:
            if not mimetype.startswith('image'):
                print 'No image available for item', item, 'with mimetype', mimetype
                item.image = False
                return False
            print 'Loading Image:', item, mimetype
            if io.get_mime_type(
                    itemfile
            ) in settings.raw_image_types:  ##for extraction with dcraw
                raise TypeError
            image = Image.open(
                itemfile
            )  ## retain this call even in the parsed version to avoid lengthy delays on raw images (since this call trips the exception)
            #        parsed version
            if not draft_mode and image.format == 'JPEG':
                #parser doesn't seem to work correctly on anything but JPEGs
                f = open(itemfile, 'rb')
                imdata = f.read(10000)
                p = ImageFile.Parser()
                while imdata and len(imdata) > 0:
                    p.feed(imdata)
                    if not interrupt_fn():
                        return False
                    imdata = f.read(10000)
                f.close()
                image = p.close()
                print 'Parsed image with PIL'
            else:
                raise TypeError
    except:
        try:
            if mimetype in gdk_mime_types:
                image_pb = gtk.gdk.pixbuf_new_from_file(itemfile)
                image_pb = orient_pixbuf(image_pb, item.meta)
                print image_pb.get_has_alpha()
                print image_pb.get_n_channels()
                print image_pb.get_colorspace()
                oriented = True
                width, height = image_pb.get_width(), image_pb.get_height()
                if image_pb.get_n_channels() >= 3:
                    if image_pb.get_has_alpha():
                        image = Image.fromstring("RGBA", (width, height),
                                                 image_pb.get_pixels())
                    else:
                        image = Image.fromstring("RGB", (width, height),
                                                 image_pb.get_pixels())
                else:
                    print "GDK Parser - Can't handle image with less than 3 channel"
                    raise TypeError
                print 'Parsed image with GDK'
            else:
                if mimetype in settings.raw_image_types:
                    cmd = settings.raw_image_types[mimetype][0] % (itemfile, )
                else:
                    cmd = settings.dcraw_cmd % (itemfile, )
                imdata = os.popen(cmd).read()
                if not imdata or len(imdata) < 100:
                    cmd = settings.dcraw_backup_cmd % (itemfile, )
                    oriented = True
                    imdata = os.popen(cmd).read()
                    if not interrupt_fn():
                        return False
                p = ImageFile.Parser()
                p.feed(imdata)
                image = p.close()
                print 'Parsed image with DCRAW'
        except:
            import sys
            import traceback
            tb_text = traceback.format_exc(sys.exc_info()[2])
            print 'Error Loading Image', item, mimetype
            print tb_text
            item.image = False
            return False
    print item.meta
    if draft_mode:
        image.draft(image.mode,
                    (1024, 1024))  ##todo: pull size from screen resolution
    if not interrupt_fn():
        return
    if oriented:
        item.image = orient_image(image, {})
    else:
        item.image = orient_image(image, item.meta)
    try:
        item.imagergba = 'A' in item.image.getbands()
    except:
        item.imagergba = False
    if item.image:
        if apply_transforms:
            transformer.apply_transforms(item, interrupt_fn)
        cache_image(item)
        return True
    return False