Example #1
def adjust_img(path, width_ratio, height_ratio, min_width, min_height):
    img = Image.open(path)
    if img.mode == 'CMYK':
        img = img.convert('RGB')
    width = float( img.size[0] )
    height = float( img.size[1] )
    
    calc_width = ( width / height ) * height_ratio
    calc_height = ( height / width ) * width_ratio
    
    if calc_width < min_width:
        new_height = ( height / width ) * min_width
        dim = min_width, int(new_height)
        out = ImageOps.fit(img, dim, Image.NEAREST, 0, (0.0,0.0))
        
    elif calc_height < min_height:
        new_width = ( width / height ) * min_height
        dim = int(new_width), min_height
        out = ImageOps.fit(img, dim, Image.NEAREST, 0, (0.0,0.0))
    
    else:
        if width <= height:
            new_height = ( height / width ) * min_width
            dim = min_width, int(new_height)
            out = ImageOps.fit(img, dim, Image.ANTIALIAS, 0, (0.0,0.0))
        elif width > height:
            new_width = ( width / height ) * min_height
            dim = int(new_width), min_height
            out = ImageOps.fit(img, dim, Image.ANTIALIAS, 0, (0.0,0.0))
            
    return out
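A minimal usage sketch for adjust_img above, assuming from PIL import Image, ImageOps is already in scope; the file name and the ratio/minimum arguments are illustrative placeholders, not values from any original project.

# Hedged usage sketch: scale a local file so that neither dimension drops below the minimums.
resized = adjust_img('photo.jpg', width_ratio=400, height_ratio=300,
                     min_width=400, min_height=300)
resized.save('photo_resized.jpg')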
Example #2
 def save(self, force_insert=False, force_update=False, using=None,
          update_fields=None):
     self.topping_slug = slugify(self.topping_slug)
     super(Topping, self).save()
     sizes = {'thumbnail': {'height': 50, 'width': 50}, 'medium': {'height': 300, 'width': 300}, }
     photopath = str(self.topping_large_image.path)  # this returns the full system path to the original file
     im = Image.open(photopath)  # open the image using PIL
     # pull a few variables out of that full path
     extension = photopath.rsplit('.', 1)[1]  # the file extension
     filename = photopath.rsplit('\\', 1)[1].rsplit('.', 1)[0]  # the file name only (minus path or extension)
     fullpath = photopath.rsplit('\\', 1)[0]  # the path only (minus the filename.extension)
     # use the file extension to determine if the image is valid before proceeding
     if extension not in ['jpg', 'jpeg', 'gif', 'png']:
         return  # bail out instead of silently continuing with an unsupported file type
     # create medium image
     im = ImageOps.fit(im, (sizes['medium']['width'], sizes['medium']['height']), Image.ANTIALIAS)
     medname = filename + "_" + str(sizes['medium']['width']) + "x" + str(sizes['medium']['height']) + ".jpg"
     im.save(fullpath + '\\' + medname)
     self.topping_medium_image = '/media/'+self.filepath + medname
     # create thumbnail
     # im.thumbnail((sizes['thumbnail']['width'], sizes['thumbnail']['height']), Image.ANTIALIAS)
     im = ImageOps.fit(im, (sizes['thumbnail']['width'], sizes['thumbnail']['height']), Image.ANTIALIAS)
     thumbname = filename + "_" + str(sizes['thumbnail']['width']) + "x" + str(sizes['thumbnail']['height']) + ".jpg"
     im.save(fullpath + '\\' + thumbname)
     self.topping_small_image = '/media/'+self.filepath + thumbname
     super(Topping, self).save()
Example #3
File: utils.py Project: 32x32/fufufuu
    def transform(cls, key_type, source):
        """
        return a BytesIO object with the transformed image
        """

        file = open(source, 'rb')
        im = Image.open(file)
        ImageFile.MAXBLOCK = im.size[0] * im.size[1]

        if im.mode != 'RGB':
            im = im.convert('RGB')

        spec = specs[key_type]
        if spec.get('crop'):
            w, h = im.size[0], im.size[1]
            if w <= spec['width'] or h <= spec['height']:
                target_ratio = spec['width'] / spec['height']
                source_ratio = w / h
                if source_ratio >= target_ratio:
                    w = h * target_ratio
                else:
                    h = w / target_ratio
                w, h = int(w), int(h)
                im = ImageOps.fit(im, (w, h), Image.ANTIALIAS)
            else:
                im = ImageOps.fit(im, (spec['width'], spec['height']), Image.ANTIALIAS)
        else:
            im.thumbnail((spec['width'], spec['height']), Image.ANTIALIAS)

        output = BytesIO()
        im.save(output, format='JPEG', quality=spec.get('quality', 75), optimize=True, progressive=False)

        file.close()

        return output
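The specs mapping that transform() reads is not part of this snippet; judging only from the keys the method accesses (width, height, crop and an optional quality), it plausibly looks like the sketch below. The names and numbers are illustrative guesses, not values from the fufufuu project.

# Illustrative shape of the specs mapping read by transform(); values are made up.
specs = {
    'thumbnail': {'width': 200, 'height': 300, 'crop': True, 'quality': 85},
    'page': {'width': 900, 'height': 1300, 'crop': False},  # quality falls back to 75
}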
Example #4
	def _save(self, name, content, thumbnails_only=False):
		content_str = self.content_string(content)
		in_buffer = StringIO()
		in_buffer.write(content_str)
		in_buffer.seek(0)
		img = Image.open(in_buffer)
		
		# first resize the main image if necessary
		if not thumbnails_only:
			if self.max_size and (img.size[0] > self.max_size[0] or img.size[1] > self.max_size[1]):
				resized = ImageOps.fit(img, image.scale_to_fit(img.size, self.max_size), Image.ANTIALIAS)
				out_buffer = StringIO()
				image_type = image.image_type(name)
				args, kwargs = [out_buffer, image_type], {}
				if image_type == 'JPEG': kwargs['quality'] = 95
				resized.save(*args, **kwargs)
				self._put_file(name, out_buffer.getvalue())
				out_buffer.close()
			else:
				self._put_file(name, content_str)
		
		# now create each of the thumbnails
		for size in self.thumbnail_sizes.keys():
			resized = ImageOps.fit(img, self.thumbnail_sizes[size], Image.ANTIALIAS)
			out_buffer = StringIO()
			resized.convert('RGB').save(out_buffer, 'JPEG', quality=95)
			self._put_file(self.thumbnail_name(size, name), out_buffer.getvalue())
			out_buffer.close()

		in_buffer.close()		
		return name
Example #5
def apply_polaroid(pixbuf,imageText):
    width,height = pixbuf.get_width(),pixbuf.get_height() 
    frameSize = (300,320)  
    imageOutputSize = (270,245) 
    imgModified = Image.open('images/frame.jpg')
    #cropped image to the requested framesize
    imgModified = ImageOps.fit(imgModified, frameSize, Image.ANTIALIAS, 0, (0.5,0.5))
    y = Image.frombytes(K.ImageConstants.RGB_SHORT_NAME,(width,height),pixbuf.get_pixels())
    #cropped image to the requested size
    y = ImageOps.fit(y, imageOutputSize, Image.ANTIALIAS, 0, (0.5,0.5))
    y = ImageOps.autocontrast(y, cutoff=2)
    y = ImageEnhance.Sharpness(y).enhance(2.0)
    
    boxOnImage = (12,18) 
    imgModified.paste(y, boxOnImage)
    
    #text on image
    textWidget = ImageDraw.Draw(imgModified).textsize(imageText)
    fontxy = (frameSize[0]/2 - textWidget[0]/2, 278)
    ImageDraw.Draw(imgModified).text(fontxy, imageText,fill=(40,40,40))
    
    imgOutput = Image.new(imgModified.mode, (300,320))
    imgOutput.paste(imgModified, (imgOutput.size[0]/2-imgModified.size[0]/2, imgOutput.size[1]/2-imgModified.size[1]/2))
 
    return I.fromImageToPixbuf(imgOutput)
Example #6
 def save(self, force_insert=False, force_update=False):
     im = Image.open(self.image.path)
     if im.size != web_settings.COVER_SIZE:
         im = ImageOps.fit(im, web_settings.COVER_SIZE, Image.ANTIALIAS)
         if web_settings.OVER_COVER:
             im = put_watermark(im, web_settings.OVER_COVER, 0, "topleft")
         im.save(self.image.path, quality=100, optimize=0)
     if web_settings.COVER_THUMB_SIZE:
         thumb = ImageOps.fit(im, web_settings.COVER_THUMB_SIZE, Image.ANTIALIAS)
         thumb.save(self.cover_thumb_path, quality=100)
         
     if web_settings.HAS_HIGHLIGHT:
         im = Image.open(self.highlight.path)
         if not os.path.exists(self.highlight_thumb_path):
             thumb = ImageOps.fit(im, web_settings.HIGHLIGHT_THUMB_SIZE, Image.ANTIALIAS)
             thumb.save(self.highlight_thumb_path, quality=100)
         if im.size != web_settings.HIGHLIGHT_SIZE:
             im = ImageOps.fit(im, web_settings.HIGHLIGHT_SIZE, Image.ANTIALIAS)
         
         if web_settings.HIGHLIGHT_WATERMARK:
             im = put_watermark(im, web_settings.HIGHLIGHT_WATERMARK, 0, 
                                web_settings.HIGHLIGHT_WATERMARK_POSITION)
             
         im.save(self.highlight.path, quality=100, optimize=0)
         
     super(Album, self).save(force_insert, force_update)
Example #7
def create_test(imgage_dir, img, imgs, resize_image):
    global img_size 
    global puzzle_size 
    another_image = ''
    while another_image == '' or another_image == img:
        another_image = imgs[random.randint(0, len(imgs) - 1)]
    subimage_position = (random.randint(0, img_size[0] - puzzle_size[0]), random.randint(0, img_size[1] - puzzle_size[1]))
    subimage = Image.open(os.path.join(imgage_dir, another_image))
    if resize_image:
        subimage = ImageOps.fit(subimage, (img_size[0], img_size[1]), method = Image.ANTIALIAS, centering = (0.5,0.5)) 
    subimage_puzzle_piece_filling = subimage.crop((subimage_position[0], subimage_position[1], subimage_position[0] + puzzle_size[0], subimage_position[1] + puzzle_size[1]))
    challenge_background = Image.open(os.path.join(imgage_dir, img))
    # crop to img_size centered
    (width, height) = challenge_background.size
    x_start, y_start = ((width - img_size[0])/2, (height - img_size[1])/2)
    if resize_image:
        # resize full image to size, keeping aspect ratio
        centered_challenge_background = ImageOps.fit(challenge_background, (img_size[0], img_size[1]), method = Image.ANTIALIAS, centering = (0.5,0.5)) 
    else:
        # or just crop a portion from the center
        centered_challenge_background = challenge_background.crop((x_start, y_start, x_start + img_size[0], y_start + img_size[1]))
    puzzle_piece_position = (random.randint(0, img_size[0] - puzzle_size[0]) / 10, random.randint(0, img_size[1] - puzzle_size[1]) / 10)
    puzzle_piece_position = (puzzle_piece_position[0] * 10, puzzle_piece_position[1] * 10)
    puzzle_piece = centered_challenge_background.crop((puzzle_piece_position[0], puzzle_piece_position[1], puzzle_piece_position[0] + puzzle_size[0], puzzle_piece_position[1] + puzzle_size[1]))
    centered_challenge_background = mergePNG(centered_challenge_background, subimage_puzzle_piece_filling, puzzle_piece_position)
    return centered_challenge_background, puzzle_piece, puzzle_piece_position
Example #8
File: image.py Project: ramas-jpg/kcc
 def resizeImage(self):
     if self.image.size[0] <= self.size[0] and self.image.size[1] <= self.size[1]:
         method = Image.BICUBIC
     else:
         method = Image.LANCZOS
     if self.opt.stretch:
         self.image = self.image.resize(self.size, method)
     elif self.image.size[0] <= self.size[0] and self.image.size[1] <= self.size[1] and not self.opt.upscale:
         if self.opt.format == 'CBZ':
             borderw = int((self.size[0] - self.image.size[0]) / 2)
             borderh = int((self.size[1] - self.image.size[1]) / 2)
             self.image = ImageOps.expand(self.image, border=(borderw, borderh), fill=self.fill)
             if self.image.size[0] != self.size[0] or self.image.size[1] != self.size[1]:
                 self.image = ImageOps.fit(self.image, self.size, method=Image.BICUBIC, centering=(0.5, 0.5))
     else:
         if self.opt.format == 'CBZ':
             ratioDev = float(self.size[0]) / float(self.size[1])
             if (float(self.image.size[0]) / float(self.image.size[1])) < ratioDev:
                 diff = int(self.image.size[1] * ratioDev) - self.image.size[0]
                 self.image = ImageOps.expand(self.image, border=(int(diff / 2), 0), fill=self.fill)
             elif (float(self.image.size[0]) / float(self.image.size[1])) > ratioDev:
                 diff = int(self.image.size[0] / ratioDev) - self.image.size[1]
                 self.image = ImageOps.expand(self.image, border=(0, int(diff / 2)), fill=self.fill)
             self.image = ImageOps.fit(self.image, self.size, method=method, centering=(0.5, 0.5))
         else:
             hpercent = self.size[1] / float(self.image.size[1])
             wsize = int((float(self.image.size[0]) * float(hpercent)))
             self.image = self.image.resize((wsize, self.size[1]), method)
             if self.image.size[0] > self.size[0] or self.image.size[1] > self.size[1]:
                 self.image.thumbnail(self.size, Image.LANCZOS)
Example #9
    def save(self):
        sizes = {'thumbnail': {'height': 340, 'width': 300},
                 'medium': {'height': 370, 'width': 635}}

        super(Post_related_images, self).save()
        photopath = str(self.image.path)  # this returns the full system path
        # to the original file
        im = Image.open(photopath)  # open the image using PIL
#         ins=ImageOps()
    # pull a few variables out of that full path
        extension = photopath.rsplit('.', 1)[1]  # the file extension
        filename = photopath.rsplit('/', 1)[-1].rsplit('.', 1)[:-1][0]  # the
        # file name only (minus path or extension)
        fullpath = photopath.rsplit('/', 1)[:-1][0]  # the path only (minus
        # the filename.extension)
        # use the file extension to determine if the image is valid
        # before proceeding
        if extension not in ['jpg', 'jpeg', 'gif', 'png']:
            sys.exit()

        # create medium image
        ins = ImageOps.fit(im, (sizes['medium']['width'], sizes['medium']['height']), Image.ANTIALIAS)
        medname = str(filename) + "_" + str(sizes['medium']['width']) + "x" + str(sizes['medium']['height']) + ".jpg"
        ins.save(str(fullpath) + '/' + medname)
        self.largeimage = self.image.url.rsplit('/', 1)[:-1][0] + '/' + medname

        # create thumbnail
        ins = ImageOps.fit(im, (sizes['thumbnail']['width'], sizes['thumbnail']['height']), Image.ANTIALIAS)
        thumbname = filename + "_" + str(sizes['thumbnail']['width']) + "x" + str(sizes['thumbnail']['height']) + ".jpg"
        ins.save(fullpath + '/' + thumbname)
        self.smallimage = self.image.url.rsplit('/', 1)[:-1][0] + '/' + thumbname

        super(Post_related_images, self).save()
Example #10
File: makeData.py Project: mpslxz/CNN-LDH
def makeData(dim):
    imageCount = 580
    size = (dim, dim)
    trainingPercent = 0.7
    testPercent = 0.1
    validationPercent = 0.2
    winDim = ceil(2*dim/3)
    laminae = np.ndarray((dim*dim, winDim*winDim + 84*3))

    # labels = np.zeros((dim*dim, 2))
    labels = np.zeros((dim*dim))

    fileCount = 0

    print "Loading Laminae...\n"
    for i in range(1, imageCount+1):
        print i
        # img = Image.open("newLam/img_%d.jpg" %i)
        img = Image.open("/home/mehran/Desktop/ConvNet/newLam/img_%d.jpg" %i)
        img = ImageOps.fit(img, size)
        # imgEn = Image.open("enhanced/img_%d.jpg" %i)
        imgEn = Image.open("/home/mehran/Desktop/ConvNet/enhanced/img_%d.jpg" %i)
        imgEn = ImageOps.fit(imgEn, size)  # fit the enhanced image itself

        segmented = Image.open("/home/mehran/Desktop/ConvNet/base of lamina/lamBase_%d.jpg" %i)
        segmented = ImageOps.fit(segmented, size)

        paddedImage = addPaddingZero(img, dim)
        paddedImageEn = addPaddingZero(imgEn, dim)
         #paddedSegmented = addPaddingZero(segmented, dim)
        count = 0
        for m in range(0, dim):
            for n in range(0, dim):
                window = paddedImage[m:m+winDim, n:n+winDim]
                windowEn = paddedImageEn[m:m+winDim, n:n+winDim]
                Res = 0.7*windowEn + 0.3*window
                laminae[count, :] = np.append(np.reshape(Res, winDim*winDim)/np.amax(Res), np.asarray(extractFeatures.extractFeatures(Res/np.amax(Res))))
                S = segmented.load()

                labels[count] = 1 if S[m, n] > 0 else 0
                 # labels[count, 0] = 1 if S[m, n] > 0 else 0
                 # labels[count, 1] = 1 - S[m, n]
                count += 1

        # f = file("leftImgs/Pickles/sample_%d.p" %i, 'wb')
        # pickle.dump(laminae, f)
     #
        posIndices = np.nonzero(labels)[0]
        posSampleSize = np.count_nonzero(labels)
        if(posSampleSize > 0):
            print i
            negIndices = np.random.randint(0, dim*dim, posSampleSize)
            a = [labels[v] for v in posIndices]
            b = [labels[u] for u in negIndices]

            L = np.hstack((a, b))
            Patches = np.vstack(([laminae[v,:] for v in posIndices], [laminae[u,:] for u in negIndices]))
            f = file("/home/mehran/Desktop/ConvNet/Pickles/balanced/enhanced_overlay+lamBase_dilated_new_labels_with_LDH/sample_balanced_%d.p" %fileCount, 'wb')
            pickle.dump([Patches, L], f)
            fileCount += 1
Example #11
def process_img(fname, r):
    im = Image.open(fname)
    out = ImageOps.fit(im, (320,240), Image.ANTIALIAS)
    print "rendering thumnail for: %s" % fname
    out.save("../images/locations/%s.jpg" % r['id'], "jpeg")
    out2 = ImageOps.fit(im, (800,600), Image.ANTIALIAS)
    out2.save("../images/locations/%s-large.jpg" % r['id'], "jpeg")
Example #12
File: fields.py Project: Namejs/workr
    def put(self, file_obj, **kwargs):
        """
        Insert a image in database
        applying field properties (size, thumbnail_size)
        """
        field = self.instance._fields[self.key]

        try:
            img = Image.open(file_obj)
            img_format = img.format
        except:
            raise ValidationError('Invalid image')

        if (field.size and (img.size[0] > field.size['width'] or
                            img.size[1] > field.size['height'])):
            size = field.size

            if size['force']:
                img = ImageOps.fit(img,
                                   (size['width'],
                                    size['height']),
                                   Image.ANTIALIAS)
            else:
                img.thumbnail((size['width'],
                               size['height']),
                              Image.ANTIALIAS)

        thumbnail = None
        if field.thumbnail_size:
            size = field.thumbnail_size

            if size['force']:
                thumbnail = ImageOps.fit(img,
                                   (size['width'],
                                    size['height']),
                                   Image.ANTIALIAS)
            else:
                thumbnail = img.copy()
                thumbnail.thumbnail((size['width'],
                                     size['height']),
                                    Image.ANTIALIAS)

        if thumbnail:
            thumb_id = self._put_thumbnail(thumbnail,
                                          img_format)
        else:
            thumb_id = None

        w, h = img.size

        io = StringIO()
        img.save(io, img_format)
        io.seek(0)

        return super(ImageGridFsProxy, self).put(io,
                                                 width=w,
                                                 height=h,
                                                 format=img_format,
                                                 thumbnail_id=thumb_id,
                                                 **kwargs)
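The size and thumbnail_size field properties are not defined in this snippet; from the keys put() reads, they appear to be mappings of the shape sketched below, where force selects a hard crop via ImageOps.fit instead of an aspect-preserving thumbnail. The numbers are illustrative only.

# Illustrative shape of the field properties read by put(); the numbers are made up.
size_spec = {'width': 800, 'height': 600, 'force': True}        # hard-crop to exactly 800x600
thumbnail_spec = {'width': 150, 'height': 150, 'force': False}  # thumbnail, aspect ratio preserved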
Example #13
def pickle_dataset(input_pkl, output_pkl, img_path, id_label, PIXELS):
    data = pd.read_pickle(input_pkl)
    dataset = {}

    iter_images = iter(data[id_label])
    first_image = next(iter_images)
    im = Image.open(img_path + first_image + '.jpg', 'r')
    im = ImageOps.fit(im, (PIXELS, PIXELS), Image.ANTIALIAS)
    im = (np.array(im))
    r = im[:, :, 0].flatten()
    g = im[:, :, 1].flatten()
    b = im[:, :, 2].flatten()

    img_list = np.array(list(r) + list(g) + list(b), dtype='uint8')
    img_list = img_list[np.newaxis, :]

    for img_name in iter_images:
        im = Image.open(img_path + img_name + '.jpg', 'r')
        im = ImageOps.fit(im, (PIXELS, PIXELS), Image.ANTIALIAS)
        im = (np.array(im))
        r = im[:, :, 0].flatten()
        g = im[:, :, 1].flatten()
        b = im[:, :, 2].flatten()

        img = np.array(list(r) + list(g) + list(b), dtype='uint8')
        img_list = np.vstack((img_list, img[np.newaxis, :]))

    hkl.dump(img_list, output_pkl + '_data.hpy', mode='w', compression='gzip')
    hkl.dump(data['label'], output_pkl + '_labels.hpy', mode='w')

    del img_list
    del data
Example #14
File: image.py Project: kyuzumaki/kcc
 def resizeImage(self):
     if self.opt.bordersColor:
         fill = self.opt.bordersColor
     else:
         fill = self.fill
     # Set target size
     if self.opt.quality == 0:
         size = (self.size[0], self.size[1])
     elif self.opt.quality == 1 and not self.opt.stretch and not self.opt.upscale and self.image.size[0] <=\
             self.size[0] and self.image.size[1] <= self.size[1]:
         size = (self.size[0], self.size[1])
     elif self.opt.quality == 1:
         # Forcing upscale to make sure that margins will not be too big
         if not self.opt.stretch:
             self.opt.upscale = True
         size = (self.panelviewsize[0], self.panelviewsize[1])
     elif self.opt.quality == 2 and not self.opt.stretch and not self.opt.upscale and self.image.size[0] <=\
             self.size[0] and self.image.size[1] <= self.size[1]:
         # HQ version will not be needed
         self.noHQ = True
         return
     else:
         size = (self.panelviewsize[0], self.panelviewsize[1])
     # If stretching is on - Resize without other considerations
     if self.opt.stretch:
         if self.image.size[0] <= size[0] and self.image.size[1] <= size[1]:
             method = Image.BICUBIC
         else:
             method = Image.LANCZOS
         self.image = self.image.resize(size, method)
         return
     # If image is smaller than target resolution and upscale is off - Just expand it by adding margins
     if self.image.size[0] <= size[0] and self.image.size[1] <= size[1] and not self.opt.upscale:
         borderw = int((size[0] - self.image.size[0]) / 2)
         borderh = int((size[1] - self.image.size[1]) / 2)
         # PV is disabled when source image is smaller than device screen and upscale is off
         if self.image.size[0] <= self.size[0] and self.image.size[1] <= self.size[1]:
             self.noPV = True
         self.image = ImageOps.expand(self.image, border=(borderw, borderh), fill=fill)
         # Border can't be float so sometimes image might be 1px too small/large
         if self.image.size[0] != size[0] or self.image.size[1] != size[1]:
             self.image = ImageOps.fit(self.image, size, method=Image.BICUBIC, centering=(0.5, 0.5))
         return
     # Otherwise - Upscale/Downscale
     ratioDev = float(size[0]) / float(size[1])
     if (float(self.image.size[0]) / float(self.image.size[1])) < ratioDev:
         diff = int(self.image.size[1] * ratioDev) - self.image.size[0]
         self.image = ImageOps.expand(self.image, border=(int(diff / 2), 0), fill=fill)
     elif (float(self.image.size[0]) / float(self.image.size[1])) > ratioDev:
         diff = int(self.image.size[0] / ratioDev) - self.image.size[1]
         self.image = ImageOps.expand(self.image, border=(0, int(diff / 2)), fill=fill)
     if self.image.size[0] <= size[0] and self.image.size[1] <= size[1]:
         method = Image.BICUBIC
     else:
         method = Image.LANCZOS
     self.image = ImageOps.fit(self.image, size, method=method, centering=(0.5, 0.5))
     return
Example #15
    def test_1pxfit(self):
        # Division by zero in equalize if image is 1 pixel high
        newimg = ImageOps.fit(hopper("RGB").resize((1, 1)), (35, 35))
        self.assertEqual(newimg.size, (35, 35))

        newimg = ImageOps.fit(hopper("RGB").resize((1, 100)), (35, 35))
        self.assertEqual(newimg.size, (35, 35))

        newimg = ImageOps.fit(hopper("RGB").resize((100, 1)), (35, 35))
        self.assertEqual(newimg.size, (35, 35))
Example #16
def test_1pxfit():
    # Division by zero in equalize if image is 1 pixel high
    newimg = ImageOps.fit(lena("RGB").resize((1,1)), (35,35))
    assert_equal(newimg.size,(35,35))
    
    newimg = ImageOps.fit(lena("RGB").resize((1,100)), (35,35))
    assert_equal(newimg.size,(35,35))

    newimg = ImageOps.fit(lena("RGB").resize((100,1)), (35,35))
    assert_equal(newimg.size,(35,35))
Example #17
def loadData(dim):
    size = (dim, dim)
    trainingPercent = 0.7
    testPercent = 0.1
    validationPercent = 0.2
    laminae = np.ndarray((1149, dim * dim))
    notLaminae = np.ndarray((1149, dim * dim))

    print "Loading Laminae...\n"
    for i in range(1, 1150):
        img = Image.open("lamina/img_%d.jpg" % i)
        img = ImageOps.fit(img, size)  # ImageOps.fit expects a (width, height) tuple
        # img.thumbnail(size, Image.ANTIALIAS)
        laminae[i - 1, :] = np.reshape(np.asarray(img), dim * dim) / np.amax(np.asarray(img))

    print "Loading Not-laminae...\n"
    for i in range(1, 1150):
        img = Image.open("notLamina/IMG_%d.jpg" % i)
        img = ImageOps.fit(img, size)
        # img.thumbnail(size, Image.ANTIALIAS)
        notLaminae[i - 1, :] = np.reshape(np.asarray(img), dim * dim) / np.amax(np.asarray(img))

    print "Stacking data...\n"
    data = np.vstack((laminae, notLaminae))
    labels = np.hstack((np.ones(1149), np.zeros(1149)))
    # l1 = np.hstack((np.ones(574), np.zeros(574)))
    # l2 = np.hstack((np.zeros(574), np.ones(574)))
    # labels = np.vstack([l1, l2])

    print "Making training data...\n"
    ind = np.random.randint(0, 2298, 2299 * trainingPercent)
    training_data = data[ind, :]
    training_labels = labels[ind]

    print "Making test data...\n"
    ind = np.random.randint(0, 2298, 2299 * testPercent)
    test_data = data[ind, :]
    test_labels = labels[ind]

    print "Making validation data...\n"
    ind = np.random.randint(0, 2298, 2299 * validationPercent)
    validation_data = data[ind, :]
    validation_labels = labels[ind]

    def shared(data):
        shared_x = theano.shared(np.asarray(data[0], dtype=theano.config.floatX), borrow=True)
        shared_y = theano.shared(np.asarray(data[1], dtype=theano.config.floatX), borrow=True)
        return shared_x, T.cast(shared_y, "int32")

    return (
        shared([training_data, training_labels]),
        shared([test_data, test_labels]),
        shared([validation_data, validation_labels]),
    )
Example #18
File: ajax.py Project: MHM5000/tweeria
	def resizeImage(self, buffer_filepath, local_filepath, params, cropped_image = False):

		image = Image.open(buffer_filepath)

		width, height = image.size
		imgData = {
			"width" : width,
			"height": height
		}

		need_resize = False

		createSmallItemSpell = False
		if "type_of_form" in params and params['type_of_form'] == "create_artwork":
			checkWidth = self.core.MAX_ARTWORK_WIDTH
			checkHeight = self.core.MAX_ARTWORK_HEIGHT

		elif "type_of_form" in params and (params['type_of_form'] == "create_item" or params['type_of_form'] == "create_spell"):
			checkWidth = self.core.MAX_ITEM_SPELL_WIDTH
			checkHeight = self.core.MAX_ITEM_SPELL_HEIGHT
			createSmallItemSpell = True
		else:
			checkWidth = self.core.MAX_AVA_WIDTH
			checkHeight = self.core.MAX_AVA_HEIGHT

		if width > checkWidth:
			width = checkWidth
			need_resize = True

		if height > checkHeight:
			height = checkHeight
			need_resize = True

		#need_resize = False

		if need_resize:
			if params['type_of_form'] == "create_artwork":
				thumb = ImageOps.fit(cropped_image, (self.core.THUMB_ARTWORK_WIDTH, self.core.THUMB_ARTWORK_HEIGHT), Image.ANTIALIAS)
			else:
				thumb = ImageOps.fit(cropped_image, (width,height), Image.ANTIALIAS)
			thumb.save(local_filepath+"_fit.png", "PNG")
		elif params['type_of_form'] == "create_artwork":
			thumb = ImageOps.fit(cropped_image, (self.core.THUMB_ARTWORK_WIDTH, self.core.THUMB_ARTWORK_HEIGHT), Image.ANTIALIAS)
			thumb.save(local_filepath+"_fit.png", "PNG")
		else:
			image.save(local_filepath+"_fit.png", "PNG")


		if createSmallItemSpell:
			thumb = ImageOps.fit(cropped_image, (self.core.THUMB_ITEM_SPELL_WIDTH, self.core.THUMB_ITEM_SPELL_HEIGHT), Image.ANTIALIAS)
			thumb_local_filepath = local_filepath+"_thumb.png"
			thumb.save(thumb_local_filepath,"PNG")
		return imgData
Example #19
File: image.py Project: ZhouYunan/moto-moe
    def save(self, r):
        if isinstance(r,ResumableFile):
            file_obj =  NamedTemporaryFile(delete=True)
            for data in r.chunks():
                file_obj.write(data)
        else: #temp file
            file_obj = r
        try:
            img = Image.open(file_obj)
        except Exception as e:
            pass
        if (IMAGE_SIZE.size and (img.size[0] > IMAGE_SIZE.size['width'] or
                            img.size[1] > IMAGE_SIZE.size['height'])):
            size = IMAGE_SIZE.size

            if size['force']:
                img = ImageOps.fit(img,
                                   (size['width'],
                                    size['height']),
                                   Image.ANTIALIAS)
            else:
                img.thumbnail((size['width'],
                               size['height']),
                              Image.ANTIALIAS)
            try:
                img.save(self.large_path)
            except FileNotFoundError:
                os.makedirs(os.path.dirname(self.large_path), 0o755)
                img.save(self.large_path)
        else:
            try:
                img.save(self.large_path)
            except FileNotFoundError:
                os.makedirs(os.path.dirname(self.large_path), 0o755)
                img.save(self.large_path)
        if IMAGE_SIZE.thumbnail:
            size = IMAGE_SIZE.thumbnail

            if size['force']:
                thumbnail = ImageOps.fit(img, (size['width'], size['height']), Image.ANTIALIAS)
            else:
                thumbnail = img.copy()
                thumbnail.thumbnail((size['width'],
                                     size['height']),
                                    Image.ANTIALIAS)
            try:
                thumbnail.save(self.thumbnail_path)
            except FileNotFoundError:
                os.makedirs(os.path.dirname(self.thumbnail_path))
                thumbnail.save(self.thumbnail_path)
        file_obj.close()
Example #20
	async def passeport_theme(self, ctx, theme: str = ""):
		self.conn = db.connect_to_db(self)
		self.cursor = self.conn.cursor()

		possible_theme = ["dark", "light", "preview"]

		if theme.lower() in possible_theme:
			if theme.lower() == "dark":
				self.cursor.execute("""UPDATE passport SET theme = %s WHERE userid = %s""", ("dark", str(ctx.message.author.id)))
				self.conn.commit()

				em = discord.Embed(title='Configuration terminée', description="Thème enregistré avec succes", colour=0x28a745)
				await ctx.send(embed=em)
			elif theme.lower() == "light":
				self.cursor.execute("""UPDATE passport SET theme = %s WHERE userid = %s""", ("light", str(ctx.message.author.id)))
				self.conn.commit()

				em = discord.Embed(title='Configuration terminée', description="Thème enregistré avec succes", colour=0x28a745)
				await ctx.send(embed=em)
			else:
				wait_message = await ctx.send(f"Laissez moi juste le temps de superposer les 2 passeports, je vous prie de bien vouloir patienter...")
				cardbg = Image.new('RGBA', (1600, 500), (0, 0, 0, 255))

				card_dark = await generate_passport(self, ctx.author, "dark")
				card_dark.save(f'data/tmp/{ctx.author.id}_dark.png', 'png')

				card_light = await generate_passport(self, ctx.author, "light")
				card_light.save(f'data/tmp/{ctx.author.id}_light.png', 'png')

				saved_card_dark = Image.open(f'data/tmp/{ctx.author.id}_dark.png')
				saved_card_light = Image.open(f'data/tmp/{ctx.author.id}_light.png')

				saved_card_dark = ImageOps.fit(saved_card_dark, (800, 500))
				saved_card_light = ImageOps.fit(saved_card_light, (800, 500))

				cardbg.paste(saved_card_dark, (0, 0))
				cardbg.paste(saved_card_light, (800, 0))

				cardbg.save(f'data/tmp/{ctx.author.id}.png', 'png')

				with open(f'data/tmp/{ctx.author.id}.png', 'rb') as g:
					await ctx.send(file=discord.File(g))
					await wait_message.delete()
					await ctx.send(f"Et voila {ctx.author.mention} ! à gauche votre passeport avec le thème \"dark\" et à droite avec le thème \"light\" :wink:")

				shutil.rmtree("data/tmp")
				os.mkdir("data/tmp")

		else:
			em = discord.Embed(title='Une erreur est survenue', description="Les choix possible pour cette commande sont : `dark`, `light`, `preview`", colour=0xDC3546)
			await ctx.send(embed=em)
Example #21
    def produce_image(self, card_one, card_two):
        result = dict()
        im1_file_path = settings.STATIC_ROOT_CARD_IMAGES + '/' + str(card_one.multiverseid) + '.jpg'
        im2_file_path = settings.STATIC_ROOT_CARD_IMAGES + '/' + str(card_two.multiverseid) + '.jpg'
        mask_file_path = settings.STATIC_ROOT_CN + '/' + 'card_mask.png'
        logo_file_path = settings.STATIC_ROOT_CN + '/' + 'cardninja_glow.png'
        try:
            mask = Image.open(mask_file_path).convert('L')

            logo = Image.open(logo_file_path)
            logo = logo.resize((int(logo.size[0] * .75), int(logo.size[1] * .75)), resample=Image.BICUBIC)

            im1_card = Image.open(im1_file_path)
            if im1_card.mode != 'RGBA':
                im1_card = im1_card.convert('RGBA')
                im1_card = ImageOps.fit(im1_card, mask.size, centering=(0.5, 0.5))
                im1_card.putalpha(mask)
            im2_card = Image.open(im2_file_path)
            if im2_card.mode != 'RGBA':
                im2_card = im2_card.convert('RGBA')
                im2_card = ImageOps.fit(im2_card, mask.size, centering=(0.5, 0.5))
                im2_card.putalpha(mask)

            im_result = Image.new('RGBA', (546, 340))
            back_crop = im1_card.crop((30, 60, 200, 140))
            back_crop = back_crop.resize((int(170 * 3.5), int(120 * 3.5)), resample=Image.BICUBIC)
            back_crop = back_crop.filter(ImageFilter.GaussianBlur(radius=5))

            im1_rot = im1_card.rotate(9, resample=Image.BICUBIC, expand=True)
            im2_rot = im2_card.rotate(-9, resample=Image.BICUBIC, expand=True)
            im1_rot_a = im1_rot.filter(ImageFilter.GaussianBlur(radius=2))
            im2_rot_a = im2_rot.filter(ImageFilter.GaussianBlur(radius=2))

            im_result.paste(back_crop, (0, 0))
            im_result.paste(im2_rot_a, (235, 0), im2_rot_a)
            im_result.paste(im2_rot, (235, 0), im2_rot)
            im_result.paste(im1_rot_a, (40, 0), im1_rot_a)
            im_result.paste(im1_rot, (40, 0), im1_rot)
            im_result.paste(logo, (im_result.size[0] - 5 - logo.size[0], im_result.size[1] - 5 - logo.size[1],), logo)

            m = hashlib.md5()
            m.update('{}-{}'.format(card_one.basecard.filing_name, card_two.basecard.filing_name))
            output_filename = m.hexdigest() + '.jpg'
            output_full_filename = settings.DYNAMIC_IMAGE_FILE_ROOT + '/' + output_filename
            im_result.save(output_full_filename, 'JPEG', quality=88)
            # 'd' will need to be setup in Apache, pointing to settings.DYNAMIC_IMAGE_FILE_ROOT
            result = {'filename': output_full_filename,
                      'url': 'http://card.ninja/d/{}'.format(output_filename)}
        except IOError as ioe:
            out.write("Oh no. " + str(ioe))
        return result
Example #22
def _load_batch(pixels, input_pkl, img_path, batch_size=32, seed=None, n=5):
    """
    Helper function to load images and format it for classification
    :param pixels: size of images (one side)
    :param input_pkl: path to pickle
    :param img_path: path to the images
    :param batch_size: batch size
    :param seed: random seed for repeatability
    :param n: number of classes
    :return: list with images and list with labels
    """
    data = pd.read_pickle(input_pkl)
    # Sample the data randomly
    sample = data.sample(n=batch_size, random_state=seed)

    # Iterate the samples based on the photo_id
    iter_images = iter(sample['photo_id'])

    # Initialize arrays
    first_image = next(iter_images)
    # Open image and fit to size
    im = Image.open(img_path + first_image + '.jpg', 'r')
    im = ImageOps.fit(im, (pixels, pixels), Image.ANTIALIAS)
    # Convert image to numpy array and extract RGB channels
    im = (np.array(im))
    r = im[:, :, 0].flatten()
    g = im[:, :, 1].flatten()
    b = im[:, :, 2].flatten()

    # Create image list
    img_list = np.array(list(r) + list(g) + list(b), dtype='uint8')
    img_list = img_list[np.newaxis, :]

    # Do the same for all the images in the iterator
    for img_name in iter_images:
        im = Image.open(img_path + img_name + '.jpg', 'r')
        im = ImageOps.fit(im, (pixels, pixels), Image.ANTIALIAS)
        im = (np.array(im))
        r = im[:, :, 0].flatten()
        g = im[:, :, 1].flatten()
        b = im[:, :, 2].flatten()

        img = np.array(list(r) + list(g) + list(b), dtype='uint8')
        # Stack the image vectors vertically
        img_list = np.vstack((img_list, img[np.newaxis, :]))

    # Labels are already the 'label' column of the sample data
    label_list = sample['label']
    return img_list, label_list
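A hedged usage sketch for _load_batch above; the pickle path, image directory and batch size are placeholders, and the pickle is assumed to hold a DataFrame with photo_id and label columns, as the code implies.

# Hypothetical call; paths and sizes are placeholders.
X_batch, y_batch = _load_batch(pixels=64,
                               input_pkl='train_photos.pkl',
                               img_path='photos/',
                               batch_size=32,
                               seed=42)
# X_batch: uint8 array of shape (32, 64*64*3), channels flattened as R|G|B
# y_batch: the 'label' column of the 32 sampled rows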
Example #23
File: images.py Project: vaultah/L
    def _store_n_link(cls, acct, file, allow_gif=False):
        # `file` argument must be provided
        content = file.read()
        # Keep the original object unmodified.
        # It won't be used anywhere in this function
        file.seek(0)

        if len(content) > consts.MAX_IMAGE_SIZE:
            raise ValueError('Image is too large')

        try:
            # Try to get image type
            img = BaseImage.open(io.BytesIO(content))
            if img.format not in cls._allowed.union({'GIF'} if allow_gif else set()):
                raise ValueError
        except (IOError, ValueError) as e:
            raise ValueError('Invalid image type') from None


        name = '{}.{}'.format(utils.unique_id()[0], img.format.lower())
        sizes = (consts.ORIGINAL_IMAGE, consts.SQUARE_THUMBNAIL, consts.SHRINKED_IMAGE)
        names = [consts.MEDIA_IMAGES / '{}-{}'.format(x, name) for x in sizes]

        consts.MEDIA_IMAGES.mkdir(parents=True, exist_ok=True)
        
        # Save full image without changin' a byte
        with names[0].open('wb') as unmodified:
            unmodified.write(content)

        # Construct `PIL.Image` instance and make a thumbnail and a shrinked copy

        # Thumbnails are always square
        ImageOps.fit(img, (100, 100), BaseImage.ANTIALIAS).save(str(names[1]), quality=100)
        # Shrinked image is a fixed-width image derived from the full-size image
        # Don't modify GIF images
        if consts.SHRINKED_WIDTH < img.size[0] and img.format != 'GIF':
            nh = math.ceil(consts.SHRINKED_WIDTH / img.size[0] * img.size[1])
            shrinked = ImageOps.fit(img, (consts.SHRINKED_WIDTH, nh), BaseImage.ANTIALIAS)
            shrinked.save(str(names[2]), quality=100)
        else:
            with names[2].open('wb') as shrinked:
                shrinked.write(content)

        # Link the image to `acct`, create a new `Image` instance and return it
        data = {'name': name, 'owner': acct.id, 'id': utils.unique_id()[0], 'score': 0}
        cls.collection.insert_one(data)
        data['owner'] = acct
        data['file'] = img
        return data
Example #24
File: storages.py Project: Bramas/picapi
	def save(self, id, secret, o_secret, ext, options=None):
		o_filename = str(id) + '_' + secret + '_' + o_secret + ext
		if 'save' in options:
			options['save'](config.Path.Uploads, o_filename)
		else:
			return False

		basename = str(id) + '_' + secret
		im = Image.open(join(config.Path.Uploads, o_filename))
		if max(im.size) > 2048:
			im.thumbnail([2048,2048], Image.ANTIALIAS)
			im.save(join(config.Path.CachePhotos, basename+'_k'+ext), "JPEG")

		thumb = ImageOps.fit(im, [75,75], Image.ANTIALIAS)
		thumb.save(join(config.Path.CachePhotos, str(id) + '_' + secret + '_s' + ext), "JPEG")
		thumb = ImageOps.fit(im, [150,150], Image.ANTIALIAS)
		thumb.save(join(config.Path.CachePhotos, str(id) + '_' + secret + '_q' + ext), "JPEG")
		

		im2 = im.copy()
		im2.thumbnail([1600,1600], Image.ANTIALIAS)
		im2.save(join(config.Path.CachePhotos, basename+'_h'+ext), "JPEG")

		im = im.copy()
		im.thumbnail([1024,1024], Image.ANTIALIAS)
		im.save(join(config.Path.CachePhotos, basename+'_b'+ext), "JPEG")

		im2 = im.copy()
		im2.thumbnail([800,800], Image.ANTIALIAS)
		im2.save(join(config.Path.CachePhotos, basename+'_c'+ext), "JPEG")

		im2 = im.copy()
		im2.thumbnail([640,640], Image.ANTIALIAS)
		im2.save(join(config.Path.CachePhotos, basename+'_z'+ext), "JPEG")

		im2 = im.copy()
		im2.thumbnail([500,500], Image.ANTIALIAS)
		im2.save(join(config.Path.CachePhotos, basename+ext), "JPEG")

		im2 = im.copy()
		im2.thumbnail([320,320], Image.ANTIALIAS)
		im2.save(join(config.Path.CachePhotos, basename+'_n'+ext), "JPEG")

		im2 = im.copy()
		im2.thumbnail([240,240], Image.ANTIALIAS)
		im2.save(join(config.Path.CachePhotos, basename+'_m'+ext), "JPEG")

		return True
Example #25
def preprocess(file_name, variations, storage):
    with storage.open(file_name) as f:
        with Image.open(f) as image:
            file_format = 'PNG'

            # resize to a maximum of 1000x1000 keeping aspect ratio
            image.thumbnail((1000, 1000), resample=Image.ANTIALIAS)

            # Create a disk as mask
            mindimension = min(1000, image.size[1], image.size[0])
            bigsize = (mindimension * 3, mindimension * 3)
            mask = Image.new('L', bigsize, 0)
            draw = ImageDraw.Draw(mask)
            draw.ellipse((0, 0) + bigsize, fill=255)
            mask = mask.resize((mindimension, mindimension), Image.ANTIALIAS)

            # only keep the image that fit in the mask
            output = ImageOps.fit(image, mask.size, centering=(0.5, 0.5))
            output.putalpha(mask)

            with BytesIO() as file_buffer:
                output.save(file_buffer, file_format)
                f = ContentFile(file_buffer.getvalue())
                # delete the original big image
                storage.delete(file_name)
                # save the resized version with the same filename and format
                storage.save(file_name, f)

    # render stdimage variations
    render_variations(file_name, variations, replace=True, storage=storage)

    return False  # prevent default rendering
Example #26
def crop_image(photo, size=False, encode_b64=True, height_photo=0.0, width_photo=0.0):
    if photo is None or not photo:
        return False
    try:
        photo = base64.b64decode(photo)
    except: return False
    image_stream = io.BytesIO(photo)
    foto = Image.open(image_stream)
    format_image = str(foto.format).lower()
    if not size:
        width = foto.size[0]
        height = foto.size[1]
        size = height
        if width != height:
            if width < height:
                size = width
    height = int(size + (size * height_photo / 100))
    width = int(size + (size * width_photo / 100))
    foto = ImageOps.fit(foto, (width, height), Image.ANTIALIAS, centering=(0.5, 0.15))
    
    background_stream = StringIO.StringIO()
    foto.save(background_stream, format=format_image)
    
    foto = background_stream.getvalue()
    if encode_b64:
        foto = foto.encode('base64')
    return foto
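A hedged usage sketch for crop_image above, written against the same Python 2 conventions the function itself uses (StringIO, str.encode('base64')); the file path is a placeholder.

# Hypothetical Python 2 usage: square-crop a photo, biased towards the top of the frame.
with open('portrait.jpg', 'rb') as fh:
    photo_b64 = fh.read().encode('base64')

cropped_b64 = crop_image(photo_b64, encode_b64=True)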
Example #27
File: image.py Project: Tambralinga/RIS
def resize_image_thumb(inp, outp, width=200, height=200):
    """
    Resizes an image
    """
    img = Pil_Image.open(inp)
    img = ImageOps.fit(img, (width, height), Pil_Image.ANTIALIAS)
    img.save(outp, format='JPEG', quality=75)
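A one-line usage sketch for resize_image_thumb above; the file names are placeholders.

resize_image_thumb('uploads/original.png', 'uploads/original_200x200.jpg', width=200, height=200)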
Example #28
def resize_image(photo, size_base=64, encode_b64=True, ignore_original_dimensions=True):
    if photo is None or not photo:
        return False
    try:
        photo = base64.b64decode(photo)
    except: return False
    image_stream = io.BytesIO(photo)
    foto = Image.open(image_stream)
    format_image = str(foto.format).lower()
    width = foto.size[0]
    height = foto.size[1]
    if width != height:
        if width < height:
            width = width * size_base / height
            height = size_base
        else:
            height = height * size_base / width
            width = size_base
    else:
        height = size_base
        width = size_base
        
    if not ignore_original_dimensions:
        if width > foto.size[0] or height > foto.size[1]:
            width = foto.size[0]
            height = foto.size[1]
    foto = ImageOps.fit(foto, (width, height), Image.ANTIALIAS)
    
    background_stream = StringIO.StringIO()
    foto.save(background_stream, format=format_image)
    
    foto = background_stream.getvalue()
    if encode_b64:
        foto = foto.encode('base64')
    return foto
Example #29
def get_placeholder_image(width, height, name=None, fg_color=get_color('black'),
        bg_color=get_color('grey'), text=None, font=u'Verdana.ttf',
        fontsize=42, encoding=u'unic', mode='RGBA', fmt=u'PNG'):
    """Little spin-off from https://github.com/Visgean/python-placeholder
    that not saves an image and instead returns it."""
    size = (width, height)
    text = text if text else '{0}x{1}'.format(width, height)

    try:
        font = ImageFont.truetype(font, size=fontsize, encoding=encoding)
    except IOError:
        font = ImageFont.load_default()

    result_img = Image.new(mode, size, bg_color)

    text_size = font.getsize(text)
    text_img = Image.new("RGBA", size, bg_color)

    #position for the text:
    left = size[0] / 2 - text_size[0] / 2
    top = size[1] / 2 - text_size[1] / 2

    drawing = ImageDraw.Draw(text_img)
    drawing.text((left, top),
                 text,
                 font=font,
                 fill=fg_color)

    txt_img = ImageOps.fit(text_img, size, method=Image.BICUBIC, centering=(0.5, 0.5))

    result_img.paste(txt_img)
    file_obj = io.BytesIO()
    txt_img.save(file_obj, fmt)

    return file_obj.getvalue()
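A minimal usage sketch for get_placeholder_image above, assuming the same imports as the snippet (io, ImageFont, ImageDraw, ImageOps) and the project's get_color helper; the dimensions and output file name are placeholders.

# Hedged usage: write a 300x200 grey placeholder with centred size text to disk.
png_bytes = get_placeholder_image(300, 200)
with open('placeholder_300x200.png', 'wb') as fh:
    fh.write(png_bytes)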
Example #30
def image_resize_image(base64_source,
                       size=(1024, 1024),
                       encoding='base64',
                       filetype='PNG',
                       avoid_if_small=False):
    """ Function to resize an image. The image will be resized to the given
        size, while keeping the aspect ratios, and holes in the image will be
        filled with transparent background. The image will not be stretched if
        smaller than the expected size.
        Steps of the resizing:
        - Compute width and height if not specified.
        - if avoid_if_small: if both image sizes are smaller than the requested
          sizes, the original image is returned. This is used to avoid adding
          transparent content around images that we do not want to alter but
          just resize if too big. This is used for example when storing images
          in the 'image' field: we keep the original image, resized to a maximal
          size, without adding transparent content around it if smaller.
        - create a thumbnail of the source image through using the thumbnail
          function. Aspect ratios are preserved when using it. Note that if the
          source image is smaller than the expected size, it will not be
          extended, but filled to match the size.
        - create a transparent background that will hold the final image.
        - paste the thumbnail on the transparent background and center it.

        :param base64_source: base64-encoded version of the source
            image; if False, returns False
        :param size: 2-tuple(width, height). A None value for any of width or
            height means an automatically computed value based respectively
            on height or width of the source image.
        :param encoding: the output encoding
        :param filetype: the output filetype
        :param avoid_if_small: do not resize if image height and width
            are smaller than the expected size.
    """
    if not base64_source:
        return False
    if size == (None, None):
        return base64_source
    image_stream = io.BytesIO(base64_source.decode(encoding))
    image = Image.open(image_stream)

    asked_width, asked_height = size
    if asked_width is None:
        asked_width = int(image.size[0] *
                          (float(asked_height) / image.size[1]))
    if asked_height is None:
        asked_height = int(image.size[1] *
                           (float(asked_width) / image.size[0]))
    size = asked_width, asked_height

    # check image size: do not create a thumbnail if avoiding smaller images
    if avoid_if_small and image.size[0] <= size[0] and image.size[1] <= size[1]:
        return base64_source

    if image.size != size:
        # If you need faster thumbnails you may use Image.NEAREST
        image = ImageOps.fit(image, size, Image.ANTIALIAS)
    if image.mode not in ["1", "L", "P", "RGB", "RGBA"]:
        image = image.convert("RGB")

    background_stream = StringIO.StringIO()
    image.save(background_stream, filetype)
    return background_stream.getvalue().encode(encoding)
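A hedged usage sketch for image_resize_image above. The function is written for Python 2 (StringIO, str-based base64 codecs), so the sketch follows that style; the file path is a placeholder.

# Hypothetical Python 2 usage; 'logo.png' is a placeholder path.
with open('logo.png', 'rb') as fh:
    source_b64 = fh.read().encode('base64')

# Width fixed at 128, height computed from the source aspect ratio.
resized_b64 = image_resize_image(source_b64, size=(128, None), avoid_if_small=True)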
Example #31
laststatus = -1

while 1:
    ret, img = cap.read()
    cv2.rectangle(img, (300, 300), (100, 100), (0, 255, 0), 0)
    cv2.imshow("camera", img)
    crop_img = img[100:300, 100:300]
    ycrcb = cv2.cvtColor(crop_img, cv2.COLOR_BGR2YCrCb)  # split into YCrCb and take the Cr channel
    (_, cr, _) = cv2.split(ycrcb)
    cr1 = cv2.GaussianBlur(cr, (35, 35), 0)  # Gaussian blur
    _, thresh1 = cv2.threshold(cr1, 0, 255, cv2.THRESH_BINARY +
                               cv2.THRESH_OTSU)  # Otsu binarization
    cv2.imshow("image1", thresh1)
    thresh1 = (thresh1 * 1.0) / 255
    thresh1 = Image.fromarray(thresh1)
    thresh1 = ImageOps.fit(thresh1, [par.image_size, par.image_size])
    if par.threshold:
        testImage = np.reshape(thresh1,
                               [-1, par.image_size, par.image_size, 1])
    else:
        testImage = np.reshape(thresh1,
                               [-1, par.image_size, par.image_size, 3])
    testImage = testImage.astype(np.float32)
    testY = sess.run(prediction, feed_dict={X: testImage, keep_prob: 1.0})
    if testY[0][1] == 1:
        if laststatus != 1 and lastlaststatus != 1:
            print('暂停')  # "pause"
            win32api.keybd_event(VK_CODE['ctrl'], 0, 0, 0)
            win32api.keybd_event(VK_CODE['alt'], 0, 0, 0)
            win32api.keybd_event(VK_CODE['p'], 0, 0, 0)
            win32api.keybd_event(VK_CODE['p'], 0, win32con.KEYEVENTF_KEYUP, 0)
Example #32
 def resizeImage(self, qualityMode=None):
     upscale = self.opt.upscale
     stretch = self.opt.stretch
     bordersColor = self.opt.bordersColor
     if qualityMode is None:
         qualityMode = self.opt.quality
     if bordersColor:
         fill = bordersColor
     else:
         fill = self.fill
     # Set target size
     if qualityMode == 0:
         size = (self.size[0], self.size[1])
     elif qualityMode == 1 and not stretch and not upscale and self.image.size[0] <=\
             self.size[0] and self.image.size[1] <= self.size[1]:
         size = (self.size[0], self.size[1])
     elif qualityMode == 1:
         # Forcing upscale to make sure that margins will not be too big
         if not stretch:
             upscale = True
         size = (self.panelviewsize[0], self.panelviewsize[1])
     elif qualityMode == 2 and not stretch and not upscale and self.image.size[0] <=\
             self.size[0] and self.image.size[1] <= self.size[1]:
         self.purge = True
         return self.image
     else:
         self.hq = True
         size = (self.panelviewsize[0], self.panelviewsize[1])
     # If stretching is on - Resize without other considerations
     if stretch:
         if self.image.size[0] <= size[0] and self.image.size[1] <= size[1]:
             method = Image.BICUBIC
         else:
             method = Image.LANCZOS
         self.image = self.image.resize(size, method)
         return self.image
     # If image is smaller than target resolution and upscale is off - Just expand it by adding margins
     if self.image.size[0] <= size[0] and self.image.size[1] <= size[
             1] and not upscale:
         borderw = int((size[0] - self.image.size[0]) / 2)
         borderh = int((size[1] - self.image.size[1]) / 2)
         # PV is disabled when source image is smaller than device screen and upscale is off
         if self.image.size[0] <= self.size[0] and self.image.size[
                 1] <= self.size[1]:
             self.noPV = True
         self.image = ImageOps.expand(self.image,
                                      border=(borderw, borderh),
                                      fill=fill)
         # Border can't be float so sometimes image might be 1px too small/large
         if self.image.size[0] != size[0] or self.image.size[1] != size[1]:
             self.image = ImageOps.fit(self.image,
                                       size,
                                       method=Image.BICUBIC,
                                       centering=(0.5, 0.5))
         return self.image
     # Otherwise - Upscale/Downscale
     ratioDev = float(size[0]) / float(size[1])
     if (float(self.image.size[0]) / float(self.image.size[1])) < ratioDev:
         diff = int(self.image.size[1] * ratioDev) - self.image.size[0]
         self.image = ImageOps.expand(self.image,
                                      border=(int(diff / 2), 0),
                                      fill=fill)
     elif (float(self.image.size[0]) /
           float(self.image.size[1])) > ratioDev:
         diff = int(self.image.size[0] / ratioDev) - self.image.size[1]
         self.image = ImageOps.expand(self.image,
                                      border=(0, int(diff / 2)),
                                      fill=fill)
     if self.image.size[0] <= size[0] and self.image.size[1] <= size[1]:
         method = Image.BICUBIC
     else:
         method = Image.LANCZOS
     self.image = ImageOps.fit(self.image,
                               size,
                               method=method,
                               centering=(0.5, 0.5))
     return self.image
Example #33
def download_images(tweet_id: int):
    """
    Downloads images from Twitter and makes them into one image with Pillow.

    tweet_id is snowflake ID for tweet.

    Returns json with url for our created image. If tweet only has one image we
    send that instead of creating our own.
    """
    try:
        images = []
        tweet = api.get_status(tweet_id, tweet_mode="extended")
        links = link_list(
            tweet)  # Generates a list with all the images in the tweet.
        x_offset = 0

        for link in links:
            with tempfile.SpooledTemporaryFile() as tmp:
                print("Trying to download " + link)
                response = requests.get(link)
                tmp.write(response.content)

                # Crop to 512 by 512 pixels
                thumb = ImageOps.fit(Image.open(tmp), (512, 512),
                                     Image.ANTIALIAS)

                # Create temp file to store the cropped image, we remove it manually later
                filename = tempfile.NamedTemporaryFile(suffix=".png",
                                                       delete=False)

                # Save the crop
                thumb.save(filename)
                print(f"Saved {filename.name} ({link})")

                # Add the path to list so we can combine them later
                images.append(str(filename.name))

        if len(links) == 1:
            print("Found 1 link")
            image_url = (
                tweet.extended_entities["media"][0]["media_url_https"].replace(
                    ".png",
                    "?format=png&name=orig").replace(".jpg",
                                                     "?format=jpg&name=orig")
            )  # Get better quality

            return {
                "url": image_url,
            }

        if len(links) == 2:
            print("Found 2 links")
            imgs = list(map(Image.open, (images[0], images[1])))
            new_im = Image.new("RGB", (1024, 512))

            for img in imgs:
                new_im.paste(img, (x_offset, 0))
                x_offset += img.size[0]

        if len(links) == 3:
            print("Found 3 links")
            imgs = list(map(Image.open, (images[0], images[1], images[2])))
            new_im = Image.new("RGB", (1536, 512))

            for img in imgs:
                new_im.paste(img, (x_offset, 0))
                x_offset += img.size[0]

        if len(links) == 4:
            print("Found 4 links")
            imgs = list(
                map(Image.open, (images[0], images[1], images[2], images[3])))
            new_im = Image.new("RGB", (1024, 1024))

            new_im.paste(imgs[0], (0, 0))
            new_im.paste(imgs[1], (512, 0))
            new_im.paste(imgs[2], (0, 512))
            new_im.paste(imgs[3], (512, 512))

        # Save our merged image
        new_im.save(f"static/tweets/{tweet_id}.png")
        print(
            f"Saved merged image for https://twitter.com/i/status/{tweet_id}")
        return {
            "url": f"{url}/static/tweets/{tweet_id}.png",
        }
    except Exception as e:
        print("Error: " + str(e))
        notify_discord(
            f"errored for https://twitter.com/i/status/{tweet_id}\n{e} "
            f"<@{discord_username}>")
    finally:
        filename.close()
        for image in images:
            print(f"Removing {image}")
            os.remove(str(image))
Example #34
0
def image_manipulation(url, pixelSize):

    # open the image using the url
    image = Image.open(urlopen(url))

    # RESIZE THE IMAGE BASED ON THE GIVEN PIXELS
    image = image.resize((math.floor(
        image.height / pixelSize), math.floor(image.height / pixelSize)),
                         Image.NEAREST)
    image = ImageOps.fit(image, (pixelSize, pixelSize), centering=(0.5, 0.5))
    image = ImageOps.posterize(image, 3)

    # SIMPLIFY THE IMAGE's COLORS
    colors = [255, 170, 120, 85, 0]
    original_color_count = {}
    color_count = {}

    # Loop through every pixel in the image and modify it so it only uses the allowed values
    for w in range(image.width):
        for h in range(image.height):
            current_color = image.getpixel((w, h))

            if current_color in original_color_count:
                original_color_count[current_color] += 1
            else:
                original_color_count[current_color] = 1

            r, g, b = current_color
            r_set = False
            g_set = False
            b_set = False

            #  Loop through our allowed values and find the closest value to snap to
            # pylint: disable=C0200
            for i in range(len(colors)):
                color_one = colors[i]
                color_two = colors[i + 1]

                if not r_set:
                    if color_one >= r >= color_two:
                        distance_one = color_one - r
                        distance_two = r - color_two
                        r = color_one if distance_one <= distance_two else color_two
                        r_set = True

                if not g_set:
                    if color_one >= g >= color_two:
                        distance_one = color_one - g
                        distance_two = g - color_two
                        g = color_one if distance_one <= distance_two else color_two
                        g_set = True

                if not b_set:
                    if color_one >= b >= color_two:
                        distance_one = color_one - b
                        distance_two = b - color_two
                        b = color_one if distance_one <= distance_two else color_two
                        b_set = True

                if all((r_set, g_set, b_set)):
                    break

            # Set our new pixel back on the image to see the difference
            new_rgb = (r, g, b)
            image.putpixel((w, h), new_rgb)

            if new_rgb in color_count:
                color_count[new_rgb] += 1
            else:
                color_count[new_rgb] = 1

    return image
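To make the snapping loop above concrete: for a red value of 100 the bracketing allowed values are 120 and 85, the distances are 20 and 15, so the channel snaps to 85. A small illustrative helper (the function name is made up) that performs the same per-channel step:

def snap_channel(value, colors=(255, 170, 120, 85, 0)):
    # Find the pair of allowed values that brackets `value` and return the
    # closer endpoint, exactly as the loop above does for r, g and b.
    for i in range(len(colors) - 1):
        high, low = colors[i], colors[i + 1]
        if high >= value >= low:
            return high if (high - value) <= (value - low) else low
    return value

print(snap_channel(100))  # -> 85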
Example #35
0
    def POST(self):
        x = web.input(myfile={})
        filedir = '/workspace/se-alesDeTransito/senales/static/'
        if 'myfile' in x:
            filepath = x.myfile.filename.replace('\\', '/')
            filename = filepath.split('/')[-1]
            fout = open(filedir + '/' + filename, 'wb')
            fout.write(x.myfile.file.read())
            fout.close()
            np.set_printoptions(suppress=True)

            # Load the model
            model = tensorflow.keras.models.load_model(
                '/workspace/se-alesDeTransito/senales/static/keras_model.h5')

            # Create the array of the right shape to feed into the keras model
            # The 'length' or number of images you can put into the array is
            # determined by the first position in the shape tuple, in this case 1.
            data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

            # Replace this with the path to your image
            image = Image.open('/workspace/se-alesDeTransito/senales/static/' +
                               filename)
            #resize the image to a 224x224 with the same strategy as in TM2:
            #resizing the image to be at least 224x224 and then cropping from the center
            size = (224, 224)
            image = ImageOps.fit(image, size, Image.ANTIALIAS)

            #turn the image into a numpy array
            image_array = np.asarray(image)

            # display the resized image
            image.show()

            # Normalize the image
            normalized_image_array = (image_array.astype(np.float32) /
                                      127.0) - 1

            # Load the image into the array
            data[0] = normalized_image_array

            # run the inference
            prediction = model.predict(data)

            for i in prediction:
                if i[0] > 0.85:
                    titulo = "derrumbes"
                    resultado = "La imagen es una señal de zona de derrumbes."
                    descripcion = "Advierte sobre una zona en la cual pueden ocurrir derrumbes."
                    status = 200

                elif i[1] > 0.85:
                    titulo = "doble"
                    resultado = "La imagen es una señal de doble circulación."
                    descripcion = "El señalamiento se utiliza para marcar el camino de circulación en un solo sentido o en doble sentido."
                    status = 200

                elif i[2] > 0.85:
                    titulo = "intersección"
                    resultado = "La imagen es una señal de intersección (entronque) de 4 vías."
                    descripcion = "La línea mas ancha señalara el camino principal, mientras que la mas angosta el camino secundario."
                    status = 200

                elif i[3] > 0.85:
                    titulo = "lateral"
                    resultado = "La imagen es una señal de incorporación de transito."
                    descripcion = "Este tipo de señalamiento avisa sobre la incorporación de transito que va en la misma."
                    status = 200

                elif i[4] > 0.85:
                    titulo = "peatón"
                    resultado = "La imagen es una señal de peatón."
                    descripcion = "Indica un camino con constante paso peatonal o cruce peatonal en específico."
                    status = 200

                elif i[5] > 0.85:
                    titulo = "tope"
                    resultado = "La imagen es una señal de tope."
                    descripcion = "Advierte la proximidad de una protuberancia en la superficie de la vía"
                    status = 200

                else:
                    titulo = "error"
                    resultado = "No pudimos interpretar la imagen, intenta de nuevo."
                    descripcion = "La imagen no pertenece a una señal o aún no es entrenada."
                    status = 404

                if path.exists(filedir + filename):
                    remove(filedir + filename)
        datos = {titulo: []}

        senal = {}
        senal["resultado"] = resultado
        senal["descripcion"] = descripcion
        senal["status"] = status
        datos[titulo].append(senal)
        return json.dumps(datos)
Example #36
0
from PIL import Image, ImageOps
from os import listdir
from os.path import isfile, join

# ONLY put image files in this folder
InputPath = './input'
# The resized images will be placed in this folder
OutputPath = './output'
# Output Rez x, y
ImageResize = (266, 375)
# Will Overwrite files if True
Overwrite = False

inputDir = [f for f in listdir(InputPath) if isfile(join(InputPath, f))]
outputDir = [f for f in listdir(OutputPath) if isfile(join(OutputPath, f))]


for fileName in inputDir:
    if fileName not in outputDir or Overwrite == True:
        filePath = InputPath + "/" + fileName

        image = Image.open(filePath)
        image = ImageOps.fit(image, ImageResize, Image.ANTIALIAS, 0, (0.5, 0.5))
        image.save(f'{OutputPath}/{fileName}')
                
        print("Successfully Resized " + fileName + 'To ' + str(ImageResize))
    else:
        print(fileName + 'Already in output directory')


Example #37
0
def generate_collection_thumbnail(collection, width, heigth):
    MARGIN = int(width / 30)
    MID_MARGIN = int(width / 90)
    BG = (13, 2, 59)
    DISPLAY_GRANTS_LIMIT = 4
    PROFILE_WIDTH = PROFILE_HEIGHT = int(width / 3.5)
    GRANT_WIDTH = int(width / 2) - MARGIN - MID_MARGIN
    GRANT_HEIGHT = int(heigth / 2) - MARGIN - MID_MARGIN
    IMAGE_BOX = (width, heigth)
    LOGO_SIZE_DIFF = int(GRANT_WIDTH / 5)
    HALF_LOGO_SIZE_DIFF = int(LOGO_SIZE_DIFF / 2)
    PROFILE_BOX = (PROFILE_WIDTH - LOGO_SIZE_DIFF,
                   PROFILE_HEIGHT - LOGO_SIZE_DIFF)
    GRANT_BOX = (GRANT_WIDTH, GRANT_HEIGHT)
    media_url = '' if 'media' not in MEDIA_URL else BASE_URL[:-1]

    grants = collection.grants.all()

    logos = []
    for grant in grants:
        if grant.logo:
            if len(logos) > DISPLAY_GRANTS_LIMIT:
                break
            grant_url = f'{media_url}{grant.logo.url}'
            print(f'Trying to get: {grant_url}')
            fd = urllib.request.urlopen(grant_url)
            logos.append(fd)
        else:
            static_file = f'assets/v2/images/grants/logos/{grant.id % 3}.png'
            logos.append(static_file)

    for logo in range(len(logos), 4):
        logos.append(None)

    thumbail = Image.new('RGBA', IMAGE_BOX, color=BG)
    avatar_url = f'{media_url}{collection.profile.avatar_url}'
    fd = urllib.request.urlopen(avatar_url)

    # Make rounder profile avatar img
    mask = Image.new('L', PROFILE_BOX, 0)
    draw = ImageDraw.Draw(mask)
    draw.ellipse((0, 0) + PROFILE_BOX, fill=255)
    profile_thumbnail = Image.open(fd)

    profile_thumbnail.thumbnail(PROFILE_BOX, Image.ANTIALIAS)
    profile_circle = ImageOps.fit(profile_thumbnail,
                                  mask.size,
                                  centering=(0.5, 0.5))

    try:
        applied_mask = profile_circle.copy()
        applied_mask.putalpha(mask)
        profile_circle.paste(applied_mask, (0, 0), profile_circle)
    except ValueError:
        profile_circle.putalpha(mask)

    CORNERS = [
        [MARGIN, MARGIN],  # Top left grant
        [width - GRANT_WIDTH - MARGIN, MARGIN],  # Top right grant
        [MARGIN, heigth - GRANT_HEIGHT - MARGIN],  # bottom left grant
        [width - GRANT_WIDTH - MARGIN,
         heigth - GRANT_HEIGHT - MARGIN]  # bottom right grant
    ]

    for index in range(4):
        if logos[index] is None:
            grant_bg = Image.new('RGBA', GRANT_BOX, color='white')
            thumbail.paste(grant_bg, CORNERS[index], grant_bg)
            continue

        if type(logos[index]) is not str and re.match(r'.*\.svg',
                                                      logos[index].url):
            grant_img = convert_img(logos[index])
            grant_thumbail = Image.open(grant_img)
        else:
            try:
                grant_thumbail = Image.open(logos[index])
            except ValueError:
                grant_thumbail = Image.open(logos[index]).convert("RGBA")

        grant_thumbail.thumbnail(GRANT_BOX, Image.ANTIALIAS)

        grant_bg = Image.new('RGBA', GRANT_BOX, color='white')

        try:
            grant_bg.paste(
                grant_thumbail,
                (int(GRANT_WIDTH / 2 - grant_thumbail.size[0] / 2),
                 int(GRANT_HEIGHT / 2 - grant_thumbail.size[1] / 2)),
                grant_thumbail)
        except ValueError:
            grant_bg.paste(
                grant_thumbail,
                (int(GRANT_WIDTH / 2 - grant_thumbail.size[0] / 2),
                 int(GRANT_HEIGHT / 2 - grant_thumbail.size[1] / 2)))

        thumbail.paste(grant_bg, CORNERS[index], grant_bg)

    draw_on_thumbnail = ImageDraw.Draw(thumbail)
    draw_on_thumbnail.ellipse([(int(width / 2 - PROFILE_WIDTH / 2),
                                int(heigth / 2 - PROFILE_HEIGHT / 2)),
                               (int(width / 2 + PROFILE_WIDTH / 2),
                                int(heigth / 2 + PROFILE_HEIGHT / 2))],
                              fill="#0D013B")

    try:
        thumbail.paste(
            profile_circle,
            (int(width / 2 - PROFILE_WIDTH / 2) + HALF_LOGO_SIZE_DIFF,
             int(heigth / 2 - PROFILE_HEIGHT / 2) + HALF_LOGO_SIZE_DIFF),
            profile_circle)
    except ValueError:
        thumbail.paste(
            profile_circle,
            (int(width / 2 - PROFILE_WIDTH / 2) + HALF_LOGO_SIZE_DIFF,
             int(heigth / 2 - PROFILE_HEIGHT / 2) + HALF_LOGO_SIZE_DIFF))

    return thumbail
Example #38
0
    lot = story['lot']
    features.append({
        'type': 'Feature',
        'geometry': geom,
        'properties': {
            'bbl': [boro, block, lot],
            'addr': story['address'],
            'text': story['text'],
            'id': i
        }
    })
    path = "www-data/photos/{0}/{1:05d}/{2:04d}.jpg".format(boro, block, lot)
    if os.path.isfile(path):
        img = Image.open(path)
        size = (THUMB_SIZE, THUMB_SIZE)
        thumb = ImageOps.fit(img, size, method=Image.ANTIALIAS)
        x = THUMB_SIZE * (i % (ATLAS_SIZE // THUMB_SIZE))
        y = THUMB_SIZE * (i // (ATLAS_SIZE // THUMB_SIZE))
        atlas.paste(thumb, (x, y))
        sprite_entries["story_{0}".format(i)] = {
            "width": THUMB_SIZE,
            "height": THUMB_SIZE,
            "x": x,
            "y": y,
            "pixelRatio": 1
        }

atlas.save("www/public/images/atlas.png")
atlas.save("www/public/images/[email protected]")
# write atlas.json
for x in ["www/public/images/atlas.json", "www/public/images/[email protected]"]:
Example #39
0
def get_data(val_split: float, test_split: float, randomize: bool):
  # Get top roof images
  img_directory = "C:\\Images"
  top_file_name = "top.png"
  img_size = 299, 299
  keys = os.listdir(img_directory)
  imgs = []
  for name in keys:
    top = Image.open(img_directory + "\\" + name + "\\" + top_file_name).convert("RGB")
    top_fitted = ImageOps.fit(top, img_size, Image.ANTIALIAS)
    top_arr = np.array(top_fitted, dtype="int32")
    imgs.append(top_arr)

  images = dict(sorted(zip(keys, imgs), key=lambda x: x[0]))

  # Get order data
  orders = pd.read_csv('C:\\Predictions\\predictions.csv')

  # Make sure our count of data is correct
  assert (len(images) == len(orders))
  print ("Using " + str(len(images)) + " total orders.")

  # randomize orders dataframe
  if (randomize == True):
    orders = orders.sample(frac=1).reset_index(drop=True)
  # compute the split point regardless of whether the rows were shuffled
  splitIndex = int(len(orders) * val_split)

  if (val_split > 0.0):
    features_train = orders.iloc[splitIndex:, :-2]
    features_test = orders.iloc[:splitIndex, :-2]
    output_train = orders.iloc[splitIndex:, -2:]
    output_test = orders.iloc[:splitIndex, -2:]
  else:
    features_train = orders.iloc[:, :-2]
    features_test = pd.DataFrame([])
    output_train = orders.iloc[:, -2:]
    output_test = pd.DataFrame([])

  # Construct image inputs in same order as randomized feature inputs
  images_train = []
  images_test = []
  for order in features_train["order_id"]:
    images_train.append(images[str(order)])
  if (val_split > 0.0):
    for order in features_test["order_id"]:
      images_test.append(images[str(order)])
  
  features_train.drop(labels="order_id", axis=1, inplace=True)
  if (val_split > 0.0):
    features_test.drop(labels="order_id", axis=1, inplace=True)

  features_norm_train, _ = NormalizeDataframe(features_train)
  features_norm_test, _ = NormalizeDataframe(features_test)
  output_norm_train, ranges_output_train = NormalizeDataframe(output_train)
  output_norm_test, ranges_output_test = NormalizeDataframe(output_test)
  images_norm_train = NormalizeArray(np.array(images_train))
  images_norm_test = NormalizeArray(np.array(images_test))

  if (val_split > 0.0):
    output_test = (output_test["area_slope"].values, output_test["area_intercept"].values)
    output_norm_test = (output_norm_test["area_slope"].values, output_norm_test["area_intercept"].values)
  else:
    output_test = ()
    output_norm_test = ()
  return {
    DataKeys.RawFeaturesTrain: (images_train, features_train.values),
    DataKeys.RawFeaturesTest: (images_test, features_test.values),
    DataKeys.NormalizedFeaturesTrain: (images_norm_train, features_norm_train.values),
    DataKeys.NormalizedFeaturesTest: (images_norm_test, features_norm_test.values),
    DataKeys.RawOutputTrain: (output_train["area_slope"].values, output_train["area_intercept"].values),
    DataKeys.RawOutputTest: output_test,
    DataKeys.NormalizedOutputTrain: (output_norm_train["area_slope"].values, output_norm_train["area_intercept"].values),
    DataKeys.NormalizedOutputTest: output_norm_test,
    DataKeys.OutputRangesTrain: ranges_output_train,
    DataKeys.OutputRangesTest: ranges_output_test
  }
Example #40
0
def resize_pic(in_name, size):
    img = Image.open(in_name)
    img = ImageOps.fit(img, (size, size), Image.ANTIALIAS)
    return img
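A short usage sketch for the helper above; the file names are assumptions.

thumb = resize_pic('photo.jpg', 256)   # center-crop/resize to a 256x256 square
thumb.save('photo_256.jpg')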
Example #41
0
import tkinter as tk

mainWindowSize = "1350x690+0+0"
win = tk.Tk()
win.title("Picture")
win.geometry(mainWindowSize)
win.resizable(0, 0)
win.configure(background='white')

#image label settings
Height = 100
Width = 100
Left = 0
Top = 0
size = (Width + 3, Height + 3)
Path = 'j.jpg'

#view image
from PIL import ImageTk, Image, ImageOps
img = ImageTk.PhotoImage(ImageOps.fit(Image.open(Path), size, Image.ANTIALIAS))
name = tk.Label(win, image=img, width=Width, height=Height)
name.place(x=Left, y=Top)

win.mainloop()
Example #42
0
    async def welcomeimage(self, member, url):
        async with aiohttp.ClientSession() as session:
            async with session.get(f'{member.avatar_url}') as resp:
                profile_bytes = await resp.read()

        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                background = await resp.read()

        big_size = (200, 200)
        background_size = (1000, 400)

        profile_bytes = Image.open(BytesIO(profile_bytes)).convert("RGB")
        background = Image.open(BytesIO(background)).convert("RGB")

        background.thumbnail(background_size)
        profile_bytes.thumbnail(big_size)

        White = Image.new("RGBA", (210, 210), "white")
        mask = Image.new("L", profile_bytes.size, 0)
        mask2 = Image.new("L", White.size, 0)

        draw = ImageDraw.Draw(mask)
        draw.ellipse((0, 0) + big_size, fill=255)
        profile_bytes = ImageOps.fit(profile_bytes,
                                     mask.size,
                                     centering=(0.5, 0.5))
        profile_bytes.putalpha(mask)

        draw = ImageDraw.Draw(mask2)
        draw.ellipse((0, 0) + White.size, fill=255)
        White = ImageOps.fit(White, mask2.size, centering=(0.5, 0.5))
        White.putalpha(mask2)

        width, height = background.size
        big_size = (int((width - 200) / 2), int((height - 300) / 2))
        big_size2 = (int((width - 210) / 2), int((height - 310) / 2))
        welcomeH = int((height + 100) / 2)

        background.paste(White, big_size2, mask2)
        background.paste(profile_bytes, big_size, mask)
        bfont = ImageFont.truetype('ARIBLK.TTF', 58)
        font = ImageFont.truetype('ARIBLK.TTF', 38)

        im_draw = ImageDraw.Draw(background)
        text = 'WELCOME'
        w, H = draw.textsize(text, font=bfont)

        im_draw.text(((width - w) / 2, welcomeH),
                     text,
                     font=bfont,
                     fill=(255, 255, 255, 255))

        text = str(member.name)
        w, h = draw.textsize(text, font=font)
        im_draw.text(((width - w) / 2, welcomeH + h + 10),
                     text,
                     font=font,
                     fill=(255, 255, 255, 255))

        buffer = BytesIO()
        background.save(buffer, 'png')
        buffer.seek(0)

        return buffer
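The circular avatar above uses a common Pillow pattern: draw a filled ellipse on a grayscale 'L' mask, fit the photo to the mask size, then attach the mask as the alpha channel. A minimal self-contained sketch of just that step (helper name and file paths are assumptions):

from PIL import Image, ImageDraw, ImageOps

def circular_crop(img, diameter):
    # White circle on a black 'L' mask; white areas stay opaque after putalpha.
    mask = Image.new('L', (diameter, diameter), 0)
    ImageDraw.Draw(mask).ellipse((0, 0, diameter, diameter), fill=255)
    # Center-crop to the mask size, then use the mask as the alpha channel.
    fitted = ImageOps.fit(img.convert('RGB'), mask.size, centering=(0.5, 0.5))
    fitted.putalpha(mask)
    return fitted

# avatar = circular_crop(Image.open('avatar.png'), 200)
# avatar.save('avatar_circle.png')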
Example #43
0
for i, elem in enumerate(trainImages):
    h, w, d = elem.shape
    new_size = max(h, w)
    img = Image.fromarray(elem, 'RGB')
    delta_h = new_size - h
    delta_w = new_size - w
    padding = (delta_w // 2, delta_h // 2, delta_w - (delta_w // 2),
               delta_h - (delta_h // 2))
    new_im = ImageOps.expand(img, padding)
    trainImages[i] = numpy.array(new_im)

#Image resize
for i, elem in enumerate(trainImages):
    size = image_size
    img = Image.fromarray(elem, 'RGB')
    trainImages[i] = numpy.array(ImageOps.fit(img, size, Image.ANTIALIAS))

# Split a dataset into a train and test set
train_images = []
test_images = trainImages
train_labels = []
test_labels = trainLabels

train_size = 0.8 * len(trainImages)
for_shuffle = []

#Take random images and form train data
while len(for_shuffle) * 30 < train_size:
    index = randrange(len(test_images) // 30)
    subarray = []
    for k in range(30):
Example #44
0
from PIL import Image
from PIL import ImageOps
import os, sys

for name in os.listdir('images/gallery'):
	try:
		out = name[:-4] + "_thumb.jpg"
		img = Image.open(os.path.join('images/gallery/', name))
		img = ImageOps.fit(img, (256, 256), Image.ANTIALIAS)
		img.save(os.path.join('out/gallery/', out), 'JPEG')
	except IOError as e:
		print "failed to create thumbnail"
		print e
def resize_image(image, width, height):
    img = Image.open(image)
    img = ImageOps.fit(img, (width, height), Image.ANTIALIAS)
    img = np.asarray(img, np.float32)
    return np.expand_dims(img,0)  # add to the origin dimension of image
Example #46
0
            def process_image():
                # open
                with \
                        Image.open('./assets/images/template.png') as template, \
                        Image.open(BytesIO(avatar_data)) as av, \
                        Image.open('./assets/images/border.png') as circle, \
                        Image.open(BytesIO(im)) as image:

                    # resize
                    circle = circle.resize(size=(235, 235))
                    size = image.size
                    multiplier = 900 / size[0]
                    image = image.resize(size=(900, int(size[1] * multiplier)))
                    image = ImageOps.fit(image, size=(900, 240))
                    # darken
                    enhancer = ImageEnhance.Brightness(image)
                    image = enhancer.enhance(0.5)
                    template.paste(image, (20, 25))
                    x = int(365 * ratio) + 355

                    if m.color == discord.Color.default():
                        c = (255, 255, 255)
                    else:
                        c = m.color.to_rgb()

                    avatar_size = int(template.size[1] * 2 / 3)
                    av = av.resize((avatar_size, avatar_size))
                    av = av.convert(mode='RGBA')

                    im_a = Image.new("L", av.size, 0)
                    draw = ImageDraw.Draw(im_a)
                    draw.ellipse([(0, 0), av.size], fill=255)
                    template.paste(av, (40, 45), im_a)
                    draw = ImageDraw.Draw(template, mode='RGBA')
                    draw.rectangle([(355, 175), (720, 200)],
                                   fill=(169, 169, 169, 255),
                                   outline=(0, 0, 0, 255))
                    draw.rectangle([(355, 175), (x, 200)],
                                   fill=color,
                                   outline=(0, 0, 0, 255))
                    font = ImageFont.truetype('./assets/fonts/mono.ttf', 38)
                    size = font.getsize(str(current_level))[0]
                    x = 325 - size
                    draw.text(xy=(x, 170),
                              font=font,
                              fill=(255, 255, 255, 255),
                              text=str(current_level))
                    draw.text(xy=(750, 170),
                              font=font,
                              fill=(255, 255, 255, 255),
                              text=str(current_level + 1))
                    font = ImageFont.truetype('./assets/fonts/ubuntu.ttf', 38)
                    border(draw=draw,
                           font=font,
                           xy=(300, 110),
                           text=f'Rank: ',
                           fill=(255, 255, 255, 255),
                           outline=(0, 0, 0, 255),
                           thiccness=2)
                    textlen = font.getsize("Rank: ")[0]
                    font = ImageFont.truetype('./assets/fonts/mono.ttf', 48)
                    border(xy=(410, 107),
                           draw=draw,
                           text=str(rank),
                           font=font,
                           fill=color,
                           thiccness=2,
                           outline=(0, 0, 0, 255))
                    ranklen = font.getsize(str(rank))[0]
                    totallen = textlen + ranklen + 315

                    if len(m.display_name) > 12:

                        if len(m.display_name) > 19:
                            text = m.display_name[:18]
                        else:
                            text = m.display_name

                        font = ImageFont.truetype('./assets/fonts/ubuntu.ttf',
                                                  36)
                        border(draw=draw,
                               xy=(300, 50),
                               text=text,
                               font=font,
                               fill=(c[0], c[1], c[2], 255),
                               outline=(0, 0, 0, 255),
                               thiccness=1)
                    else:
                        font = ImageFont.truetype('./assets/fonts/ubuntu.ttf',
                                                  45)
                        text = m.display_name
                        border(draw=draw,
                               xy=(295, 50),
                               text=text,
                               font=font,
                               fill=(c[0], c[1], c[2], 255),
                               outline=(0, 0, 0, 255),
                               thiccness=1)

                    x = font.getsize(text)[0] + 315

                    if x < totallen:
                        x = totallen + 5

                    draw.rectangle(xy=[(x, 55), (x + 5, 150)],
                                   fill=(255, 255, 255, 255),
                                   outline=(0, 0, 0, 255))
                    font = ImageFont.truetype('./assets/fonts/ubuntu.ttf', 32)
                    border(xy=(x + 30, 65),
                           draw=draw,
                           font=font,
                           text='LEVEL',
                           fill=(255, 255, 255, 255),
                           thiccness=2,
                           outline=(0, 0, 0, 255))
                    border(xy=(x + 30, 115),
                           draw=draw,
                           font=font,
                           text='TOTAL XP:',
                           fill=(255, 255, 255, 255),
                           thiccness=2,
                           outline=(0, 0, 0, 255))
                    font = ImageFont.truetype('./assets/fonts/mono.ttf', 62)
                    border(xy=(x + 130, 47),
                           font=font,
                           draw=draw,
                           text=str(current_level),
                           thiccness=2,
                           outline=(0, 0, 0, 255),
                           fill=color)
                    font = ImageFont.truetype('./assets/fonts/mono.ttf', 42)

                    if mx > 999:
                        member_xp = f'{round(mx / 1000, 1)}K'
                    else:
                        member_xp = str(mx)

                    border(xy=(x + 190, 112),
                           font=font,
                           draw=draw,
                           text=member_xp,
                           thiccness=2,
                           outline=(0, 0, 0, 255),
                           fill=color)

                    if nlr:
                        role_color = nlr.color.to_rgb()
                        color_tuple = (role_color[0], role_color[1],
                                       role_color[2], 255)
                        role_name = nlr.name.split(' | ')[0]
                        levels_to = nl - current_level

                        if levels_to == 1:
                            s = ''
                        else:
                            s = 's'

                        font = ImageFont.truetype('./assets/fonts/mono.ttf',
                                                  35)
                        text = f'{progress}/{total_xp}'
                        border(draw=draw,
                               font=font,
                               thiccness=1,
                               text=text,
                               fill=color,
                               outline=(0, 0, 0, 255),
                               xy=(300, 220))
                        size = font.getsize(text)[0]
                        text = f' XP | {levels_to} Level{s} to '
                        font = ImageFont.truetype("./assets/fonts/ubuntu.ttf",
                                                  30)
                        border(draw=draw,
                               font=font,
                               thiccness=1,
                               text=text,
                               fill=(255, 255, 255, 255),
                               outline=(0, 0, 0, 255),
                               xy=(300 + size, 220))
                        text_length = font.getsize(text)[0]
                        border(draw=draw,
                               font=font,
                               thiccness=1,
                               text=role_name,
                               fill=color_tuple,
                               outline=(0, 0, 0, 255),
                               xy=(300 + size + text_length, 220))
                    else:
                        font = ImageFont.truetype('./assets/fonts/mono.ttf',
                                                  35)
                        text = f'{progress}/{total_xp}'
                        border(draw=draw,
                               font=font,
                               thiccness=1,
                               text=text,
                               fill=color,
                               outline=(0, 0, 0, 255),
                               xy=(300, 220))
                        size = font.getsize(text)[0]
                        text = ' XP to Next Level'
                        font = ImageFont.truetype("./assets/fonts/ubuntu.ttf",
                                                  30)
                        border(draw=draw,
                               font=font,
                               thiccness=1,
                               text=text,
                               fill=(255, 255, 255, 255),
                               outline=(0, 0, 0, 255),
                               xy=(300 + size, 220))

                    template.paste(circle, (15, 18), circle)

                    buffer = BytesIO()
                    template.save(buffer, 'png')
                    buffer.seek(0)
                    return buffer
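The border(...) helper called throughout the snippet above is not included here; it appears to draw outlined text. A plausible sketch of such a helper, written only as an assumption about what it does (same parameter names, including the snippet's own 'thiccness' spelling):

def border(draw, xy, text, font, fill, outline, thiccness):
    # Assumed implementation: stamp the outline colour at small offsets
    # around the target position, then draw the fill colour on top.
    x, y = xy
    for dx in range(-thiccness, thiccness + 1):
        for dy in range(-thiccness, thiccness + 1):
            draw.text((x + dx, y + dy), text, font=font, fill=outline)
    draw.text(xy, text, font=font, fill=fill)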
Example #47
0


from PIL import Image, ImageOps, ImageEnhance
import glob, os


# 5120 x 2880 # max resolution
# 2560 x 1440 # default
# 3200 x 1800

for infile in glob.glob('original/*'):

    f = infile.split('/')[-1]
    im = Image.open(infile)

    print(f, im.size)

    thumb = ImageOps.fit(im, (180,180), method=Image.ANTIALIAS, bleed=0.0, centering=(0.5, 0.5))
    thumb.save(f'thumb/{f}')

    thumbg = ImageEnhance.Color(thumb).enhance(0.2)
    thumbg.save(f'thumbg/{f}')

    ipad = ImageOps.fit(im, (820,461), method=Image.ANTIALIAS, bleed=0.0, centering=(0.5, 0.5))
    ipad.save(f'preview_ipad/{f}')

    console = ImageOps.fit(im, (1080,1080), method=Image.ANTIALIAS, bleed=0.0, centering=(0.5, 0.5))
    console.save(f'preview_console/{f}')
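For reference, the bleed and centering arguments used above control how ImageOps.fit crops: bleed trims a fraction off every edge first, and centering decides which part of the image survives the crop, with (0.5, 0.5) keeping the middle and (0.0, 0.0) keeping the top-left. A small sketch with an assumed input file:

from PIL import Image, ImageOps

im = Image.open('original/example.jpg')                         # assumed path
centered = ImageOps.fit(im, (180, 180), centering=(0.5, 0.5))   # crop around the middle
top_left = ImageOps.fit(im, (180, 180), centering=(0.0, 0.0))   # keep the top/left edge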
Example #48
0
File: app.py Project: david-solis/btc-demo
import keras
import numpy as np
import streamlit as st
from PIL import Image, ImageOps

IMG_SIZE = (224, 224)
MODEL_FILE = 'model.h5'

st.title("Brain Tumor Classifier")
st.header("End-to-end Learning")

uploaded_file = st.file_uploader("Please upload a brain MRI scan", type="jpg")
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption='Uploaded MRI brain scan.', use_column_width=True)
    st.write("")
    st.write("Classifying...")
    # Load model
    model = keras.models.load_model(MODEL_FILE)
    # Convert to the image format expected by the model
    image = ImageOps.fit(image, IMG_SIZE, Image.ANTIALIAS).convert('L')
    image_array = np.expand_dims(np.array(image), axis=2)
    image_array = image_array.reshape((1, ) + image_array.shape)

    prediction = model.predict(image_array)
    label = np.argmax(prediction)

    st.write(("Tumor" if label else "No tumor") + " detected")
Example #49
0
def image_to_thumbnail(image):
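    # Note: 'size' is not a parameter here; it is assumed to be an int defined
    # in the enclosing (module or closure) scope.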
    return ImageOps.fit(image, (size, size), Image.ANTIALIAS)
Example #50
0
def square_image(input_f, max_size, quality=80):
    image, format = pil_image(input_f, quality)
    max_size = min(image.size[0], image.size[1], max_size)
    image = ImageOps.fit(image, size=(max_size, max_size))
    return image_to_file(image, format, quality)
Example #51
0
File: embed.py Project: msuess/web
def avatar(request):
    # default response
    could_not_find = Image.new('RGBA', (1, 1), (0, 0, 0, 0))
    err_response = HttpResponse(content_type="image/jpeg")
    could_not_find.save(err_response, "JPEG")

    # params
    repo_url = request.GET.get('repo', False)
    if not repo_url or 'github.com' not in repo_url:
        return err_response

    try:
        # get avatar of repo
        _org_name = org_name(repo_url)

        avatar = None
        filename = "{}.png".format(_org_name)
        filepath = 'assets/other/avatars/' + filename
        try:
            avatar = Image.open(filepath, 'r').convert("RGBA")
        except IOError:
            remote_user = get_user(_org_name)
            if not remote_user.get('avatar_url', False):
                return JsonResponse({'msg': 'invalid user'}, status=422)
            remote_avatar_url = remote_user['avatar_url']

            r = requests.get(remote_avatar_url, stream=True)
            chunk_size = 20000
            with open(filepath, 'wb') as fd:
                for chunk in r.iter_content(chunk_size):
                    fd.write(chunk)
            avatar = Image.open(filepath, 'r').convert("RGBA")

            # make transparent
            datas = avatar.getdata()

            new_data = []
            for item in datas:
                if item[0] == 255 and item[1] == 255 and item[2] == 255:
                    new_data.append((255, 255, 255, 0))
                else:
                    new_data.append(item)

            avatar.putdata(new_data)
            avatar.save(filepath, "PNG")

        width, height = (215, 215)
        img = Image.new("RGBA", (width, height), (255, 255, 255))

        # config
        icon_size = (215, 215)
        # execute
        avatar = ImageOps.fit(avatar, icon_size, Image.ANTIALIAS)
        bg_w, bg_h = img.size
        offset = 0, 0
        img.paste(avatar, offset, avatar)

        response = HttpResponse(content_type="image/jpeg")
        img.save(response, "JPEG")
        return response
    except IOError as e:
        print(e)
        return err_response
Example #52
0
def upload():

    udata = {}
    name = request.form['name']
    udata['name'] = str(name)
    email = request.form['email']
    udata['email'] = str(email)
    date = request.form['date']
    udata['date'] = str(date)
    gender = request.form['gender']
    udata['gender'] = str(gender)
    bloodgroup = request.form['bloodgroup']
    udata['bloodgroup'] = str(bloodgroup)
    city = request.form['city']
    udata['city'] = str(city)
    country = request.form['country']
    udata['country'] = str(country)

    target = os.path.join(APP_ROOT, 'files/')
    print("target", target)

    if not os.path.isdir(target):
        os.mkdir(target)

    for file in request.files.getlist("file"):
        print("file", file)
        print("file.filename", file.filename)
        filename = file.filename
        destination = "".join([target, filename])
        print("destination", destination)
        file.save(destination)
        #filename="../files/"+filename
        print(filename)

    print("start")
    np.set_printoptions(suppress=True)
    model = tensorflow.keras.models.load_model('keras_model.h5')
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

    image = Image.open(destination)
    size = (224, 224)
    image = ImageOps.fit(image, size, Image.ANTIALIAS)
    image_array = np.asarray(image)
    #image.show()
    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
    data[0] = normalized_image_array

    label = [
        "actinic keratoses", "bullous impetigo", "dermatitis", "flea bites",
        "healthy skin", "lyme disease", "miliaria", "no skin present",
        "sunburn", "tinea pedis"
    ]

    prediction = model.predict(data)
    print(prediction)

    test_keys = label
    test_values = prediction.tolist()
    test_values = list(prediction[0])

    print(test_values)

    res = {}
    for key in test_keys:
        for value in test_values:
            res[key] = value
            test_values.remove(value)
            break

    val = max(res, key=res.get)
    print(val, res)
    print("end")

    print(udata)

    if (val == "no skin present"):
        return render_template("no_skin.html")
    elif (val == "healthy skin"):
        return render_template("healthy_skin.html")
    else:
        udata['disease'] = val
        fb = FirebaseApplication("https://skindoctor-e6294.firebaseio.com/",
                                 None)
        result = fb.post('patients', udata)
        if (val == "actinic keratoses"):
            return render_template("actinic_keratosis.html")
        elif (val == "bullous impetigo"):
            return render_template("bullous_impetigo.html")
        elif (val == "dermatitis"):
            return render_template("dermatitis.html")
        elif (val == "flea bites"):
            return render_template("flea_bites.html")
        elif (val == "lyme disease"):
            return render_template("lyme_disease.html")
        elif (val == "miliaria"):
            return render_template("miliaria.html")
        elif (val == "sunburn"):
            return render_template("sunburn.html")
        elif (val == "tinea pedis"):
            return render_template("tinea_pedis.html")
        print(result)
Example #53
0
    async def on_member_join(self, member):

        member_id = member.id
        member_name = member.name
        member_tag = "<@" + str(member_id) + ">"
        data = misc.return_data(str(member.guild.id), WELCOME_FILE_PATH)
        role = discord.utils.get(member.guild.roles, id=data["self_role"])
        w_channel = discord.utils.get(member.guild.channels,
                                      id=data["welcome_channel"])

        welcome_msg = data["welcome_msg"]

        is_w_images = False
        count_list = []
        for i in range(10):
            if os.path.isfile(
                    os.path.join(
                        ROOT_PATH, "images/backgrounds/" +
                        str(member.guild.id) + "_" + str(i) + ".jpg")):
                is_w_images = True
                count_list.append(i)
            else:
                pass
        try:
            image_num = random.choice(count_list)
        except:
            image_num = random.randint(1, 10)

        W, H = 1920, 872
        # messages
        hello = "Hey Buddy,"
        username = str(self.bot.get_user(member.id))
        msg = 'You Are {}th Member of The Server'.format(
            member.guild.member_count)

        url = requests.get(member.avatar_url)
        avatar = Image.open(BytesIO(url.content))
        avatar = avatar.resize((380, 380))
        bigsize = (avatar.size[0] * 3, avatar.size[1] * 3)
        mask = Image.new('L', bigsize, 0)
        draw = ImageDraw.Draw(mask)
        draw.ellipse((0, 0) + bigsize, fill=255)
        mask = mask.resize(avatar.size)
        avatar.putalpha(mask)
        output = ImageOps.fit(avatar, mask.size, centering=(0.5, 0.5))
        output.putalpha(mask)
        output.save('images/avatar.png')
        ava = Image.open('images/avatar.png')
        ava_draw = ImageDraw.Draw(ava)
        ava_draw.arc((0, 0, 380, 380),
                     start=0,
                     end=360,
                     fill=(194, 83, 111),
                     width=12)
        ava.save("images/avatar.png")
        # avatar = Image.open('avatar2.png')
        if is_w_images:
            welc = Image.open('images/backgrounds/' + str(member.guild.id) +
                              "_" + str(image_num) + '.jpg')

        else:
            welc = Image.open('images/backgrounds/welcome' + str(image_num) +
                              '.jpg')
        font1 = ImageFont.truetype('fonts/UbuntuMono-B.ttf', 90)
        font2 = ImageFont.truetype('fonts/Caveat-Bold.ttf', 100)

        welc_draw = ImageDraw.Draw(welc)
        hello_w, hello_h = welc_draw.textsize(hello, font2)
        user_w, user_h = welc_draw.textsize(username, font1)
        msg_w, msg_h = welc_draw.textsize(msg, font2)
        welc_draw.text(xy=((W - hello_w) / 2, (H - hello_h) / 1.44),
                       text=hello,
                       fill=(190, 222, 203),
                       font=font2,
                       align='center')
        welc_draw.text(xy=((W - user_w) / 2, (H - user_h) / 1.2),
                       text=username,
                       fill=(222, 239, 90),
                       font=font1,
                       align='center')
        welc_draw.text(xy=((W - msg_w) / 2, (H - msg_h) / 1),
                       text=msg,
                       fill=(248, 206, 160),
                       font=font2,
                       align='center')
        welc.paste(ava, (760, 150), ava)
        test = []
        for i in welcome_msg:
            # Apply every placeholder substitution to the same string, then keep it.
            wlmsg = str(i)
            wlmsg = wlmsg.replace("{member.mention}", str(member_tag))
            wlmsg = wlmsg.replace("{member.name}", str(member_name))
            wlmsg = wlmsg.replace("{member.count}", str(member.guild.member_count))
            wlmsg = wlmsg.replace("{member.server_name}", str(member.guild.name))
            wlmsg = wlmsg.replace("{member.role}", str(role))
            test.append(wlmsg)
        welc.save('tmpBv.png', format='PNG')
        # bot.
        if role:
            await member.add_roles(role)
        file = discord.File(open('tmpBv.png', 'rb'))
        # embed = discord.Embed(title=':r_arrow: Hey buddy {}'.format(str(member)), description='welcome to the  {}! you are  {}th member of no server.'.format(member.guild.name, member.guild.member_count), color=0x1f1d1d)
        embed = discord.Embed(description=''.join(test), color=0x00ff00)
        # embed.set_thumbnail(url = member.guild.icon_url)
        embed.set_image(url='attachment://tmpBv.png')
        await w_channel.send(file=file, embed=embed)
Example #54
0
    # testing
    md = CNN_LSTM().float()
    cmd = CNN_LSTM().float()

    md.load_state_dict(torch.load('md.pth'))
    cmd.load_state_dict(torch.load('md.pth'))

    image = Image.open('./data/test/image_left/uu_000041.jpg').convert('RGB')
    img_s = image.size

    # crop image at center with window size 600 * 160
    c_img = crop_image(image)
    c_img = generate_coord_channels(c_img)

    # scale original image to 600 * 160
    image = ImageOps.fit(image, (600, 160), Image.ANTIALIAS)
    image = generate_coord_channels(image)

    # turn image into torch tensor for prediction
    image = transform(image).view(1, 5, 160, 600)
    c_img = transform(c_img).view(1, 5, 160, 600)

    # predict
    outputs = md(image.float())
    outputs = outputs.detach().numpy().reshape((1, 600))

    coutputs = cmd(c_img.float())
    coutputs = coutputs.detach().numpy().reshape((1, 600))

    sv = visualize_segmentation(outputs)
    cv = visualize_segmentation(coutputs)
Example #55
0
def thumbnail(image_url,
              width,
              height,
              upscale=True,
              quality=95,
              left=.5,
              top=.5,
              padding=False,
              padding_color="#fff"):
    """
    Given the URL to an image, resizes the image using the given width
    and height on the first time it is requested, and returns the URL
    to the new resized image. If width or height are zero then original
    ratio is maintained. When ``upscale`` is False, images smaller than
    the given size will not be grown to fill that size. The given width
    and height thus act as maximum dimensions.
    """
    if not image_url:
        return ""
    try:
        from PIL import Image, ImageFile, ImageOps
    except ImportError:
        return ""

    s3urlpath = "/".join(
        filter(None,
               [settings.MEDIA_URL.strip("/"), settings.AWS_LOCATION])) + "/"

    image_url = unquote(str(image_url)).split("?")[0]
    if image_url.startswith(s3urlpath):
        image_url = image_url.replace(s3urlpath, "", 1)
    image_dir, image_name = os.path.split(image_url)
    image_prefix, image_ext = os.path.splitext(image_name)
    filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext.lower(), "JPEG")
    thumb_name = "%s-%sx%s" % (image_prefix, width, height)
    if not upscale:
        thumb_name += "-no-upscale"
    if left != .5 or top != .5:
        left = min(1, max(0, left))
        top = min(1, max(0, top))
        thumb_name = "%s-%sx%s" % (thumb_name, left, top)
    thumb_name += "-padded-%s" % padding_color if padding else ""
    thumb_name = "%s%s" % (thumb_name, image_ext)

    # `image_name` is used here for the directory path, as each image
    # requires its own sub-directory using its own name - this is so
    # we can consistently delete all thumbnails for an individual
    # image, which is something we do in filebrowser when a new image
    # is written, allowing us to purge any previously generated
    # thumbnails that may match a new image name.
    thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
                             settings.THUMBNAILS_DIR_NAME, image_name)
    if not os.path.exists(thumb_dir):
        try:
            os.makedirs(thumb_dir)
        except OSError:
            pass

    thumb_path = os.path.join(thumb_dir, thumb_name)
    thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
                              quote(image_name.encode("utf-8")),
                              quote(thumb_name.encode("utf-8")))
    image_url_path = os.path.dirname(image_url)
    if image_url_path:
        thumb_url = "%s/%s" % (image_url_path, thumb_url)

    # The original tag returns a local image_url, but when you're using
    # S3 you want to return the remote url. Here, we construct that URL.

    terms = filter(
        None,
        [
            settings.MEDIA_URL.strip("/"),
            settings.
            AWS_LOCATION,  # this might be an empty string, hence filter()
            thumb_url,
        ])

    remote_url = "/".join(terms)

    try:
        thumb_exists = os.path.exists(thumb_path)

    except UnicodeEncodeError:
        # The image that was saved to a filesystem with utf-8 support,
        # but somehow the locale has changed and the filesystem does not
        # support utf-8.
        from mezzanine.core.exceptions import FileSystemEncodingChanged
        raise FileSystemEncodingChanged()

    if thumb_exists:
        # Thumbnail exists, don't generate it.

        if settings.DEBUG:
            print("thumb_exists codepath")

        return remote_url
    elif not default_storage.exists(image_url):
        # Requested image does not exist, just return its URL.

        if settings.DEBUG:
            print("not default_storage.exists(image_url) codepath")

        return remote_url

    if settings.DEBUG:
        print("opening image url:", image_url)

    f = default_storage.open(image_url)
    try:
        image = Image.open(f)
    except:
        # Invalid image format.
        return remote_url

    image_info = image.info

    # Transpose to align the image to its orientation if necessary.
    # If the image is transposed, delete the exif information as
    # not all browsers support the CSS image-orientation:
    # - http://caniuse.com/#feat=css-image-orientation
    try:
        orientation = image._getexif().get(0x0112)
    except:
        orientation = None
    if orientation:
        methods = {
            2: (Image.FLIP_LEFT_RIGHT, ),
            3: (Image.ROTATE_180, ),
            4: (Image.FLIP_TOP_BOTTOM, ),
            5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90),
            6: (Image.ROTATE_270, ),
            7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270),
            8: (Image.ROTATE_90, )
        }.get(orientation, ())
        if methods:
            image_info.pop('exif', None)
            for method in methods:
                image = image.transpose(method)

    to_width = int(width)
    to_height = int(height)
    from_width = image.size[0]
    from_height = image.size[1]

    if not upscale:
        to_width = min(to_width, from_width)
        to_height = min(to_height, from_height)

    # Set dimensions.
    if to_width == 0:
        to_width = from_width * to_height // from_height
    elif to_height == 0:
        to_height = from_height * to_width // from_width
    original_image_mode = image.mode
    if original_image_mode not in ("P", "L", "RGBA"):
        try:
            image = image.convert("RGBA")
        except:
            return remote_url
    # Required for progressive jpgs.
    ImageFile.MAXBLOCK = 2 * (max(image.size)**2)

    # Padding.
    if padding and to_width and to_height:
        from_ratio = float(from_width) / from_height
        to_ratio = float(to_width) / to_height
        pad_size = None
        if to_ratio < from_ratio:
            pad_height = int(to_height * (float(from_width) / to_width))
            pad_size = (from_width, pad_height)
            pad_top = (pad_height - from_height) // 2
            pad_left = 0
        elif to_ratio > from_ratio:
            pad_width = int(to_width * (float(from_height) / to_height))
            pad_size = (pad_width, from_height)
            pad_top = 0
            pad_left = (pad_width - from_width) // 2
        if pad_size is not None:
            pad_container = Image.new("RGBA", pad_size, padding_color)
            pad_container.paste(image, (pad_left, pad_top))
            image = pad_container

    # Create the thumbnail.
    to_size = (to_width, to_height)
    to_pos = (left, top)
    try:
        if original_image_mode != image.mode:
            image = image.convert(original_image_mode)
        image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
        image = image.save(thumb_path, filetype, quality=quality, **image_info)
        # Push a remote copy of the thumbnail if MEDIA_URL is
        # absolute.
        if "://" in settings.MEDIA_URL:
            with open(thumb_path, "rb") as f:
                default_storage.save(unquote(thumb_url), File(f))

        if settings.DEBUG:
            print("wrote thumb_url to disk:", thumb_url)

    except Exception:
        # If an error occurred, a corrupted image may have been saved,
        # so remove it, otherwise the check for it existing will just
        # return the corrupted image next time it's requested.
        try:
            os.remove(thumb_path)
        except Exception:
            pass

        if settings.DEBUG:
            print("thumbnail writing exception")

        return remote_url

    if settings.DEBUG:
        print("using final remote_url:", remote_url)

    return remote_url
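A brief usage sketch for the template helper above (paths and dimensions are assumptions): passing 0 for width or height keeps the original aspect ratio, and upscale=False stops small images from being enlarged.

url_a = thumbnail('uploads/team/photo.jpg', 300, 200)                 # center-cropped 300x200
url_b = thumbnail('uploads/team/photo.jpg', 300, 0)                   # 300 wide, height follows the ratio
url_c = thumbnail('uploads/team/photo.jpg', 2000, 0, upscale=False)   # never grown past the source size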
Example #56
0
# Load the model
model = tensorflow.keras.models.load_model('model/keras_model.h5')

# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

# Replace this with the path to your image
image = Image.open('IMG_20200928_114017.jpg')

# resize the image to a 224x224 with the same strategy as in TM2:
# resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
image = ImageOps.fit(image, size, Image.ANTIALIAS)

# turn the image into a numpy array
image_array = np.asarray(image)

# display the resized image
image.show()

# Normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

# Load the image into the array
data[0] = normalized_image_array

# run the inference
prediction = model.predict(data)
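The normalization step above maps 8-bit pixel values into roughly [-1, 1]: x / 127.0 - 1 sends 0 to -1.0, 127 to 0.0 and 255 to about 1.008. A one-line check:

import numpy as np
print(np.array([0, 127, 255], dtype=np.float32) / 127.0 - 1)   # approximately [-1.0, 0.0, 1.008]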
Example #57
0
    def predictType(self, model, bytesData):
        global Item_Dictionary, Item1, Item2
        imgdata = np.frombuffer(bytesData, dtype='uint8')

        # img decode
        decimg = cv2.imdecode(imgdata, 1)
        img_location = './img_file/buf.png'

        cv2.imwrite(img_location, decimg)
        """
        Create an array of the right shape to feed into the Keras model.
        The number of images that can be placed in the array is determined
        by the first position in the shape tuple, in this case 1.
        """
        data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
        image = Image.open(img_location)
        size = (224, 224)
        image = ImageOps.fit(image, size, Image.ANTIALIAS)

        #turn the image into a numpy array
        image_array = np.asarray(image)

        # Normalize the image
        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

        # Load the image into the array
        data[0] = normalized_image_array

        prediction = model.predict(data)
        predict_class = max(prediction[0])
        if predict_class < 0.8:
            print("[ Cannot Distinguish ]")
            predict_type = 'ERR_001'
            classification = '0'
        else:
            for i in range(len(prediction[0])):
                if prediction[0][i] == predict_class:
                    classification = i
                    predict_type = Item_Dictionary.get(i)
                    print(classification)
                    print(predict_type)
                    break

        if Item1 == classification:
            self.count = 2
        elif Item2 == classification:
            self.count = 3
        else:
            self.count = 4

        img = "conveyor{}.png".format(str(self.count))
        img_change = QPixmap()
        img_change.load(img)
        img_change = img_change.scaled(580, 440)
        self.conveyor.setPixmap(QtGui.QPixmap(img_change))  #image path
        QApplication.processEvents()
        self.C2Mque.put(classification)

        now = datetime.datetime.now()
        capdate = now.strftime('%Y-%m-%d')
        captime = now.strftime('%H:%M:%S')

        product_info = "{0}{1}{2}{3}".format(predict_type, self.cal, capdate,
                                             captime)
        product_data = product_info.encode()

        self.textEdit.append('Predict Type : {}'.format(predict_type))
        self.textEdit.append('Date : {}'.format(capdate))
        self.textEdit.append('Time : {}'.format(captime))
        self.textEdit.append('Product ADD Complete')
        self.textEdit.append('')

        self.sendData(product_data)
Example #58
0
# Load the model
model = tensorflow.keras.models.load_model('keras_model.h5')

# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

# Replace this with the path to your image
image = Image.open('test.jpg')

#resize the image to a 224x224 with the same strategy as in TM2:
#resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)

#turn the image into a numpy array

# display the resized image
# image.show()

# Normalize the image
normalized_image_array = (np.asarray(ImageOps.fit(
    image, size, Image.ANTIALIAS)).astype(np.float32) / 127.0) - 1

# Load the image into the array
data[0] = normalized_image_array
print(data)
# run the inference
prediction = model.predict(data)
print(prediction)
Example #59
0
 def _crop(self, image, size, pos):
     if pos == "face":
         pos_ratio = self._get_face_position(image)
     else:
         pos_ratio = Image.POSITIONS_TO_RATIOS.get(pos, (0.5, 0.5))
     return PilImageOps.fit(image, size, PilImage.ANTIALIAS, 0, pos_ratio)
Example #60
-1
    def test_sanity(self):

        ImageOps.autocontrast(hopper("L"))
        ImageOps.autocontrast(hopper("RGB"))

        ImageOps.autocontrast(hopper("L"), cutoff=10)
        ImageOps.autocontrast(hopper("L"), ignore=[0, 255])

        ImageOps.autocontrast_preserve(hopper("L"))
        ImageOps.autocontrast_preserve(hopper("RGB"))

        ImageOps.autocontrast_preserve(hopper("L"), cutoff=10)
        ImageOps.autocontrast_preserve(hopper("L"), ignore=[0, 255])

        ImageOps.colorize(hopper("L"), (0, 0, 0), (255, 255, 255))
        ImageOps.colorize(hopper("L"), "black", "white")

        ImageOps.crop(hopper("L"), 1)
        ImageOps.crop(hopper("RGB"), 1)

        ImageOps.deform(hopper("L"), self.deformer)
        ImageOps.deform(hopper("RGB"), self.deformer)

        ImageOps.equalize(hopper("L"))
        ImageOps.equalize(hopper("RGB"))

        ImageOps.expand(hopper("L"), 1)
        ImageOps.expand(hopper("RGB"), 1)
        ImageOps.expand(hopper("L"), 2, "blue")
        ImageOps.expand(hopper("RGB"), 2, "blue")

        ImageOps.fit(hopper("L"), (128, 128))
        ImageOps.fit(hopper("RGB"), (128, 128))

        ImageOps.flip(hopper("L"))
        ImageOps.flip(hopper("RGB"))

        ImageOps.grayscale(hopper("L"))
        ImageOps.grayscale(hopper("RGB"))

        ImageOps.invert(hopper("L"))
        ImageOps.invert(hopper("RGB"))

        ImageOps.mirror(hopper("L"))
        ImageOps.mirror(hopper("RGB"))

        ImageOps.posterize(hopper("L"), 4)
        ImageOps.posterize(hopper("RGB"), 4)

        ImageOps.solarize(hopper("L"))
        ImageOps.solarize(hopper("RGB"))