Example #1
	def make_image_to_post(self, job_string, image, image_format):
		font_size_to_width_ratio = 0.061
		font_location = './Gotham-Bold.otf'
		fontColors = ['white', 'black']

		logging.info(str(datetime.now()) + ': Begin image compositing.')

		try:
			im = Image(blob=image)
		except MissingDelegateError:
			# ImageMagick has no decoder (delegate) for this blob; let the caller handle it.
			raise

		font_size = im.width * font_size_to_width_ratio

		# Shrink a copy to a single pixel to sample the image's average colour,
		# then flip to dark text if the image is predominantly light.
		im_clone = im.clone()
		im_clone.resize(1, 1)

		for row in im_clone:
			for col in row:
				assert isinstance(col, Color)
				if (col.red + col.green + col.blue) / 3.0 >= 0.5:
					fontColors.reverse()

		# Draw the caption twice at a small offset: first the shadow colour, then the main colour.
		font = Font(font_location, size=font_size, color=Color(fontColors[1]))
		im.caption(job_string, left=7, top=7, width=im.width - 10, height=im.height - 10, font=font)
		font = Font(font_location, size=font_size, color=Color(fontColors[0]))
		im.caption(job_string, left=5, top=5, width=im.width - 10, height=im.height - 10, font=font)

		im.format = image_format
		return im
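
A quick sketch of how this method might be driven, assuming it lives on a posting class and the input image arrives as raw bytes; the Poster class name and file names below are placeholders, not from the original project:

# Hypothetical driver; Poster and the file names are placeholders.
with open('photo.jpg', 'rb') as f:
    raw_bytes = f.read()

poster = Poster()
result = poster.make_image_to_post('Backend Engineer - Remote', raw_bytes, 'png')
result.save(filename='job_post.png')
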
Example #2
def create_row(imgs,
               offsets,
               gap,
               fixed_width=0,
               caption=None,
               caption_offset=(0, 0)):
    row_width = 0
    i = 0
    row_height = 0

    for img in imgs:
        if isinstance(img, Image):
            row_width += img.width + gap
            row_height = max(img.height, row_height)
        else:
            row_width += offsets[i][0] + gap
        i += 1

    if fixed_width:
        row_width = fixed_width

    row = Image(width=row_width, height=row_height)

    i = 0
    x = 0

    for img in imgs:
        if isinstance(img, Image):
            row.composite(img,
                          left=x + offsets[i],
                          top=(row_height - img.height) // 2)
            x += img.width + offsets[i] + gap
        else:
            (offset_x, offset_y, width, font) = offsets[i]
            row.caption(img,
                        left=x + offset_x,
                        top=offset_y,
                        width=250,
                        height=250,
                        font=font)
            x += width + gap
        i += 1

    if caption:
        caption_font = Font(
            path="%s/source-code-pro/SourceCodePro-Medium.otf" % args.fonts,
            size=14)
        row_w_caption = Image(width=row_width, height=row_height + 20)
        row_w_caption.caption(caption,
                              left=caption_offset[0],
                              top=caption_offset[1],
                              width=1450,
                              height=50,
                              font=caption_font)
        row_w_caption.composite(row, left=0, top=20)
        return row_w_caption
    else:
        return row
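
A minimal usage sketch for create_row, assuming two same-size thumbnails and that the module-level args.fonts directory (which the caption branch reads) is already set up; the file names are placeholders:

# Hypothetical usage; left.png and right.png are placeholder file names.
from wand.image import Image

left = Image(filename='left.png')
right = Image(filename='right.png')

row = create_row([left, right], [0, 0], gap=5, caption='Left  Right')
row.save(filename='row.png')
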
Example #3
File: common.py Project: arperov/ib
def write_banner():
    img = Image(width=250, height=100, pseudo='plasma:fractal')
    f = Font(path='/cgi-bin/sarial.ttf', size=32, color="#d33682")
    img.caption(text='ibbb.me', font=f, gravity='south_east')
    img.save(filename='/var/www/cgi-bin/files/banner.png')
    print('<div class="banner">'
          '<center>'
          '<img src="/cgi-bin/files/banner.png">'
          '</center>'
          '</div>')
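
Since write_banner() prints an HTML fragment, it is presumably called from a CGI script; a minimal sketch of such a wrapper, assuming write_banner is importable from common.py (the shebang and Content-Type header are the usual CGI requirements, not shown in the original):

#!/usr/bin/env python3
# Hypothetical CGI entry point; assumes common.py (above) is on the import path.
from common import write_banner

print('Content-Type: text/html')
print()                 # blank line ends the HTTP headers
write_banner()          # generates banner.png and prints the <div> markup
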
Example #4
    def set_intro_text(self, txt):
        img = Image(filename="{}/blank.png".format(self.src))

        img.caption(text=txt,
                    font=self.FONT,
                    width=430,
                    left=45,
                    height=60,
                    top=200,
                    gravity='center')

        img.save(filename="{}/intro.png".format(self.out))
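
The method above assumes its class provides src and out directories plus a FONT attribute; a minimal sketch of such a host class, where the class name, font path, and directories are placeholders:

# Hypothetical host class; all names and paths are placeholders.
from wand.font import Font

class IntroCard:
    FONT = Font(path='fonts/OpenSans-Regular.ttf', size=24)

    def __init__(self, src, out):
        self.src = src   # directory containing blank.png
        self.out = out   # directory that receives intro.png

    # set_intro_text() from Example #4 would live here.

card = IntroCard(src='assets', out='build')
# card.set_intro_text('Welcome to the tour!')
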
Example #5
def genTextPNG(text, font="/usr/share/fonts/type1/gsfonts/c059016l.pfb", fsize=48):
	#put these here so that they are not imported until needed
	from wand.image import Image
	from wand.font import Font
	from wand.color import Color

	# convert -size 1000x180 xc:transparent -fx 0 -channel A -fx 'cos(0.6*pi*(i/w-0.5))-0.0' background.png
	img = Image(filename="background1280.png")
	fontwhite = Font(path=font, color=Color("white"), size=fsize, antialias=True)
	fontblack = Font(path=font, color=Color("black"), size=fsize, antialias=True)
	# Draw the text twice: black offset by (8, 8) under the white copy, giving a drop shadow.
	img.caption(text, font=fontblack, gravity='center', left=8, top=8)
	img.caption(text, font=fontwhite, gravity='center')
	final = Image(width=1280, height=720)
	final.composite(img, 0, 530)
	return final
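
A possible call site, assuming a 1280x720 background1280.png and the Type 1 font exist at the paths the function expects; the output file name is a placeholder:

# Hypothetical usage; title_frame.png is a placeholder output name.
frame = genTextPNG("Chapter 1: Getting Started")
frame.save(filename="title_frame.png")
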
def compose_adaptive_prey(img_paths=None,
                          match_json=None,
                          gap=5,
                          horizontal_gap=5,
                          description=None,
                          caption="Catcierge"):

    img = Image(width=600, height=1124, background=Color("#8A968E"))

    #print("Font path: %s" % args.fonts)

    font = Font(path="%s/source-code-pro/SourceCodePro-Medium.otf" %
                args.fonts,
                size=64)
    font_title = Font(path="%s/alex-brush/AlexBrush-Regular.ttf" % args.fonts,
                      size=64)
    font_math = Font(path="%s/Asana-Math/Asana-Math.otf" % args.fonts, size=64)

    imgs = []
    assert (img_paths and (len(img_paths) > 0)) or match_json, \
     "Missing either a list of input image paths or a match json"

    if not img_paths:
        # img_paths may be None; start a fresh list before filling it from the match json.
        img_paths = []
        step_count = match_json["step_count"]

        for step in match_json["steps"][:step_count]:
            print("Step: %s" % step["name"])
            img_paths.append(step["path"])

    # TODO: Allow any matcher type and number of images...
    assert len(img_paths) == 1 or len(img_paths) == 11, \
        "Invalid number of images %d, expected 1 or 11" % len(img_paths)

    for img_path in img_paths:
        #print img_path
        imgs.append(Image(filename=img_path))

    mpos = lambda w: (img.width - w) // 2

    if len(img_paths) == 1:
        img.caption(caption,
                    left=(img.width - 250) // 2,
                    top=5,
                    width=250,
                    height=100,
                    font=font_title)
        img.composite(imgs[0], left=mpos(imgs[0].width), top=120)
        return img

    orgimg = imgs[0]  # Original image.
    detected = imgs[1]  # Detected cat head roi.
    croproi = imgs[2]  # Cropped/extended roi.
    globalthr = imgs[3]  # Global threshold (inverted).
    adpthr = imgs[4]  # Adaptive threshold (inverted).
    combthr = imgs[5]  # Combined threshold.
    opened = imgs[6]  # Opened image.
    dilated = imgs[7]  # Dilated image.
    combined = imgs[8]  # Combined image (re-inverted).
    contours = imgs[9]  # Contours of white areas.
    final = imgs[10]  # Final image.

    # TODO: Enable creating these based on input instead.
    kernel3x3 = create_kernel(w=3, h=3)
    kernel2x2 = create_kernel(w=2, h=2)
    kernel5x1 = create_kernel(w=5, h=1)

    x_start = 20

    img.caption(caption,
                left=(img.width - 250) // 2,
                top=5,
                width=250,
                height=100,
                font=font_title)

    if description:
        desc_font = Font(path="%s/source-code-pro/SourceCodePro-Medium.otf" %
                         args.fonts,
                         size=24)
        text_width = (desc_font.size) * int(len(description) * 0.7)
        img.caption(description,
                    left=(img.width - text_width) // 2,
                    top=80,
                    width=text_width,
                    height=100,
                    font=desc_font)

    height = 120

    # Original.
    img.composite(orgimg, left=mpos(orgimg.width), top=height)
    height += orgimg.height + gap

    # Detected head + cropped region of interest.
    head_row = create_row([detected, croproi], [0, 0],
                          horizontal_gap,
                          caption="Detected head  Cropped ROI")
    img.composite(head_row, left=mpos(head_row.width), top=height)
    height += head_row.height + gap

    # TODO: simplify the code below by making the symbols into images before they're used to create the rows.

    # Combine the threshold images.
    thr_row = create_row(
        [globalthr, "+", adpthr, "=", combthr], [
            x_start, (4 * horizontal_gap, -15, 14 * horizontal_gap, font), 0,
            (2 * horizontal_gap, -15, 8 * horizontal_gap, font),
            2 * horizontal_gap
        ],
        horizontal_gap,
        fixed_width=img.width,
        caption="Global Threshold           Adaptive Threshold       Combined Threshold",
        caption_offset=(x_start, 0))
    img.composite(thr_row, left=mpos(thr_row.width), top=height)
    height += thr_row.height + gap

    # Open the combined threshold.
    open_row = create_row(
        [combthr, u"∘", kernel2x2, "=", opened], [
            x_start,
            (5 * horizontal_gap, -5, 14 * horizontal_gap, font_math), 0,
            (21 * horizontal_gap, -15, 10 * horizontal_gap, font),
            19 * horizontal_gap + 3
        ],
        horizontal_gap,
        fixed_width=img.width,
        caption="Combined Threshold         2x2 Kernel               Opened Image",
        caption_offset=(x_start, 0))
    img.composite(open_row, left=mpos(open_row.width), top=height)
    height += open_row.height + gap

    # Dilate opened and combined threshold with a kernel3x3.
    dilated_row = create_row(
        [opened, u"⊕", kernel3x3, "=", dilated], [
            x_start,
            (3 * horizontal_gap, -5, 14 * horizontal_gap, font_math), 0,
            (17 * horizontal_gap, -15, 10 * horizontal_gap, font),
            15 * horizontal_gap + 3
        ],
        horizontal_gap,
        fixed_width=img.width,
        caption="Opened Image               3x3 Kernel               Dilated Image",
        caption_offset=(x_start, 0))
    img.composite(dilated_row, left=mpos(dilated_row.width), top=height)
    height += dilated_row.height + gap

    # Inverted image and contour.
    contour_row = create_row([combined, contours], [0, 0],
                             horizontal_gap,
                             caption="  Re-Inverted         Contours")
    img.composite(contour_row, left=mpos(contour_row.width), top=height)
    height += contour_row.height + 2 * gap

    # Final.
    img.composite(final, left=mpos(final.width), top=height)
    height += final.height + gap

    return img
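
A hedged sketch of driving compose_adaptive_prey from a saved match JSON, assuming the module-level args.fonts it relies on has already been parsed and that the JSON carries step_count plus a steps list of name/path entries, as the function reads; the file names are placeholders:

# Hypothetical driver; match.json and composite.png are placeholder names.
import json

with open("match.json") as f:
    match = json.load(f)

composite = compose_adaptive_prey(img_paths=[], match_json=match)
composite.save(filename="composite.png")
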
        pages = 1

        image = Image(
            width=imageFromPdf.width,
            height=imageFromPdf.height * pages
        )
        
        for i in range(pages):
            image.composite(
                imageFromPdf.sequence[i],
                top=imageFromPdf.height * i,
                left=0
            )

        image.resize(250, 250)
        image.alpha_channel = False
        image.format = 'png'
        print(image.size)
        image.background_color = Color('pink')
        
        image.type = 'grayscale'
        # Note: this assigns a string over the Image.caption() method; it does not draw text on the image.
        image.caption = file.split('.')[0]
        image.save(filename=fileDirectory + file.split('.')[0] + ".png")

        image.clear()
        image.close()

        #display(image)
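
The fragment above looks like the body of a loop that rasterizes the first page of each PDF into a small grayscale PNG. A self-contained sketch of what that surrounding loop might look like, with the directory names and rasterization resolution as assumptions, not taken from the original project:

# Hypothetical reconstruction of the surrounding loop; directory names and the
# 150 dpi rasterization setting are assumptions.
import os
from wand.image import Image

pdf_dir = "pdfs/"
fileDirectory = "thumbnails/"

for file in os.listdir(pdf_dir):
    if not file.lower().endswith(".pdf"):
        continue
    with Image(filename=os.path.join(pdf_dir, file), resolution=150) as imageFromPdf:
        with Image(width=imageFromPdf.width, height=imageFromPdf.height) as image:
            image.composite(imageFromPdf.sequence[0], top=0, left=0)  # first page only
            image.resize(250, 250)
            image.alpha_channel = False
            image.format = 'png'
            image.type = 'grayscale'
            image.save(filename=fileDirectory + file.split('.')[0] + ".png")
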