Example #1
import math

import gizeh
import pytweening

# W, H, numParticles, particles, and duration are module-level globals
# defined elsewhere in the source project.
def make_frame(t):
    #colorDiv = t + 0.01
    #color1= [1.0 / colorDiv, 0.0, 0.0]
    #color2 = [0.0, 1.0 / colorDiv, 0.0]

    #gradient= gizeh.ColorGradient("linear",((0,(0,.5,1)),(1,(0,1,1))), xy1=(-cosR,-sinR), xy2=(cosR,sinR))

    #gradRad1 = radius1 - 20
    #gradRad2 = radius1 + 20
    #gradient = gizeh.ColorGradient(type="radial",
    #                               stops_colors = [(0,color1),(1,color2)],
    #                               xy1=[0.0,0.0], xy2=[gradRad1,0.0], xy3 = [0.0,gradRad2])
    surface = gizeh.Surface(W,H)

    # orbit halo
    #circle1 = gizeh.circle(radius1, xy = (W/2, H/2), stroke=gradient, stroke_width=5)
    #circle1.draw(surface)

    for i in range(numParticles):
      # Orbiting planet
      particle = particles[i]

      # Map each easing ID to its pytweening function; fall back to linear
      # so `angle` is always defined even for an unexpected easing value.
      easing_funcs = {
          1: pytweening.linear,
          2: pytweening.easeInQuad,
          3: pytweening.easeOutQuad,
          4: pytweening.easeInOutQuad,
          5: pytweening.easeInSine,
          6: pytweening.easeOutSine,
          7: pytweening.easeInOutSine,
      }
      ease = easing_funcs.get(particle.easing, pytweening.linear)
      angle = ease((duration - t) / duration) * 360 * particle.direction
      radians = math.radians(angle)
      cosR = math.cos(radians)
      sinR = math.sin(radians)
      x = W/2 + cosR * particle.orbit_radius
      y = H/2 + sinR * particle.orbit_radius
      fill = particle.color
      #circle = gizeh.circle(particle.radius, xy = (x, y), fill=(1,0,1))
      circle = gizeh.circle(particle.radius, xy = (x, y), fill=fill)
      circle.draw(surface)

    return surface.get_npimage()
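make_frame(t) returns a NumPy frame for time t, which is the callback signature MoviePy's VideoClip expects. A minimal usage sketch, assuming moviepy is installed and reusing the duration global from above; the output filename and fps are illustrative:

from moviepy.editor import VideoClip

# Render the orbiting particles into an animated GIF.
clip = VideoClip(make_frame, duration=duration)
clip.write_gif("orbit.gif", fps=30)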
Example #2
def ease_in_out_quad(n):
    """Slow at both ends, fast in the middle (quadratic easing)."""
    return pytweening.easeInOutQuad(n)
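Sampling the curve shows the S-shape: progress changes slowly near 0 and 1 and fastest around the midpoint. A quick check (these inputs produce exact values):

import pytweening

# n is the linear progress (0..1); the return value is the eased progress.
for n in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(n, pytweening.easeInOutQuad(n))
# prints: 0.0 0.0, 0.25 0.125, 0.5 0.5, 0.75 0.875, 1.0 1.0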
Example #3
def s_curve(v):
    return pytweening.easeInOutQuad(v)
def buildSlideShow(imageList, descriptionList, path, titleText):
    FPS = 60  # Sets the FPS of the entire video
    currentFrame = 0  # The animation hasn't started yet, so start at frame zero
    blendingFrames = 60  # Number of frames each transition between images lasts
    framesbetweenImages = 120  # Zoom frames per image
    staticFrames = 120  # Static frames at the start; the text animates in here
    textFrames = 30  # Frames for the text animation; should be < staticFrames
    titleTextDuration = 280

    im1 = Image.open(imageList[0])  # Load the first image in
    im1 = image_rotate(im1)  # Rotate the image if necessary
    im2 = im1  # Define a second image so the variable exists before the loop
    width, height = im1.size  # Get the image dimensions to create the video with
    ## Limit W & H to 720p, maintaining aspect ratio
    logging.info("h:" + str(height) + " W:" + str(width))
    width, height = limitTopRes(width, height)

    logging.info("new res: h:" + str(height) + " W:" + str(width))
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    video = cv2.VideoWriter(path + "/" + path + ".avi", fourcc, FPS,
                            (width, height), True)
    # Write the gallery metadata files alongside the video
    with open(path + "/imageList-" + path + ".txt", "w+") as imageListFile:
        imageListFile.write(str(imageList))
    with open(path + "/descriptionList-" + path + ".txt",
              "w+") as descriptionListFile:
        descriptionListFile.write(str(descriptionList))

    #Font Type
    font = 'MaxImpact.ttf'
    tileFontSize = getFontSize(titleText, height, width)

    for idx, val in enumerate(imageList):
        logging.info("start image: " + str(idx))
        im1 = Image.open(val)
        fontSize, text = getFontSize(descriptionList[idx], height, width)

        #logging.info("fontSize: "+str(fontSize))

        ## Get static-frame length based on the text length
        staticFrames = 80 + (len(text) * 4)
        if staticFrames > 660:
            staticFrames = 660
        ##logging.info("staticFrames "+str(staticFrames))

        buffer = (int(width - (width / 10)), int(height - (height / 10)))
        bufferOffset = (int(width / 20), int(height / 20))
        bufferOffsetW = int(width / 20)
        bufferOffsetH = int(height / 9)

        imgTxt = ImageText(buffer, background=(255, 255, 255, 0))
        imgTxtShadow = ImageText(buffer, background=(0, 0, 0, 0))
        imgTxtWhiteShadow = ImageText(buffer, background=(80, 80, 80, 0))

        textHeight, imgTxt, imgTxtShadow, imgTxtWhiteShadow = textBoxBuilder(
            text, height, width, bufferOffsetH, buffer, font, fontSize)
        textTitleHeight, imgTitleTxt, imgTitleTxtShadow, imgTitleTxtWhiteShadow = textBoxBuilder(
            titleText, height, width, bufferOffsetH, buffer, font, fontSize)

        #Open next image
        if idx == (len(imageList) - 1):
            logging.info("Last Image")
            im2 = Image.open(imageList[0])
        else:
            logging.info(str(idx) + " len" + str(len(imageList)))
            im2 = Image.open(imageList[idx + 1])

        outputimage = Image.new(
            'RGBA', (width, height),
            (50, 50, 50, 255))  # Base image everything else is pasted onto
        outputimageZoom = Image.new(
            'RGBA', (width, height),
            (50, 50, 50, 255))  # Base image used for the zoomed frames
        im1 = image_rotate(im1)
        im1 = resize_image(im1, height, width)
        img_w, img_h = im1.size
        offset = ((width - img_w) // 2, (height - img_h) // 2)
        outputimage.paste(im1, offset)
        outputimageZoom.paste(im1, offset)

        #Output Blend Frames
        outputimageBlend = Image.new(
            'RGBA', (width, height),
            (50, 50, 50, 255))  # Base image for the blend (next-image) frames

        im2 = image_rotate(im2)
        im2 = resize_image(im2, height, width)
        img_wBlend, img_hBlend = im2.size
        offsetBlend = ((width - img_wBlend) // 2, (height - img_hBlend) // 2)
        outputimageBlend.paste(im2, offsetBlend)

        if (idx == 0):
            # First image: show titleText when the video opens
            fbi = ((currentFrame) +
                   (framesbetweenImages + blendingFrames + staticFrames +
                    titleTextDuration + textFrames))
            while (currentFrame) < (fbi - blendingFrames -
                                    framesbetweenImages):
                #draw = ImageDraw.Draw(outputimage)
                #draw.text((100, 100), text, font=font, fill="blue")

                # The first image always starts at frame 0
                currentTitleTextFrame = currentFrame

                if currentFrame <= textFrames:
                    # Animate the title text in along the Y axis
                    currentTextFrame = currentFrame / textFrames

                    newTextHeight = int(
                        (bufferOffsetW + textTitleHeight) *
                        pytweening.easeOutQuad(currentTextFrame)
                    ) - bufferOffsetW - textTitleHeight
                    outputimage.paste(outputimageZoom, (0, 0))
                    outputimage.paste(imgTitleTxtShadow.image,
                                      (bufferOffsetW - 2, newTextHeight + 2),
                                      imgTitleTxtShadow.image)
                    outputimage.paste(imgTitleTxtShadow.image,
                                      (bufferOffsetW - 1, newTextHeight + 1),
                                      imgTitleTxtShadow.image)
                    outputimage.paste(imgTitleTxtWhiteShadow.image,
                                      (bufferOffsetW + 1, newTextHeight - 1),
                                      imgTitleTxtWhiteShadow.image)
                    outputimage.paste(imgTitleTxt.image,
                                      (bufferOffsetW, newTextHeight),
                                      imgTitleTxt.image)
                    #logging.info("textHeight N "+str(newTextHeight))
                    video.write(
                        cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
                elif currentFrame <= (titleTextDuration):
                    #Static

                    video.write(
                        cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
                # Start the closing text animation here
                elif currentFrame <= textFrames + titleTextDuration:
                    currentTextFrame = (currentFrame -
                                        titleTextDuration) / textFrames
                    newTextHeight = int(height - (
                        (height - textHeight) *
                        pytweening.easeOutQuad(currentTextFrame)))
                    newTextTitleHeight = int(
                        (bufferOffsetW + textTitleHeight) *
                        pytweening.easeOutQuad(-(currentTextFrame - 1))
                    ) - bufferOffsetW - textTitleHeight

                    outputimage.paste(outputimageZoom, (0, 0))
                    outputimage.paste(
                        imgTitleTxtShadow.image,
                        (bufferOffsetW - 2, newTextTitleHeight + 2),
                        imgTitleTxtShadow.image)
                    outputimage.paste(
                        imgTitleTxtShadow.image,
                        (bufferOffsetW - 1, newTextTitleHeight + 1),
                        imgTitleTxtShadow.image)
                    outputimage.paste(
                        imgTitleTxtWhiteShadow.image,
                        (bufferOffsetW + 1, newTextTitleHeight - 1),
                        imgTitleTxtWhiteShadow.image)
                    outputimage.paste(imgTitleTxt.image,
                                      (bufferOffsetW, newTextTitleHeight),
                                      imgTitleTxt.image)

                    outputimage.paste(imgTxtShadow.image,
                                      (bufferOffsetW - 2, newTextHeight + 2),
                                      imgTxtShadow.image)
                    outputimage.paste(imgTxtShadow.image,
                                      (bufferOffsetW - 1, newTextHeight + 1),
                                      imgTxtShadow.image)
                    outputimage.paste(imgTxtWhiteShadow.image,
                                      (bufferOffsetW + 1, newTextHeight - 1),
                                      imgTxtShadow.image)
                    outputimage.paste(imgTxt.image,
                                      (bufferOffsetW, newTextHeight),
                                      imgTxt.image)
                    #logging.info("textHeight N "+str(newTextHeight))
                    video.write(
                        cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
                else:
                    video.write(
                        cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
        else:
            fbi = ((currentFrame) +
                   (framesbetweenImages + blendingFrames + staticFrames))
            # Start static frames; the text blend will start here
            while (currentFrame) < (fbi - blendingFrames -
                                    framesbetweenImages):
                #logging.info("static frames"+str(currentFrame))
                #draw = ImageDraw.Draw(outputimage)
                #draw.text((100, 100), text, font=font, fill="blue")

                currentTextFrame = textFrames - (
                    (fbi - blendingFrames - framesbetweenImages - staticFrames)
                    + textFrames - currentFrame)

                if currentTextFrame <= textFrames:
                    #Do animation for text Y axis
                    #Gets range between 0 and 1
                    #logging.info("currentTextFrame "+str(currentTextFrame))
                    currentTextFrame = currentTextFrame / textFrames

                    #logging.info("currentTextFrame -  "+str(currentTextFrame))

                    newTextHeight = int(height - (
                        (height - textHeight) *
                        pytweening.easeOutQuad(currentTextFrame)))
                    outputimage.paste(outputimageZoom, (0, 0))
                    outputimage.paste(imgTxtShadow.image,
                                      (bufferOffsetW - 2, newTextHeight + 2),
                                      imgTxtShadow.image)
                    outputimage.paste(imgTxtShadow.image,
                                      (bufferOffsetW - 1, newTextHeight + 1),
                                      imgTxtShadow.image)
                    outputimage.paste(imgTxtWhiteShadow.image,
                                      (bufferOffsetW + 1, newTextHeight - 1),
                                      imgTxtShadow.image)
                    outputimage.paste(imgTxt.image,
                                      (bufferOffsetW, newTextHeight),
                                      imgTxt.image)
                    #logging.info("textHeight N "+str(newTextHeight))
                    video.write(
                        cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
                else:
                    #imgTxt.save('sample-imagetext.png')()
                    #outputimage.paste(imgTxt.image, (bufferOffsetW, textHeight), imgTxt.image)

                    video.write(
                        cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
        # Start the zoom frames
        while currentFrame < (fbi - blendingFrames):
            #logging.info("zoom frames"+str(currentFrame))
            # Find the zoom based on frame position; the value runs from 0 to 1
            zoomPosition = -(((fbi - blendingFrames - currentFrame) /
                              (framesbetweenImages))) + 1
            #Find dimensions based on zoom position
            zoomScaleH, zoomScaleW = getZoomScaleInt(height, width,
                                                     zoomPosition)
            #logging.info("next-W"+str(zoomScaleW))
            #logging.info("next-H"+str(zoomScaleH))

            # Check how many consecutive zoom frames keep (nearly) the same
            # size; if several do, we can anti-alias them by blending.
            zoomFrameCheck = 1
            keepCheck = True
            if (zoomPosition < 1):
                while keepCheck:
                    if (zoomFrameCheck <
                        (fbi - currentFrame - blendingFrames)):
                        nextZoomPosition = -(
                            ((fbi - blendingFrames -
                              (currentFrame + zoomFrameCheck)) /
                             framesbetweenImages)) + 1
                        nextZoomScaleH, nextZoomScaleW = getZoomScaleInt(
                            height, width, nextZoomPosition)
                        # If the next frame's zoom size differs by fewer than
                        # 3 pixels, look one frame further ahead; zoomFrameCheck
                        # ends up as the number of in-between alias frames.
                        if ((nextZoomScaleH - zoomScaleH < 3)
                                or (nextZoomScaleW - zoomScaleW < 3)):
                            zoomFrameCheck += 1
                        else:
                            #logging.info("Size Changed after - " + str(zoomFrameCheck) + " frames")
                            keepCheck = False
                    else:
                        #logging.info("Size Changed after and maxed - " + str(zoomFrameCheck) + " frames")
                        keepCheck = False

            # If more than one similar-sized frame was found, render the first frame and the in-between blend frames
            if (zoomFrameCheck > 1):
                #get first frames
                outputimageZoomPaste = outputimageZoom.resize(
                    (zoomScaleW, zoomScaleH), Image.ANTIALIAS)
                newOutputimageZoomPaste = Image.new('RGBA', (width, height),
                                                    (50, 50, 255, 255))

                offsetZ = (math.floor((width - zoomScaleW) // 2),
                           math.floor((height - zoomScaleH) // 2))
                newOutputimageZoomPaste.paste(outputimageZoomPaste, offsetZ)
                #get next image size up for blend

                zoomPositionBlend = -(((fbi - blendingFrames -
                                        (currentFrame + zoomFrameCheck - 1)) /
                                       framesbetweenImages)) + 1

                zoomScaleHblend, zoomScaleWblend = getZoomScaleInt(
                    height, width, zoomPositionBlend)
                #logging.info(fbi - blendingFrames - currentFrame + zoomFrameCheck - 1)
                #logging.info(zoomPositionBlend)
                outputimageZoomPasteBlend = outputimageZoom.resize(
                    (zoomScaleWblend, zoomScaleHblend), Image.ANTIALIAS)
                # Canvas for the next-size (blend target) frame
                newOutputimageZoomPasteBlend = Image.new(
                    'RGBA', (width, height), (50, 50, 50, 255))

                #logging.info("big-W"+str(zoomScaleWblend))
                #logging.info("big-H"+str(zoomScaleHblend))
                #logging.info(zoomScaleHblend)
                offsetZLarge = (math.floor((width - zoomScaleWblend) // 2),
                                math.floor((height - zoomScaleHblend) // 2))

                newOutputimageZoomPasteBlend.paste(outputimageZoomPasteBlend,
                                                   offsetZLarge)

                for x in range(1, zoomFrameCheck):
                    blendPercent = ((x - 1) / (zoomFrameCheck - 1))

                    outputimageZoomPasteBlendOutput = Image.blend(
                        newOutputimageZoomPaste, newOutputimageZoomPasteBlend,
                        blendPercent)

                    #outputimage = Image.new('RGBA', (zoomScaleWblend, zoomScaleHblend), (50, 50, 255, 255))
                    #outputimage.paste(outputimageZoomPasteBlendOutput, offsetZ)
                    #draw = ImageDraw.Draw(outputimage)
                    #draw.text((100, 100), str(blendPercent), font=font, fill="blue")
                    outputimageZoomPasteBlendOutput.paste(
                        imgTxtShadow.image,
                        (bufferOffsetW - 2, textHeight + 2),
                        imgTxtShadow.image)
                    outputimageZoomPasteBlendOutput.paste(
                        imgTxtShadow.image,
                        (bufferOffsetW - 1, textHeight + 1),
                        imgTxtShadow.image)
                    outputimageZoomPasteBlendOutput.paste(
                        imgTxtWhiteShadow.image,
                        (bufferOffsetW + 1, textHeight - 1),
                        imgTxtShadow.image)
                    outputimageZoomPasteBlendOutput.paste(
                        imgTxt.image, (bufferOffsetW, textHeight),
                        imgTxt.image)
                    video.write(
                        cv2.cvtColor(np.array(outputimageZoomPasteBlendOutput),
                                     cv2.COLOR_RGB2BGR))
                    #logging.info("Precent:"+str(blendPercent))
                    currentFrame += 1

            # Otherwise just render a single frame
            else:
                #logging.info(currentFrame)
                outputimageZoomPaste = outputimageZoom.resize(
                    (zoomScaleW, zoomScaleH), Image.ANTIALIAS)
                offsetZ = (math.floor((width - zoomScaleW) // 2),
                           math.floor((height - zoomScaleH) // 2))
                outputimage.paste(outputimageZoomPaste, offsetZ)
                outputimage.paste(imgTxtShadow.image,
                                  (bufferOffsetW - 2, textHeight + 2),
                                  imgTxtShadow.image)
                outputimage.paste(imgTxtShadow.image,
                                  (bufferOffsetW - 1, textHeight + 1),
                                  imgTxtShadow.image)
                outputimage.paste(imgTxtWhiteShadow.image,
                                  (bufferOffsetW + 1, textHeight - 1),
                                  imgTxtShadow.image)
                outputimage.paste(imgTxt.image, (bufferOffsetW, textHeight),
                                  imgTxt.image)

                #draw = ImageDraw.Draw(outputimage)
                #draw.text((100, 100), 'Hello World!', font=font, fill="blue")
                video.write(
                    cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                #logging.info(currentFrame)
                currentFrame += 1

        logging.info("Blending frames image: " + str(idx) + " curent Frame:" +
                     str(currentFrame))

        # Start the blending (transition) frames
        while (currentFrame < fbi):
            #Blending frames here
            framesLeft = (currentFrame - fbi) + blendingFrames + 1
            easePosition = -(((fbi - currentFrame - 1) / blendingFrames) - 1)
            offesetW = (width / blendingFrames) * framesLeft
            outputimage = Image.new('RGBA', (width, height), (50, 50, 50, 255))
            outputimage.paste(outputimageZoomPaste, offsetZ)
            outputimage.paste(imgTxtShadow.image,
                              (bufferOffsetW - 1, textHeight + 1),
                              imgTxtShadow.image)
            outputimage.paste(imgTxtShadow.image,
                              (bufferOffsetW - 2, textHeight + 2),
                              imgTxtShadow.image)
            outputimage.paste(imgTxtWhiteShadow.image,
                              (bufferOffsetW + 1, textHeight - 1),
                              imgTxtShadow.image)
            outputimage.paste(imgTxt.image, (bufferOffsetW, textHeight),
                              imgTxt.image)
            if idx % 2 == 0:
                outputimage.paste(
                    outputimageBlend,
                    (-width +
                     int(pytweening.easeInOutQuad(easePosition) * width), 0))
            else:
                outputimage.paste(
                    outputimageBlend,
                    (width -
                     int(pytweening.easeInOutQuad(easePosition) * width), 0))
            video.write(cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
            currentFrame += 1

    try:
        logging.info("Video saving locally")
        video.release()
        logging.info("Video saved")
        return str(path + "/" + path + ".avi")
    except Exception:
        logging.info("Video failed on release")
        return False
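A hypothetical call, assuming the helpers used above (image_rotate, resize_image, limitTopRes, getFontSize, getZoomScaleInt, textBoxBuilder, ImageText) are importable and that the output directory already exists; the file names below are illustrative:

imageList = ["photos/a.jpg", "photos/b.jpg"]
descriptionList = ["First caption", "Second caption"]
# `path` is used both as the output directory and the base file name.
result = buildSlideShow(imageList, descriptionList, "gallery1", "My Holiday")
print(result)  # e.g. "gallery1/gallery1.avi"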
Example #5
        if not drag:
            window_vel = (mouse_pos - offset) * glm.vec2(1, 0.6) * 0.8
            if any(glm.isnan(window_vel)):
                window_vel = glm.vec2()

    rl.BeginDrawing()
    rl.ClearBackground([0] * 4)

    rl.BeginTextureMode(target)
    rl.ClearBackground([0] * 4)
    rl.DrawRectangle(0, 0, width, height, [0] * 4)

    rl.DrawTextureEx(
        awd,
        tween.getPointOnLine(*start1, *end1, tween.easeInOutQuad(elapsed)),
        0,
        32,
        [255, 200, 0, trans]  # Yellow
    )

    rl.DrawTextureEx(
        awd,
        tween.getPointOnLine(*start2, *end2, tween.easeInOutQuad(elapsed)),
        -90,
        32,
        [255, 0, 55, trans]  # Red
    )

    rl.DrawTextureEx(
        awd,
        tween.getPointOnLine(*start3, *end3, tween.easeInOutQuad(elapsed)),
        -180,
        32,
        [100, 200, 55, trans]  # Green
    )

    rl.DrawTextureEx(
        awd,
        tween.getPointOnLine(*start4, *end4, tween.easeInOutQuad(elapsed)),
        90,
        32,
Example #7
File: main.py Project: Pebaz/awd
        if not drag:
            window_vel = (mouse_pos - offset) * glm.vec2(1, 0.6) * 0.8
            if any(glm.isnan(window_vel)):
                window_vel = glm.vec2()

    rl.BeginDrawing()
    rl.ClearBackground([0] * 4)

    rl.BeginTextureMode(target)
    rl.ClearBackground([0] * 4)
    rl.DrawRectangle(0, 0, width, height, [0] * 4)

    # --------------------------------------------------------------------------

    s = tween.easeInOutQuad(elapsed) * 0.1
    if elapsed == 0:
        frame_flip = not frame_flip
    if frame_flip:
        scale = 1 + -s
    else:
        scale = 0.9 + s

    #rl.DrawTextureEx(frames[frame_counter], (0, 0), 0, scale, (0, 155, 255, 200))
    #rl.DrawTextureEx(frames[frame_counter], (6, 0), 0, scale, (55, 200, 55, 100))
    #rl.DrawTextureEx(frames[frame_counter], (0, 6), 0, scale, (255, 0, 55, 100))

    rl.DrawTexturePro(frames[frame_counter], [0, 0, width, height], [
        width // 2 + ((width - width * scale) // 2), height // 2 +
        ((height - height * scale) // 2), width * scale, height * scale
    ], [width // 2 + 6, height // 2 + 6], 0, (0, 155, 255, 200))
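Both raylib examples drive their motion with pytweening: getPointOnLine(x1, y1, x2, y2, n) interpolates between two points, and passing an eased progress value gives smooth acceleration and deceleration, while easeInOutQuad alone drives the scale pulse above. A standalone sketch of the movement pattern; the endpoints are illustrative:

import pytweening as tween

start, end = (0, 0), (200, 100)
for step in range(5):
    elapsed = step / 4  # linear progress 0..1
    # Eased progress bunches the points near the endpoints of the line.
    print(tween.getPointOnLine(*start, *end, tween.easeInOutQuad(elapsed)))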