async def login_click(self, event):
    self.username = self.username_entry.GetValue()
    self.password = self.password_entry.GetValue()
    self.username = self.username.strip("\n")
    self.password = self.password.strip("\n")
    if not (self.username and self.password):
        # If either the username or password is missing, return.
        return
    if len(self.username) < 3:
        # If the username is shorter than 3 characters, return.
        return
    # Disable the entries to stop people from typing in them.
    self.username_entry.Disable()
    self.password_entry.Disable()
    self.log_in_button.Disable()
    # Get the position of the inner_panel.
    old_pos = self.inner_panel.GetPosition()
    start_point = old_pos[0]
    # Move the panel over to the right.
    for i in range(0, 512):
        wx.Yield()
        self.inner_panel.SetPosition(
            (int(start_point + pytweening.easeOutQuad(i / 512) * 512), old_pos[1]))
    # Hide the panel. It is already off to the right, so it isn't visible anyway.
    self.inner_panel.Hide()
    self.web_view.SetSize((512, 600))
    # Expand the window.
    for i in range(0, 88):
        self.SetSize((512, int(512 + pytweening.easeOutQuad(i / 88) * 88)))
    # Run the user_login coroutine.
    fd = await user_login(self.client, self.username, self.password)
    # Load the captcha URL.
    if fd:
        self.web_view.LoadURL(fd.url)
        self.web_view.Show()
    else:
        # No captcha needed.
        self.Close()
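# A stock wx event loop cannot await a coroutine handler like login_click, so
# this snippet presumably runs under wxasync (an assumption, not stated in the
# original). A minimal binding/startup sketch; LoginFrame is a hypothetical
# frame class that defines login_click and the widgets it references:
import asyncio

import wx
from wxasync import AsyncBind, WxAsyncApp

# inside LoginFrame.__init__ the coroutine would be bound roughly like:
#     AsyncBind(wx.EVT_BUTTON, self.login_click, self.log_in_button)

app = WxAsyncApp()
frame = LoginFrame(None)
frame.Show()
app.SetTopWindow(frame)
asyncio.get_event_loop().run_until_complete(app.MainLoop())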
def breathe(count, step_sleep, repeat_sleep):
    for repeat in range(count):
        # Fade out: duty cycle eases from 100% down to 0%.
        for dc in range(1000, -1, -2):
            ease = pytweening.easeOutQuad(dc / 1000) * 100
            p.ChangeDutyCycle(ease)
            time.sleep(step_sleep)
        time.sleep(repeat_sleep)
        # Fade in: duty cycle eases from 0% back up to 100%.
        for dc in range(1, 1001, 2):
            ease = pytweening.easeInQuad(dc / 1000) * 100
            p.ChangeDutyCycle(ease)
            time.sleep(step_sleep)
        time.sleep(repeat_sleep)
    # Make sure we end at full on, no matter what breathe does.
    p.ChangeDutyCycle(100)
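# breathe() above assumes a module-level PWM object `p` plus the `time` and
# `pytweening` imports. A minimal setup sketch on a Raspberry Pi, assuming
# RPi.GPIO and an LED on BCM pin 18 (both assumptions, not part of the
# original snippet):
import time

import pytweening
import RPi.GPIO as GPIO

LED_PIN = 18                 # hypothetical pin; match your wiring
GPIO.setmode(GPIO.BCM)
GPIO.setup(LED_PIN, GPIO.OUT)
p = GPIO.PWM(LED_PIN, 100)   # 100 Hz PWM; breathe() varies the duty cycle
p.start(100)                 # begin at full brightness

try:
    breathe(count=3, step_sleep=0.002, repeat_sleep=0.5)
finally:
    p.stop()
    GPIO.cleanup()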
def get_trace_list(dis):
    result = []
    pre = 0
    for i in range(1, dis + 1):
        # Get the eased proportion of the total displacement
        y = pytweening.easeOutQuad(i / dis)
        v = round(dis * y)
        result.append(v - pre)
        pre = v
    return result
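# Hypothetical usage sketch (not part of the original snippet): because each
# entry is the difference between consecutive eased positions, the deltas can
# be applied one at a time (e.g. as relative mouse or scroll moves) and still
# sum exactly to the requested distance, with big steps first and tiny steps
# near the end.
steps = get_trace_list(200)
print(steps[:5], "...", steps[-5:])
assert sum(steps) == 200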
def make_frame(t):
    #colorDiv = t + 0.01
    #color1 = [1.0 / colorDiv, 0.0, 0.0]
    #color2 = [0.0, 1.0 / colorDiv, 0.0]
    #gradient = gizeh.ColorGradient("linear", ((0, (0, .5, 1)), (1, (0, 1, 1))), xy1=(-cosR, -sinR), xy2=(cosR, sinR))
    #gradRad1 = radius1 - 20
    #gradRad2 = radius1 + 20
    #gradient = gizeh.ColorGradient(type="radial",
    #                               stops_colors=[(0, color1), (1, color2)],
    #                               xy1=[0.0, 0.0], xy2=[gradRad1, 0.0], xy3=[0.0, gradRad2])
    surface = gizeh.Surface(W, H)

    # orbit halo
    #circle1 = gizeh.circle(radius1, xy=(W/2, H/2), stroke=gradient, stroke_width=5)
    #circle1.draw(surface)

    for i in range(numParticles):
        # Orbiting planet
        particle = particles[i]
        if particle.easing == 1:
            angle = pytweening.linear((duration - t) / duration) * 360 * particle.direction
        elif particle.easing == 2:
            angle = pytweening.easeInQuad((duration - t) / duration) * 360 * particle.direction
        elif particle.easing == 3:
            angle = pytweening.easeOutQuad((duration - t) / duration) * 360 * particle.direction
        elif particle.easing == 4:
            angle = pytweening.easeInOutQuad((duration - t) / duration) * 360 * particle.direction
        elif particle.easing == 5:
            angle = pytweening.easeInSine((duration - t) / duration) * 360 * particle.direction
        elif particle.easing == 6:
            angle = pytweening.easeOutSine((duration - t) / duration) * 360 * particle.direction
        elif particle.easing == 7:
            angle = pytweening.easeInOutSine((duration - t) / duration) * 360 * particle.direction
        radians = math.radians(angle)
        cosR = math.cos(radians)
        sinR = math.sin(radians)
        x = W / 2 + cosR * particle.orbit_radius
        y = H / 2 + sinR * particle.orbit_radius
        fill = particle.color
        #circle = gizeh.circle(particle.radius, xy=(x, y), fill=(1, 0, 1))
        circle = gizeh.circle(particle.radius, xy=(x, y), fill=fill)
        circle.draw(surface)
    return surface.get_npimage()
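# make_frame(t) returns a numpy image for time t, which is the frame-function
# signature moviepy's VideoClip expects. A rendering sketch, assuming the
# script's globals (W, H, duration, numParticles, particles) are already
# defined as in the original and that moviepy is available; the output
# filename is hypothetical:
from moviepy.editor import VideoClip

clip = VideoClip(make_frame, duration=duration)
clip.write_videofile("orbit.mp4", fps=24)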
def ease_out_quad(n):
    """Fast at the start, slow at the end (quadratic easing)."""
    return pytweening.easeOutQuad(n)
def getZoomScaleInt(height, width, zoomPosition):
    # Scale each dimension up by at most 5% (1/20) as zoomPosition eases from 0 to 1.
    zoomScaleH = height + ((height / 20) * pytweening.easeOutQuad(zoomPosition))
    zoomScaleW = width + ((width / 20) * pytweening.easeOutQuad(zoomPosition))
    return int(zoomScaleH), int(zoomScaleW)
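# Quick illustration (not in the original): at zoomPosition 0 the frame is
# unscaled, and it grows toward +5% of each dimension as the position
# approaches 1, with most of the growth happening early because of easeOutQuad.
for pos in (0.0, 0.25, 0.5, 1.0):
    print(pos, getZoomScaleInt(720, 1280, pos))
# 0.0 -> (720, 1280) ... 1.0 -> (756, 1344)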
def buildSlideShow(imageList, descriptionList, path, titleText):
    FPS = 60  # Sets the FPS of the entire video
    currentFrame = 0  # The animation hasn't moved yet, so leave it at zero
    blendingFrames = 60  # How long each transition should last
    framesbetweenImages = 120  # Zoom frames
    staticFrames = 120  # Frames at the start are static; text comes in here too
    textFrames = 30  # Frames for text; should be < staticFrames
    titleTextDuration = 280

    im1 = Image.open(imageList[0])  # Load the first image
    im1 = image_rotate(im1)  # Rotate image if necessary
    im2 = im1  # Define a second image to force a global variable to be created
    width, height = im1.size  # Get the dimensions used to create the video

    # Limit W & H to 720p while maintaining aspect ratio
    logging.info("h:" + str(height) + " W:" + str(width))
    width, height = limitTopRes(width, height)
    logging.info("new res: h:" + str(height) + " W:" + str(width))

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    video = cv2.VideoWriter(path + "/" + path + ".avi", fourcc, FPS, (width, height), True)

    # Write gallery objects
    imageListFile = open(path + "/imageList-" + path + ".txt", "w+")
    imageListFile.write(str(imageList))
    descriptionListFile = open(path + "/descriptionList-" + path + ".txt", "w+")
    descriptionListFile.write(str(descriptionList))

    # Font type
    font = 'MaxImpact.ttf'
    tileFontSize = getFontSize(titleText, height, width)

    for idx, val in enumerate(imageList):
        logging.info("start image: " + str(idx))
        im1 = Image.open(val)
        fontSize, text = getFontSize(descriptionList[idx], height, width)

        # Get static frame length (longer captions stay on screen longer)
        staticFrames = 80 + (len(text) * 4)
        if staticFrames > 660:
            staticFrames = 660

        buffer = (int(width - (width / 10)), int(height - (height / 10)))
        bufferOffset = (int(width / 20), int(height / 20))
        bufferOffsetW = int(width / 20)
        bufferOffsetH = int(height / 9)

        imgTxt = ImageText(buffer, background=(255, 255, 255, 0))
        imgTxtShadow = ImageText(buffer, background=(0, 0, 0, 0))
        imgTxtWhiteShadow = ImageText(buffer, background=(80, 80, 80, 0))
        textHeight, imgTxt, imgTxtShadow, imgTxtWhiteShadow = textBoxBuilder(
            text, height, width, bufferOffsetH, buffer, font, fontSize)
        textTitleHeight, imgTitleTxt, imgTitleTxtShadow, imgTitleTxtWhiteShadow = textBoxBuilder(
            titleText, height, width, bufferOffsetH, buffer, font, fontSize)

        # Open the next image
        if idx == (len(imageList) - 1):
            logging.info("Last Image")
            im2 = Image.open(imageList[0])
        else:
            logging.info(str(idx) + " len" + str(len(imageList)))
            im2 = Image.open(imageList[idx + 1])

        outputimage = Image.new('RGBA', (width, height), (50, 50, 50, 255))  # Image all others will paste onto
        outputimageZoom = Image.new('RGBA', (width, height), (50, 50, 50, 255))  # Image zoomed frames will paste onto
        im1 = image_rotate(im1)
        im1 = resize_image(im1, height, width)
        img_w, img_h = im1.size
        offset = ((width - img_w) // 2, (height - img_h) // 2)
        outputimage.paste(im1, offset)
        outputimageZoom.paste(im1, offset)

        # Output blend frames
        outputimageBlend = Image.new('RGBA', (width, height), (50, 50, 50, 255))  # Image the next slide is pasted onto
        im2 = image_rotate(im2)
        im2 = resize_image(im2, height, width)
        img_wBlend, img_hBlend = im2.size
        offsetBlend = ((width - img_wBlend) // 2, (height - img_hBlend) // 2)
        outputimageBlend.paste(im2, offsetBlend)

        if idx == 0:
            # First image: show titleText on open
            fbi = (currentFrame + (framesbetweenImages + blendingFrames + staticFrames + titleTextDuration + textFrames))
            while currentFrame < (fbi - blendingFrames - framesbetweenImages):
                # First pass will always start at frame 0
                currentTitleTextFrame = currentFrame
                if currentFrame <= textFrames:
                    # Animate the title text in along the Y axis
                    currentTextFrame = currentFrame / textFrames
                    newTextHeight = int(
                        (bufferOffsetW + textTitleHeight) * pytweening.easeOutQuad(currentTextFrame)
                    ) - bufferOffsetW - textTitleHeight
                    outputimage.paste(outputimageZoom, (0, 0))
                    outputimage.paste(imgTitleTxtShadow.image, (bufferOffsetW - 2, newTextHeight + 2), imgTitleTxtShadow.image)
                    outputimage.paste(imgTitleTxtShadow.image, (bufferOffsetW - 1, newTextHeight + 1), imgTitleTxtShadow.image)
                    outputimage.paste(imgTitleTxtWhiteShadow.image, (bufferOffsetW + 1, newTextHeight - 1), imgTitleTxtWhiteShadow.image)
                    outputimage.paste(imgTitleTxt.image, (bufferOffsetW, newTextHeight), imgTitleTxt.image)
                    video.write(cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
                elif currentFrame <= titleTextDuration:
                    # Static title frames
                    video.write(cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
                elif currentFrame <= textFrames + titleTextDuration:
                    # Start the last-frames animation here
                    currentTextFrame = (currentFrame - titleTextDuration) / textFrames
                    newTextHeight = int(height - ((height - textHeight) * pytweening.easeOutQuad(currentTextFrame)))
                    newTextTitleHeight = int(
                        (bufferOffsetW + textTitleHeight) * pytweening.easeOutQuad(-(currentTextFrame - 1))
                    ) - bufferOffsetW - textTitleHeight
                    outputimage.paste(outputimageZoom, (0, 0))
                    outputimage.paste(imgTitleTxtShadow.image, (bufferOffsetW - 2, newTextTitleHeight + 2), imgTitleTxtShadow.image)
                    outputimage.paste(imgTitleTxtShadow.image, (bufferOffsetW - 1, newTextTitleHeight + 1), imgTitleTxtShadow.image)
                    outputimage.paste(imgTitleTxtWhiteShadow.image, (bufferOffsetW + 1, newTextTitleHeight - 1), imgTitleTxtWhiteShadow.image)
                    outputimage.paste(imgTitleTxt.image, (bufferOffsetW, newTextTitleHeight), imgTitleTxt.image)
                    outputimage.paste(imgTxtShadow.image, (bufferOffsetW - 2, newTextHeight + 2), imgTxtShadow.image)
                    outputimage.paste(imgTxtShadow.image, (bufferOffsetW - 1, newTextHeight + 1), imgTxtShadow.image)
                    outputimage.paste(imgTxtWhiteShadow.image, (bufferOffsetW + 1, newTextHeight - 1), imgTxtShadow.image)
                    outputimage.paste(imgTxt.image, (bufferOffsetW, newTextHeight), imgTxt.image)
                    video.write(cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
                else:
                    video.write(cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
        else:
            fbi = (currentFrame + (framesbetweenImages + blendingFrames + staticFrames))
            # Static frames; the text blend starts here
            while currentFrame < (fbi - blendingFrames - framesbetweenImages):
                currentTextFrame = textFrames - (
                    (fbi - blendingFrames - framesbetweenImages - staticFrames) + textFrames - currentFrame)
                if currentTextFrame <= textFrames:
                    # Animate the caption text in along the Y axis
                    # (currentTextFrame is scaled to the range 0..1)
                    currentTextFrame = currentTextFrame / textFrames
                    newTextHeight = int(height - ((height - textHeight) * pytweening.easeOutQuad(currentTextFrame)))
                    outputimage.paste(outputimageZoom, (0, 0))
                    outputimage.paste(imgTxtShadow.image, (bufferOffsetW - 2, newTextHeight + 2), imgTxtShadow.image)
                    outputimage.paste(imgTxtShadow.image, (bufferOffsetW - 1, newTextHeight + 1), imgTxtShadow.image)
                    outputimage.paste(imgTxtWhiteShadow.image, (bufferOffsetW + 1, newTextHeight - 1), imgTxtShadow.image)
                    outputimage.paste(imgTxt.image, (bufferOffsetW, newTextHeight), imgTxt.image)
                    video.write(cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
                else:
                    video.write(cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                    currentFrame += 1

        # Start zoom frames
        while currentFrame < (fbi - blendingFrames):
            # Zoom position goes from 0 to 1 over the zoom section
            zoomPosition = -((fbi - blendingFrames - currentFrame) / framesbetweenImages) + 1
            # Find dimensions based on zoom position
            zoomScaleH, zoomScaleW = getZoomScaleInt(height, width, zoomPosition)
            # Check whether upcoming zoom frames stay about the same size; if so,
            # we can antialias them by blending between the two sizes
            zoomFrameCheck = 1
            keepCheck = True
            if zoomPosition < 1:
                while keepCheck:
                    if zoomFrameCheck < (fbi - currentFrame - blendingFrames):
                        nextZoomPosition = -((fbi - blendingFrames - (currentFrame + zoomFrameCheck)) / framesbetweenImages) + 1
                        nextZoomScaleH, nextZoomScaleW = getZoomScaleInt(height, width, nextZoomPosition)
                        # If the zoom sizes differ by less than 2 pixels, skip ahead;
                        # zoomFrameCheck determines how many in-between alias frames there are
                        if (nextZoomScaleH - zoomScaleH < 3) or (nextZoomScaleW - zoomScaleW < 3):
                            zoomFrameCheck += 1
                        else:
                            keepCheck = False
                    else:
                        keepCheck = False
            if zoomFrameCheck > 1:
                # Render the initial frame and the in-between frames
                outputimageZoomPaste = outputimageZoom.resize((zoomScaleW, zoomScaleH), Image.ANTIALIAS)
                newOutputimageZoomPaste = Image.new('RGBA', (width, height), (50, 50, 255, 255))
                offsetZ = (math.floor((width - zoomScaleW) // 2), math.floor((height - zoomScaleH) // 2))
                newOutputimageZoomPaste.paste(outputimageZoomPaste, offsetZ)
                # Get the next image size up for the blend
                zoomPositionBlend = -((fbi - blendingFrames - (currentFrame + zoomFrameCheck - 1)) / framesbetweenImages) + 1
                zoomScaleHblend, zoomScaleWblend = getZoomScaleInt(height, width, zoomPositionBlend)
                outputimageZoomPasteBlend = outputimageZoom.resize((zoomScaleWblend, zoomScaleHblend), Image.ANTIALIAS)
                # Crop image for blend
                newOutputimageZoomPasteBlend = Image.new('RGBA', (width, height), (50, 50, 50, 255))
                offsetZLarge = (math.floor((width - zoomScaleWblend) // 2), math.floor((height - zoomScaleHblend) // 2))
                newOutputimageZoomPasteBlend.paste(outputimageZoomPasteBlend, offsetZLarge)
                for x in range(1, zoomFrameCheck):
                    blendPercent = (x - 1) / (zoomFrameCheck - 1)
                    outputimageZoomPasteBlendOutput = Image.blend(
                        newOutputimageZoomPaste, newOutputimageZoomPasteBlend, blendPercent)
                    outputimageZoomPasteBlendOutput.paste(imgTxtShadow.image, (bufferOffsetW - 2, textHeight + 2), imgTxtShadow.image)
                    outputimageZoomPasteBlendOutput.paste(imgTxtShadow.image, (bufferOffsetW - 1, textHeight + 1), imgTxtShadow.image)
                    outputimageZoomPasteBlendOutput.paste(imgTxtWhiteShadow.image, (bufferOffsetW + 1, newTextHeight - 1), imgTxtShadow.image)
                    outputimageZoomPasteBlendOutput.paste(imgTxt.image, (bufferOffsetW, textHeight), imgTxt.image)
                    video.write(cv2.cvtColor(np.array(outputimageZoomPasteBlendOutput), cv2.COLOR_RGB2BGR))
                    currentFrame += 1
            else:
                # Otherwise just render a single frame
                outputimageZoomPaste = outputimageZoom.resize((zoomScaleW, zoomScaleH), Image.ANTIALIAS)
                offsetZ = (math.floor((width - zoomScaleW) // 2), math.floor((height - zoomScaleH) // 2))
                outputimage.paste(outputimageZoomPaste, offsetZ)
                outputimage.paste(imgTxtShadow.image, (bufferOffsetW - 2, textHeight + 2), imgTxtShadow.image)
                outputimage.paste(imgTxtShadow.image, (bufferOffsetW - 1, textHeight + 1), imgTxtShadow.image)
                outputimage.paste(imgTxtWhiteShadow.image, (bufferOffsetW + 1, newTextHeight - 1), imgTxtShadow.image)
                outputimage.paste(imgTxt.image, (bufferOffsetW, textHeight), imgTxt.image)
                video.write(cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
                currentFrame += 1

        logging.info("Blending frames image: " + str(idx) + " current Frame:" + str(currentFrame))
        # Start blending frames
        while currentFrame < fbi:
            framesLeft = (currentFrame - fbi) + blendingFrames + 1
            easePosition = -(((fbi - currentFrame - 1) / blendingFrames) - 1)
            offesetW = (width / blendingFrames) * framesLeft
            outputimage = Image.new('RGBA', (width, height), (50, 50, 50, 255))
            outputimage.paste(outputimageZoomPaste, offsetZ)
            outputimage.paste(imgTxtShadow.image, (bufferOffsetW - 1, textHeight + 1), imgTxtShadow.image)
            outputimage.paste(imgTxtShadow.image, (bufferOffsetW - 2, textHeight + 2), imgTxtShadow.image)
            outputimage.paste(imgTxtWhiteShadow.image, (bufferOffsetW + 1, newTextHeight - 1), imgTxtShadow.image)
            outputimage.paste(imgTxt.image, (bufferOffsetW, textHeight), imgTxt.image)
            # Slide the next image in from alternating sides
            if idx % 2 == 0:
                outputimage.paste(outputimageBlend, (-width + int(pytweening.easeInOutQuad(easePosition) * width), 0))
            else:
                outputimage.paste(outputimageBlend, (width - int(pytweening.easeInOutQuad(easePosition) * width), 0))
            video.write(cv2.cvtColor(np.array(outputimage), cv2.COLOR_RGB2BGR))
            currentFrame += 1

    try:
        logging.info("Video saving locally")
        video.release()
        logging.info("Video saved")
        return str(path + "/" + path + ".avi")
    except:
        logging.info("Video failed on release")
        return False
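# Hypothetical call (the file names, captions, and output directory are
# placeholders, and the directory must already exist since the function writes
# into it). The image and description lists should be the same length; on
# success the function returns the path of the rendered AVI:
images = ["gallery/photo1.jpg", "gallery/photo2.jpg", "gallery/photo3.jpg"]
captions = ["First caption", "Second caption", "Third caption"]
video_path = buildSlideShow(images, captions, "myslideshow", "Holiday Gallery")
print(video_path)   # e.g. "myslideshow/myslideshow.avi"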