def _next_frame(self, frame_index: int, im1: image_types, im2: image_types, **params) -> dict:
    """Render a single cross-fade frame of the blending transition.

    :param frame_index: 0-based index of the frame within the transition;
        the blend ratio is frame_index / self.frames_count
    :param im1: first (outgoing) image, any type accepted by
        utils.convert_image_type
    :param im2: second (incoming) image
    :param params: optional keys:
        > frame_resolution: (width, height) tuple, defaults to (1920, 1080)
    :return: snapshot of this call's locals extended with:
        'to_save' -> the blended frame,
        'im1'/'im2' -> the inputs converted to np.ndarray with alpha channel
    """
    # Snapshot locals() first so callers keep receiving the original
    # argument keys (frame_index, params, ...) alongside the results.
    result = locals()
    im1 = utils.verify_alpha_channel(utils.convert_image_type(im1, np.ndarray))
    im2 = utils.verify_alpha_channel(utils.convert_image_type(im2, np.ndarray))
    frame_resolution = params.get('frame_resolution', (1920, 1080))
    # Cross-fade: im2's weight grows with frame_index while im1's shrinks,
    # so the output moves from im1 towards im2 over the transition.
    ts = blend_images_effect(
        im2, im1,
        frame_index / self.frames_count,
        1 - frame_index / self.frames_count,
        resolution=frame_resolution)
    result['to_save'] = ts
    result['im1'] = im1
    result['im2'] = im2
    return result
def set_params(self, image: image_types, frames_count: int = 25,
               frame_resolution: tuple = (1920, 1080)):
    """Precompute the per-frame crop rectangles for an image-travel effect.

    The crop starts at 75% of the target resolution and grows linearly to
    the full resolution over ``frames_count`` frames (a zoom-out travel).

    :param image: source image (see utils.image_types); a 2-element
        sequence is unwrapped to its first element
    :param frames_count: number of frames the travel spans
    :param frame_resolution: (width, height) of the output frames
        (NOTE: the original annotation ``list or tuple`` evaluated to just
        ``list``; any 2-element sequence is accepted)
    :raises AttributeError: if frame_resolution is not a 2-element sequence,
        or the image is smaller than the requested resolution
    """
    if len(frame_resolution) != 2:
        raise AttributeError(
            'ImageTravel param frame_resolution must be tuple of 2 (width, height)'
        )
    res_width, res_height = frame_resolution
    # NOTE(review): a 2-element sequence is unwrapped to its first item —
    # presumably an (image, metadata) pair; confirm against callers.
    if len(image) == 2:
        image = image[0]
    image = utils.verify_alpha_channel(utils.load_image(image, np.ndarray))
    frame_height, frame_width = image.shape[:2]
    if res_width > frame_width or res_height > frame_height:
        raise AttributeError(
            'ImageTravel image size is lower than frame_resolution')
    self.image = image
    self.frame_resolution = frame_resolution
    # Build the growing crop rectangles: (left, top, right, bottom).
    crop_params = []
    base_width, base_height = int(0.75 * res_width), int(0.75 * res_height)
    frame_width_step = int((res_width - base_width) / frames_count)
    frame_height_step = int((res_height - base_height) / frames_count)
    for i in range(1, frames_count + 1):
        crop_params.append((0, 0,
                            base_width + frame_width_step * i,
                            base_height + frame_height_step * i))
    self.crop_params = utils.CircleList(crop_params, frames_count)
def __init__(self, image1: image_types = None, image2: image_types = None,
             dest_dir: str = 'rendered', frames_count=50, **kwargs):
    """Base class for animations between images.

    :param image1: look at utils.image_types; may be None — then the frame
        resolution must come from kwargs ('frame_resolution' or
        'parent_timeline')
    :param image2: look at utils.image_types; may be None
    :param dest_dir: where rendered frames will be saved
    :param frames_count: number of frames to render
    :param kwargs: usable keys:
        > parent_timeline: Timeline
        > prefix: str -> prefix in file name of rendered frame
        > extension: str -> extension of output file ('jpg' or 'png';
          anything else falls back to 'png')
        > frame_resolution: (width, height) of rendered frames
        > load_as: [PillowImage, np.ndarray] -> tells how image files
          should be opened
        > preprocess_first, preprocess_second, postprocess_first,
          postprocess_second:
          - list of callbacks/predicates, CircleList of them, an
            ImageTravel instance, or an ImageTravel class (type name, not
            instance or its __init__ call!)
    :raises AttributeError: when no frame resolution can be determined
    """
    self.im1 = self.im2 = None
    if image1 is not None and image2 is not None:
        self.im1 = utils.verify_alpha_channel(image1)
        self.im2 = utils.verify_alpha_channel(image2)
    if 'prefix' not in kwargs:
        # Use the concrete class name as the file-name prefix.
        # BUG FIX: the original parsed str(self.__class__) and sliced off
        # the final two characters of the class name (split("'")[1] already
        # has no trailing "'>"), truncating every auto-derived prefix.
        self.prefix = type(self).__name__
    else:
        self.prefix = kwargs['prefix']
    # Normalize the output extension; only jpg/png are supported.
    if kwargs.get('extension') not in ('jpg', 'png'):
        kwargs['extension'] = 'png'
    if 'frame_resolution' not in kwargs:
        if self.im1 is None:
            tl: RenderControllers.TimelineModel = kwargs.get(
                'parent_timeline', False)
            if not tl:
                raise AttributeError('Cannot obtain resolution of frame')
            kwargs['frame_resolution'] = tl.get_frame_resolution()
        else:
            kwargs['frame_resolution'] = utils.get_image_size(self.im1)
    self.dest_dir = dest_dir
    self.frames_count = frames_count
    self.kwargs = kwargs
    self.name_counter = 0
    if 'load_as' not in kwargs:
        # Default to Pillow; switch to ndarray when the input already is one.
        self.kwargs['load_as'] = PillowImage
        if isinstance(image1, np.ndarray):
            self.kwargs['load_as'] = np.ndarray
    self.effects = {}
    self.frame_index = -1
def render_project(self, start_frame: int = 0, stop_frame: int = -1,
                   frame_resolution=(-1, -1), **kwargs):
    """Render the slideshow frames to disk, optionally producing an AVI.

    :param start_frame: first global frame index (clamped to >= 0)
    :param stop_frame: exclusive end index; -1 means
        self.total_slide_duration (also used as the upper clamp)
    :param frame_resolution: (width, height); (-1, -1) means "use the
        project resolution self.frame_resolution"
    :param kwargs: usable keys:
        > avi: str -> render an AVI file with that name after the frames
        > show_avi: bool -> open the rendered AVI (Windows 'start')
        > del_frames: bool -> delete the rendered frame files afterwards
        > show_dir: bool -> open the frames directory (skipped when the
          frames were deleted)
    :return: list of rendered frame file names
    """
    start_frame = max(start_frame, 0)
    if stop_frame == -1 or stop_frame > self.total_slide_duration:
        stop_frame = self.total_slide_duration
    # BUG FIX: the original ternary assigned self.frame_resolution in BOTH
    # branches, silently ignoring an explicit frame_resolution argument
    # (and making the resize check below unreachable).
    if frame_resolution == (-1, -1):
        frame_resolution = self.frame_resolution
    file_names = []
    current_transition = None
    frame_step = 1 if start_frame < stop_frame else -1
    for current_frame_index in range(start_frame, stop_frame, frame_step):
        self.global_frame_index = current_frame_index
        transition_dict = self.while_transition()
        current_frame_image, handler_index = self.pick_image_for_frame()
        if isinstance(transition_dict, dict):
            # --- transition between two images ---
            if current_transition is None:
                current_transition = transition_dict['transition'](
                    frames_count=abs(transition_dict['start']
                                     - transition_dict['stop']),
                    frame_resolution=frame_resolution)
            try:
                second = self.image_handlers[
                    transition_dict['nr'] + 1].get_image()
            except IndexError:
                # No next handler: transition the last image into itself.
                second = current_frame_image
            traveled_images_dict = self.images_traveller.next_frame(
                images=[current_frame_image, second])
            # Normalize both images: alpha channel + target resolution.
            for i in range(len(traveled_images_dict['images'])):
                img = utils.verify_alpha_channel(
                    traveled_images_dict['images'][i])
                if utils.get_image_size(img) != frame_resolution:
                    img = cv_effects.resize_image(
                        image=img, dest_resolution=frame_resolution)
                traveled_images_dict['images'][i] = img
            first_image, second_image = traveled_images_dict['images']
            rendered_frame_dict = current_transition.next_frame(
                im1=first_image, im2=second_image,
                frame_resolution=frame_resolution)
        else:
            # --- usual image travel, no transition ---
            current_transition = None
            traveled_images_dict = self.images_traveller.next_frame(
                images=[current_frame_image])
            rendered_frame_dict = {
                'to_save': traveled_images_dict['images'][0]
            }
            if frame_resolution != self.frame_resolution:
                rendered_frame_dict['to_save'] = cv_effects.resize_image(
                    image=rendered_frame_dict['to_save'],
                    dest_image_type=None,  # keep the image's current type
                    dest_resolution=frame_resolution)
        file_name = f'rendered/{self.global_frame_index}.jpg'
        utils.save_image(rendered_frame_dict['to_save'], file_name)
        file_names.append(file_name)
    # ------------------------------------------------------------------
    # Post-processing after all frames are rendered.
    if kwargs.get('avi', False):
        TimelineModel.render_avi_no_sound('rendered', kwargs['avi'], self.fps)
        if kwargs.get('show_avi', False) is True:
            os.system('start ' + kwargs['avi'])
    if kwargs.get('del_frames', False) is True:
        frames_dir = 'rendered'
        if frames_dir[-1] not in ['\\', '/']:
            frames_dir += os.sep
        for file in os.listdir('rendered'):
            os.remove(frames_dir + file)
    if kwargs.get('show_dir', False) is True:
        if kwargs.get('del_frames', False) is False:
            TimelineModel.show_rendered_directory('rendered')
    return file_names