def create_timelapse_video(input_image, image_id, path):

    print("Matched with one of the frame of video with id: " + image_id)

    images = load_frames(path + image_id + ".mp4")
    total_frame = len(images)

    image_files = []
    count = 0

    printProgressBar(0,
                     total_frame,
                     prefix='Progress:',
                     suffix='Complete',
                     length=50)
    for image in images:
        count += 1
        printProgressBar(count,
                         total_frame,
                         prefix='Progress:',
                         suffix='Complete',
                         length=50)
        if image is not None:
            styled_image = color_transfer(image, input_image)
            name = "styled_image" + str(count) + ".jpg"
            cv2.imwrite(name, styled_image)
            image_files.append(name)

    clip = ImageSequenceClip(image_files, fps=10)
    clip.write_videofile('output.mp4')
    cv2.destroyAllWindows()
    for k in image_files:
        os.remove(k)
Example No. 2
def write_gif(filepath, images, fps=24):
    """Saves a sequence of images as an animated GIF.
    Parameters
    ----------
    filepath: str
        The filepath ending with *.gif where to save the file.
    images: list(3-D array) or 4-D array
        A list of images or a 4-D array where the first dimension
        represents the time axis.
    fps: int, optional
        The frame rate.
    """
    # to list
    if not isinstance(images, list):
        splitted = np.split(images, images.shape[0])
        images = [np.squeeze(s, axis=(0, )) for s in splitted]
    elif len(images) == 0:
        return

    # ensure directory exists
    dirpath = os.path.dirname(filepath)
    if dirpath and not os.path.exists(dirpath):
        os.makedirs(dirpath)

    # scale factor for float
    factor = 1
    if tt.utils.image.is_float_image(images[0]):
        factor = 255

    clip = ImageSequenceClip([img * factor for img in images], fps=fps)
    clip.write_gif(filepath, verbose=False)
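A minimal usage sketch for write_gif above; the frame array and output path are hypothetical, and the project's tt.utils.image helper is assumed to be importable.

import numpy as np

# 24 random float frames in [0, 1]; write_gif scales float images by 255 before encoding
frames = np.random.rand(24, 64, 64, 3)
write_gif("out/animation.gif", frames, fps=12)  # hypothetical output path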
def gif(filename, array, fps=10):
    """Creates a gif given a stack of images using moviepy
    Notes
    -----
    works with current Github version of moviepy (not the pip version)
    https://github.com/Zulko/moviepy/commit/d4c9c37bc88261d8ed8b5d9b7c317d13b2cdf62e
    Usage
    -----
    >>> X = randn(100, 64, 64)
    >>> gif('test.gif', X)
    Parameters
    ----------
    filename : string
        The filename of the gif to write to
    array : array_like
        A numpy array that contains a sequence of images
    fps : int
        frames per second (default: 10)
    """
    from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

    # ensure that the file has the .gif extension
    fname, _ = os.path.splitext(filename)
    filename = fname + '.gif'

    # copy into the color dimension if the images are black and white
    if array.ndim == 3:
        array = array[..., np.newaxis] * np.ones(3)

    # make the moviepy clip
    clip = ImageSequenceClip(list(array), fps=fps)
    clip.write_gif(filename, fps=fps)
    return clip
    def run(self):
        clip = ImageSequenceClip(self.list, fps=self.fps)
        clip.write_videofile(self.path)  # to video
        # clip.write_gif(self.path.replace("mp4", "gif"))  # to gif

        # """生成gif"""
        # imageio.mimsave('what.gif',self.gif_list)
        self.trigger.emit()
        shutil.rmtree('temp')
Example No. 5
    def _generate_from_frames(self, frames: List[Frame], fps):
        images = []

        for f in frames:
            images.append(f.image)

        clip = ImageSequenceClip(images, fps=fps)
        audio_clip = self.audio.subclip(frames[0].timestamp,
                                        frames[-1].timestamp)
        new_clip = clip.set_audio(audio_clip)

        return new_clip
Example No. 6
def test_exifrotate():
    image_file = 'media/balloons_portrait.jpg'
    with ImageSequenceClip([image_file], fps=1) as clip:
        frame = clip.get_frame(0)
        assert frame.meta['EXIF_MAIN']['ExifImageWidth'] == 4032
        assert frame.meta['EXIF_MAIN']['ExifImageHeight'] == 3024
        assert frame.meta['EXIF_MAIN']['Orientation'] == 6
        assert clip.size == (3024, 4032)

    with ImageSequenceClip([image_file], fps=1,
                           imageio_params={'exifrotate': False}) as clip:
        assert clip.size == (4032, 3024)
Example No. 7
def test_1():
    images = []
    durations = []

    for i in range(5):
        durations.append(i)
        images.append("media/python_logo.png")
        durations.append(i)
        images.append("media/python_logo_upside_down.png")

    clip = ImageSequenceClip(images, durations=durations)
    assert clip.duration == sum(durations)
    clip.write_videofile("/tmp/ImageSequenceClip1.mp4", fps=30)
Example No. 9
def do_actual_lapse(lapse_instance_id,
                    fps,
                    output_size,
                    image_path_list=[],
                    image_id_list=[]):
    image_path_list = [str(i) for i in image_path_list]  # forcing str, moviepy/issues/293
    # print image_path_list
    try:
        clip = ImageSequenceClip(image_path_list, fps=fps)
    except ValueError as exc:
        [generate_thumbs.delay(i) for i in image_id_list]
        do_actual_lapse.retry(kwargs={
            "lapse_instance_id": lapse_instance_id,
            "fps": fps,
            "output_size": output_size,
            "image_path_list": image_path_list,
            "image_id_list": image_id_list
        },
                              exc=exc,
                              countdown=15)
    lapse_instance = AutoLapseInstance.objects.get(pk=lapse_instance_id)
    uuid = shortuuid.uuid()

    alfile = AutoLapseInstanceFile.objects.create(instance=lapse_instance,
                                                  output_size=output_size,
                                                  uuid=uuid)
    path_prefix = target_path_generator(alfile, prefix=settings.MEDIA_ROOT)
    if not os.path.exists(path_prefix):
        os.makedirs(path_prefix)
    # print path_prefix
    if clip.h % 2 != 0:
        clip.size = (clip.w, clip.h - 1)
    if clip.w % 2 != 0:
        clip.size = (clip.w - 1, clip.h)

    clip.write_videofile(
        video_mp4_name_generator(alfile, prefix=settings.MEDIA_ROOT))
    clip.write_videofile(
        video_webm_name_generator(alfile, prefix=settings.MEDIA_ROOT))
    clip.write_gif(gif_name_generator(alfile, prefix=settings.MEDIA_ROOT))

    alfile.file_video_mp4 = video_mp4_name_generator(alfile)
    alfile.file_video_webm = video_webm_name_generator(alfile)
    alfile.file_video_gif = gif_name_generator(alfile)

    alfile.save()

    lapse_instance.status = LapseInstanceStatus.COMPLETED
    lapse_instance.save()
def gen_video(data_dir, output_data_dir, sess, image_shape, input_image, keep_prob, logits):
    if os.path.exists(output_data_dir):
        shutil.rmtree(output_data_dir)
    os.makedirs(output_data_dir, exist_ok=True)

    for name, image in gen_test_output(sess, logits, keep_prob, input_image, data_dir, image_shape):
        file_name = os.path.join(output_data_dir, name)
        scipy.misc.imsave(file_name, image)

    vid_clip = ImageSequenceClip(output_data_dir, fps=10)
    result_video = os.path.join(output_data_dir, "result.mp4")
    vid_clip.write_videofile(result_video)

    return result_video
Example No. 11
def make_weekly_movie(cam: Cam, executor):
    root = Path(conf.root_dir) / 'data' / cam.name
    path = root / 'regular' / 'imgs'
    start = pendulum.yesterday()
    logger.info(f'Running make weekly movie for ww{start.week_of_year}')
    week_ago = start.subtract(weeks=1).date()
    sequence = []
    morning = pendulum.Time(6)
    evening = pendulum.Time(18)
    for day in sorted(list(path.iterdir()), key=lambda x: pendulum.from_format(x.name, 'DD_MM_YYYY')):
        if pendulum.from_format(day.name, 'DD_MM_YYYY').date() > week_ago:
            for img in sorted(day.iterdir()):
                t_img = img.name.split('.')[0]
                t_img = pendulum.from_format(t_img, 'DD_MM_YYYY_HH-mm-ss').time()
                if morning < t_img < evening:
                    sequence.append(str(img))
    sequence = check_sequence_for_gray_images(sequence, executor)
    txt_clip = make_txt_movie(sequence, 100, executor)
    logger.info(f'Composing clip for weekly movie ww{start.week_of_year}')
    image_clip = ImageSequenceClip(sequence, fps=100)
    clip = CompositeVideoClip([image_clip, txt_clip.set_position(('right', 'top'))], use_bgclip=True)
    movie_path = root / 'regular' / 'weekly' / f'ww{start.week_of_year}.mp4'
    movie_path.parent.mkdir(parents=True, exist_ok=True)
    clip.write_videofile(str(movie_path), audio=False)
    logger.info(f'Finished with clip for weekly movie ww{start.week_of_year}')
    return Movie(clip.h, clip.w, movie_path, sequence[seq_middle(sequence)])
Example No. 12
def make_txt_movie(sequence, fps):
    logger.debug('Creating txt movie..')
    txt_clip = []
    with futures.ThreadPoolExecutor() as pool:
        for item in pool.map(ts_clip, sequence):
            txt_clip.append(item)
    return ImageSequenceClip(txt_clip, fps=fps)
Example No. 13
def video_frame(fct_frame, **kwargs):
    """
    Creates a video from drawings or images.
    *fct_frame* can either be a function which draws a picture at time *t*,
    a list of picture names, or a folder.

    @param      fct_frame       function like ``def make_frame(t: float) -> numpy.ndarray``,
                                or list of images or folder name
    @param      kwargs          additional arguments for function
                                `make_frame <https://zulko.github.io/moviepy/getting_started/videoclips.html#videoclip>`_
    @return                     :epkg:`VideoClip`
    """
    if isinstance(fct_frame, str):
        if not os.path.exists(fct_frame):
            raise FileNotFoundError(
                "Unable to find folder '{0}'".format(fct_frame))
        imgs = os.listdir(fct_frame)
        exts = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff'}
        imgs = [
            os.path.join(fct_frame, _) for _ in imgs
            if os.path.splitext(_)[-1].lower() in exts
        ]
        return video_frame(imgs, **kwargs)
    elif isinstance(fct_frame, list):
        for img in fct_frame:
            if not os.path.exists(img):
                raise FileNotFoundError(
                    "Unable to find image '{0}'".format(img))
        return ImageSequenceClip(fct_frame, **kwargs)
    else:
        return VideoClip(fct_frame, **kwargs)
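A minimal usage sketch for video_frame; the frame size, duration, and folder name below are hypothetical, and duration is assumed to be forwarded to VideoClip through **kwargs.

import numpy as np

def make_frame(t):
    # hypothetical frame generator: a gray frame whose brightness oscillates with t
    level = int(127 + 120 * np.sin(2 * np.pi * t))
    return np.full((120, 160, 3), level, dtype=np.uint8)

clip = video_frame(make_frame, duration=2)          # draw frames on the fly
folder_clip = video_frame("frames_folder", fps=10)  # or load every image in a (hypothetical) folder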
Example No. 14
def clip_to_html(clip: Union[VideoClip, np.ndarray],
                 verbose=False,
                 fps=24,
                 **kwargs) -> str:
    """Convert a MoviePy clip to an HTML string.

    .. code-block:: python

        from IPython.display import display
        clip = ImageSequenceClip(list(np_video))
        display(clip_to_html(clip))

    Args:
        clip: MoviePy clip.
        verbose: Whether to print out FFmpeg information during encoding
        fps: FPS of clip
        **kwargs: Any kwargs to pass down to :py:func:`html_embed`

    Returns:
        String of HTML with a ``<video>`` tag and base64 encoded media.
        Useful for use with ``IPython.display.display`` to show videos.
    """
    if isinstance(clip, (np.ndarray, list)):
        clip = ImageSequenceClip(list(clip), fps=fps)
    assert hasattr(clip, "write_videofile")
    if clip.fps is None:
        clip.fps = fps
    if not verbose:
        rd_kwargs = {"logger": None}
    else:
        rd_kwargs = dict()
    return html_embed(clip, rd_kwargs=rd_kwargs, **kwargs)
Example No. 15
def make_video(curdir, nbjobs=1):
    print("looking up files in ", curdir)
    rep_out = re.match(r".*rep(?P<num>\d+).*", curdir)
    if rep_out:
        rep = int(rep_out.group('num'))
    else:
        rep = 0

    allfiles = glob(j(curdir, 'screenshot_custom_*.png'))
    if rep < replim:  # Only deal with the first reps
        filesbygen = defaultdict(list)
        for fname in allfiles:
            name = basename(fname)
            out = re.match(r"""screenshot_custom_.+_
                           gen_(?P<gen>\d+)
                           (?:_ind_(?P<ind>\d+))?
                           .*\.png""", name, re.VERBOSE)
            if out:
                gen = out.group("gen")
                if out.group("ind"):
                    gen += "i" + out.group("ind")
                filesbygen[gen].append(fname)
        for gen, files in filesbygen.items():
            print("making movie for {}".format(gen))
            outname = j(curdir, '../mov_{}.mp4'.format(gen))
            try:
                newmov = ImageSequenceClip(sorted(files), fps=60)
            except Exception:
                print(files)
                raise
            if exists(outname):
                print("{} already found, concatenating.".format(basename(outname)))
                prev = VideoFileClip(outname)
                newmov = concatenate_videoclips([prev, newmov])
            verbose = sys.stdout.isatty()
            newmov.write_videofile(outname, fps=60, verbose=verbose, progress_bar=verbose, threads=nbjobs)
            print("{} created".format(basename(outname)))
            for pngfile in files:
                os.remove(pngfile)
    else:
        print("Do not make movie for this rep {}, already have others".format(rep))
        for pngfile in allfiles:
            os.remove(pngfile)
def make_movie(movie_name,
               input_folder,
               output_folder,
               file_format,
               fps,
               output_format='mp4',
               reverse=False):
    """
    function that makes the movie of the images data

    :param movie_name: name of the movie
    :type movie_name: string
    :param input_folder: folder where the image series is located
    :type input_folder: string
    :param output_folder: folder where the movie will be saved
    :type output_folder: string
    :param file_format: sets the format of the files to import
    :type file_format: string
    :param fps: frames per second
    :type fps: int or numpy int
    :param output_format: sets the format of the output file;
                          supported types are mp4 and gif
                          (animated gifs create large files)
    :type output_format: string, optional
    :param reverse: sets whether the movie plays one way only or there and back
    :type reverse: bool, optional

    """

    # searches the folder and finds the files
    file_list = glob.glob('./' + input_folder + '/*.' + file_format)

    # sorts the files by name and makes 2 lists to go forward and back
    file_list.sort()
    file_list_rev = sorted(file_list, reverse=True)

    # combines the file list if including the reverse
    if reverse:
        new_list = file_list + file_list_rev
    else:
        new_list = file_list

    if output_format == 'gif':
        # makes an animated gif from the images
        clip = ImageSequenceClip(new_list, fps=fps)
        clip.write_gif(output_folder + '/{}.gif'.format(movie_name), fps=fps)
    else:
        # makes an mp4 from the images
        clip = ImageSequenceClip(new_list, fps=fps)
        clip.write_videofile(output_folder + '/{}.mp4'.format(movie_name),
                             fps=fps)
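A hypothetical call of make_movie; the folder names, file format, and frame rate below are placeholders.

# turn ./frames/*.png into ./out/growth.mp4 at 20 fps, playing forward and then backward
make_movie('growth', 'frames', './out', 'png', 20, output_format='mp4', reverse=True)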
Example No. 17
def test_3():
    images = []
    durations = [2]

    images.append("media/grayscale_landscape.png")

    # image is grayscale, should be transformed into rgb
    with ImageSequenceClip(images, durations=durations) as clip:
        assert clip.duration == sum(durations)
        clip.write_videofile(os.path.join(TMP_DIR, "ImageSequenceClip3.mp4"),
                             fps=30)
Example No. 18
def save_video(frames, path, fps=15):
    from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

    temp_dir = tempfile.TemporaryDirectory()
    logger.info("saving video",
                num_frames=len(frames),
                fps=fps,
                path=path,
                temp_dir=temp_dir.name)
    try:
        for i, frame in enumerate(tqdm(frames)):
            if torch.is_tensor(frame):
                frame = frame.permute(1, 2, 0).detach().cpu().numpy()
            frame_path = Path(temp_dir.name, f'{i:08d}.jpg')
            imageio.imsave(frame_path, (frame * 255).astype(np.uint8))

        video = ImageSequenceClip(temp_dir.name, fps=fps)
        video.write_videofile(str(path), preset='ultrafast', fps=fps)
    finally:
        temp_dir.cleanup()
def test_2():
    images = []
    durations = []

    durations.append(1)
    images.append("media/python_logo.png")
    durations.append(2)
    images.append("media/matplotlib_demo1.png")

    # images are not the same size
    with pytest.raises(Exception):
        ImageSequenceClip(images, durations=durations).close()
Example No. 20
def narrated_image_seq(narration_path, image_dir, fmt):
    """
    Create a video which displays a looping slideshow of images
    and attaches audio of a narrator reading the section text.
    """
    audio_clip_text = AudioFileClip(narration_path + f'_text.{fmt}').set_fps(1)
    images = files_in_directory(image_dir)
    return (ImageSequenceClip(
        sequence=images,
        durations=const_list(IMG_DURATION, len(images)),
        load_images=True).set_position(('center', 400)).fx(
            vfx.loop,
            duration=audio_clip_text.duration).set_audio(audio_clip_text))
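A hypothetical call of narrated_image_seq, assuming a narration file such as narration/intro_text.mp3 and a slides/ directory of images exist; the output file name is made up.

section_clip = narrated_image_seq('narration/intro', 'slides', 'mp3')
section_clip.write_videofile('intro_section.mp4', fps=24)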
Example No. 21
def test_1():
    images = []
    durations = []

    for i in range(5):
        durations.append(i)
        images.append("media/python_logo.png")
        durations.append(i)
        images.append("media/python_logo_upside_down.png")

    with ImageSequenceClip(images, durations=durations) as clip:
        assert clip.duration == sum(durations)
        clip.write_videofile(os.path.join(TMP_DIR, "ImageSequenceClip1.mp4"), fps=30)
def merge_images_and_audio(images: DataList, audio: np.ndarray,
                           video_duration: float, sound_hz: int,
                           video_name: str):
    """
    Creates video with sound from image list and music.

    Args:
        images: List of images represented by a h x w x 3 numpy array.
        audio: A Numpy array representing the sound, of size Nx1 for mono, Nx2 for stereo.
        video_duration: Duration of the video in seconds (should be the same as the audio file).
        sound_hz: The hz of the audio file.
        video_name: The name of the resulting video file
    """
    # todo there is still a problem with the audio here
    # the audio should always contain two channels
    # then the hz should also work for mono and dual
    clip = ImageSequenceClip(images,
                             durations=[video_duration / len(images)] *
                             len(images))
    s = audio.reshape((len(audio), 2))  # transform it from (N) to (N, 2)
    audio = AudioArrayClip(s, sound_hz)
    clip = clip.set_audio(audio)
    clip.write_videofile(video_name, fps=len(images) / video_duration)
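A minimal sketch of calling merge_images_and_audio with synthetic data; the frame size, tone, and file name are made up for illustration.

import numpy as np

frames = [np.full((240, 320, 3), i * 25, dtype=np.uint8) for i in range(10)]  # 10 solid gray frames
t = np.linspace(0, 5, 5 * 44100)
tone = 0.2 * np.sin(2 * np.pi * 440 * t)
stereo = np.column_stack([tone, tone])  # duplicate the mono tone into two channels
merge_images_and_audio(frames, stereo, 5.0, 44100, 'slideshow_with_tone.mp4')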
Example No. 23
def display_video(video: Union[torch.Tensor, np.ndarray, List[np.ndarray]],
                  format="THWC",
                  fps=12):
    """
    Args:
        video: Video array or tensor with values ranging between 0--255.
        format: TCHW in any order, describing the data layout of the video array
    """
    if isinstance(video, list):
        video = np.stack(video)
    if isinstance(video, torch.Tensor):
        video = video.numpy()
    video = np.einsum(f"{format} -> THWC", video.astype(np.uint8))
    _, height, width, _ = video.shape
    if height % 2 != 0:
        video = video[:, :-1, :, :]
    if width % 2 != 0:
        video = video[:, :, :-1, :]
    print(video.shape)
    frames: List[np.ndarray] = list(video)
    clip = ImageSequenceClip(frames, fps=fps)

    return clip.ipython_display()
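A hypothetical notebook usage of display_video with channel-first input; the random data below only illustrates the expected shape and value range.

import numpy as np

video = np.random.randint(0, 256, size=(30, 3, 64, 64), dtype=np.uint8)  # 30 random TCHW frames
display_video(video, format="TCHW", fps=12)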
Example No. 24
def make_movie(cam: Cam, day: str, regular: bool = True):
    regular = 'regular' if regular else ''
    root = Path(conf.root_dir) / 'data' / cam.name
    path = root / 'regular' / 'imgs' / day
    logger.info(f'Running make movie for {path}:{day}')
    # sequence = check_sequence_for_gray_images(sorted(str(p) for p in path.iterdir()))
    sequence = sorted(str(p) for p in path.iterdir())
    txt_clip = make_txt_movie(sequence, cam.fps)
    logger.info(f'Composing clip for {path}:{day}')
    image_clip = ImageSequenceClip(sequence, fps=cam.fps)
    logger.info(f'ImageSequenceClip ready')
    clip = CompositeVideoClip(
        [image_clip, txt_clip.set_position(('right', 'top'))], use_bgclip=True)
    logger.info(f'CompositeVideoClip ready')
    movie_path = root / regular / 'clips' / f'{day}.mp4'
    movie_path.parent.mkdir(parents=True, exist_ok=True)
    clip.write_videofile(str(movie_path), audio=False)
Example No. 25
    def test(self):
        total_reward = 0
        for i in range(self.config.conf['test-num']):
            quat = self.ref_motion.euler_to_quat(0,0,0)
            _ = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], base_pos_nom=[0,0,1.575], base_orn_nom=quat, fixed_base=True)
            # state = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], base_pos_nom=[0, 0, 1.175], fixed_base=False)
            q_nom = self.ref_motion.ref_motion_dict()
            base_orn_nom = self.ref_motion.get_base_orn()

            # state = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], q_nom=q_nom, base_orn_nom=base_orn_nom, base_pos_nom=[0, 0, 1.175], fixed_base=False)
            # self.env._setupCamera()
            self.env.startRendering()
            self.env._startLoggingVideo()

            for step in range(self.max_step_per_episode):
                if step>=2*self.network_freq and step<4*self.network_freq:
                    action = [0,0,0,0,0,0,0,0,0.1,0,0]
                else:
                    action = [0,0,0,0,0,0,0,0,0,0,0]
                # action = np.clip(action, self.config.conf['actor-output-bounds'][0],
                #                  self.config.conf['actor-output-bounds'][1])
                action = np.array([action]) if len(np.shape(action)) == 0 else np.array(action)

                rgb=self.env._render(roll=0,pitch=0,yaw=90)
                print(rgb.shape)
                self.image_list.append(rgb)
                for i in range(self.sampling_skip):
                    # action = self.control.rescale(ref_action, self.config.conf['action-bounds'],
                    #                               self.config.conf['actor-output-bounds'])
                    _,_,_,_ = self.env._step(action)

                    self.logging.add_run('action', action)

                    joint_angle = self.control.get_joint_angle()
                    self.logging.add_run('joint_angle', joint_angle)
                    readings = self.env.getExtendedReading()
                    ob = self.env.getObservation()
                    for l in range(len(ob)):
                        self.logging.add_run('observation' + str(l), ob[l])
                    # for key, value in readings.items():
                    #     self.logging.add_run(key, value)

            self.env._stopLoggingVideo()
            self.env.stopRendering()

        ave_reward = total_reward/self.config.conf['test-num']
        print(ave_reward)
        self.logging.save_run()


        clip = ImageSequenceClip(self.image_list, fps=25)
        clip.write_gif(self.dir_path+'/test.gif')
        clip.write_videofile(self.dir_path+'/test.mp4', fps=25, audio=False)
Example No. 26
def do_actual_lapse(lapse_instance_id, fps, output_size, image_path_list=[], image_id_list=[]):
    image_path_list = [str(i) for i in image_path_list]  # forcing str, moviepy/issues/293
    # print image_path_list
    try:
        clip = ImageSequenceClip(image_path_list, fps=fps)
    except ValueError as exc:
        [generate_thumbs.delay(i) for i in image_id_list]
        do_actual_lapse.retry(kwargs={"lapse_instance_id":lapse_instance_id, "fps":fps, "output_size":output_size, "image_path_list":image_path_list, "image_id_list":image_id_list}, exc=exc, countdown=15)
    lapse_instance = AutoLapseInstance.objects.get(pk=lapse_instance_id)
    uuid = shortuuid.uuid()


    alfile = AutoLapseInstanceFile.objects.create(instance=lapse_instance, output_size=output_size, uuid=uuid)
    path_prefix = target_path_generator(alfile, prefix=settings.MEDIA_ROOT)
    if not os.path.exists(path_prefix):
        os.makedirs(path_prefix)
    # print path_prefix
    if clip.h % 2 != 0:
        clip.size = (clip.w, clip.h - 1)
    if clip.w % 2 != 0:
        clip.size = (clip.w - 1, clip.h)

    clip.write_videofile(video_mp4_name_generator(alfile, prefix=settings.MEDIA_ROOT))
    clip.write_videofile(video_webm_name_generator(alfile, prefix=settings.MEDIA_ROOT))
    clip.write_gif(gif_name_generator(alfile, prefix=settings.MEDIA_ROOT))

    alfile.file_video_mp4 = video_mp4_name_generator(alfile)
    alfile.file_video_webm = video_webm_name_generator(alfile)
    alfile.file_video_gif = gif_name_generator(alfile)

    alfile.save()

    lapse_instance.status = LapseInstanceStatus.COMPLETED
    lapse_instance.save()
Example No. 27
def job(item):
    fn = item
    outpath = os.path.join(fn, 'flow.mp4')
    if not os.path.exists(outpath):
        flows = torch.stack([
            torch.from_numpy(read_flow(_))
            for _ in glob(os.path.join(fn, '*.flo'))
        ])
        flows = list(normalize_flows(flows))
        rgb_flows = [make_uint8(flow2rgb(_.numpy())) for _ in flows]
        vid = ImageSequenceClip(rgb_flows, fps=8)
        vid.write_videofile(outpath, fps=8, verbose=False, logger=None)
        vid.close()
Example No. 28
def make_beatmeter(
        stack: ExitStack,
        beatmeter: str,
        fps: float,
        duration: float,
        dims: (int, int),
):
    xdim, ydim = dims
    beat_image_filenames = [
        str(os.path.join(beatmeter, filename))
        for filename in sorted(os.listdir(beatmeter))[:ceil(duration * fps)]
    ]
    beatmeter = stack.enter_context(
        ImageSequenceClip(beat_image_filenames, fps=fps))
    # resize and fit beatmeter
    # pylint: disable=no-member
    new_height = (beatmeter.h * xdim / beatmeter.w)
    beatmeter = resize(beatmeter, (xdim, new_height))
    beatmeter = beatmeter.set_position(("center", ydim - 20 - beatmeter.h))
    return beatmeter
    def generateClip(self, filename):
        glutHideWindow()
        self.runSimulation()
        clip = ImageSequenceClip(self.frames, fps=self.FPS)
        clip.write_videofile(filename, fps=self.FPS)
Example No. 30
                'image': open('./image_data/frame' + str(currentframe) + '.jpg', 'rb'),
            },
            headers={'api-key': 'PUT API KEY FOR DEEPAI HERE'}
        )
        response = r.json()
        print(response)

        imagelink = requests.get(response['output_url'])

        file = open("./deep_image/" + str(currentframe) + '.jpg', "wb")
        file.write(imagelink.content)
        file.close()
        currentframe += 1
    else:
        break

cap = cv2.VideoCapture("video.mp4")

fpsa = cap.get(cv2.CAP_PROP_FPS)
print(fpsa)
im = Image.open('./deep_image/0.jpg')

print(im.size)
print(type(im.size))
w, h = im.size

clip = ImageSequenceClip("./deep_image/", fps = fpsa)

clip.write_videofile("deep_video.mp4", fps=clip.fps,
                      audio_bitrate="1000k", bitrate="4000k")
Example No. 31
from moviepy import *
from moviepy.editor import *
from moviepy.video.VideoClip import TextClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
import os

from moviepy.video.io.VideoFileClip import VideoFileClip

basic_directory = 'img'
basic_files = os.listdir(basic_directory)
clip = ImageSequenceClip([
    '{0}/{1}'.format(basic_directory, basic_files[0]),
    '{0}/{1}'.format(basic_directory, basic_files[1]),
    '{0}/{1}'.format(basic_directory, basic_files[2]),
    '{0}/{1}'.format(basic_directory, basic_files[3]),
], fps=0.5)
clip.write_videofile("myHolidays_edited.mp4", audio='music/sunny.mp3')
clip = VideoFileClip("myHolidays_edited.mp4").subclip(0, 15)
clip.write_videofile("myHolidays_edited.mp4", codec='mpeg4')
Example No. 32
for r, (inputData, outputData, t0, t1, v0, v1, neuron_data) in enumerate(simulated):
    times = [t for t, v in list(neuron_data.values())[0]]
    nodes = list(neuron_data.keys())
    time_data = []
    for i, t in enumerate(times):
        tdata = {}
        for n in nodes:
            tdata[n] = neuron_data[n][i][1]

        tdata[0] = 0.0 if inputData[0] == 0 else -75.0
        tdata[1] = 0.0 if inputData[1] == 0 else -75.0

        time_data.append(tdata)

    for ti, (t, tdata) in enumerate(zip(times, time_data)):
        node_colors = {}
        for n, v in tdata.items():
            node_colors[n] = voltage_to_color(v)

        fn = 'spiking-{0:04d}'.format(len(filenames))
        dot = visualize.draw_net(winner, filename=fn, view=False, node_names=node_names, node_colors=node_colors,
                                 fmt='png', show_disabled=False, prune_unused=True)
        filenames.append(dot.filename + '.png')

clip = ImageSequenceClip(filenames, fps=30)
clip.write_videofile('spiking.mp4', codec="mpeg4", bitrate="2000k")

for fn in filenames:
    os.unlink(fn)
    os.unlink(fn[:-4])
Example No. 33
    def test(self):
        total_reward = 0
        for i in range(self.config.conf['test-num']):
            # _ = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], base_pos_nom=[0,0,1.5], fixed_base=True)
            state = self.env._reset(Kp=self.config.conf['Kp'],
                                    Kd=self.config.conf['Kd'],
                                    base_pos_nom=[0, 0, 1.175],
                                    fixed_base=False)
            q_nom = self.ref_motion.ref_motion_dict()
            base_orn_nom = self.ref_motion.get_base_orn()

            # state = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], q_nom=q_nom, base_orn_nom=base_orn_nom, base_pos_nom=[0, 0, 1.175], fixed_base=False)
            # self.env._setupCamera()
            self.env.startRendering()
            self.env._startLoggingVideo()
            self.ref_motion.reset(index=0)
            # self.ref_motion.random_count()

            self.control.reset(
                w_imitation=self.config.conf['imitation-weight'],
                w_task=self.config.conf['task-weight'])

            for step in range(self.max_step_per_episode):
                # self.env._setupCamera()
                t = time.time()
                gait_phase = self.ref_motion.count / self.ref_motion.dsr_length
                ref_angle = self.ref_motion.ref_joint_angle()
                ref_vel = self.ref_motion.ref_joint_vel()

                self.env.checkSelfContact()

                state = self.env.getExtendedObservation()
                state = np.squeeze(state)
                state = np.append(state, [
                    np.sin(np.pi * 2 * gait_phase),
                    np.cos(np.pi * 2 * gait_phase)
                ])
                # state = np.append(state,[0,0])

                action, actor_info = self.agent.agent.actor.get_action(state)
                mean = actor_info['mean']
                logstd = actor_info['logstd']
                action = mean
                # action = np.clip(action, self.config.conf['actor-output-bounds'][0],
                #                  self.config.conf['actor-output-bounds'][1])
                action = np.array([action]) if len(
                    np.shape(action)) == 0 else np.array(action)

                f = self.env.rejectableForce_xy(1.0 / self.network_freq)
                rgb = self.env._render()
                print(rgb.shape)
                self.image_list.append(rgb)

                # action = self.control.rescale(ref_action, self.config.conf['action-bounds'],
                #                               self.config.conf['actor-output-bounds'])
                self.control.update_ref(ref_angle, ref_vel, [])
                next_state, reward, terminal, info = self.control.control_step(
                    action, self.force, gait_phase)
                self.ref_motion.index_count()

                total_reward += reward

                ob = self.env.getObservation()
                ob_filtered = self.env.getFilteredObservation()
                # for l in range(len(ob)):
                #     self.logging.add_run('observation' + str(l), ob[l])
                #     self.logging.add_run('filtered_observation' + str(l), ob_filtered[l])
                self.logging.add_run('action', action)
                self.logging.add_run('ref_action', ref_angle)
                joint_angle = self.control.get_joint_angle()
                self.logging.add_run('joint_angle', joint_angle)
                readings = self.env.getExtendedReading()
                # for key, value in readings.items():
                #     self.logging.add_run(key, value)
                self.logging.add_run('task_reward', info['task_reward'])
                self.logging.add_run('imitation_reward',
                                     info['imitation_reward'])
                self.logging.add_run('total_reward', info['total_reward'])
                #
                # while 1:
                #     if(time.time()-t)>1.0/self.network_freq:
                #         break

                if terminal:
                    break
            self.env._stopLoggingVideo()
            self.env.stopRendering()

        clip = ImageSequenceClip(self.image_list, fps=25)
        clip.write_gif(self.dir_path + '/test.gif')
        clip.write_videofile(self.dir_path + '/test.mp4', fps=25, audio=False)
        ave_reward = total_reward / self.config.conf['test-num']

        print(ave_reward)
        self.logging.save_run()
Example No. 34
def video_map_images_people(video_or_file,
                            resize=('max2', 400),
                            fps=None,
                            with_times=False,
                            logger=None,
                            dtype=None,
                            class_to_keep=15,
                            fLOG=None,
                            **kwargs):
    """
    Extracts characters from a movie.
    The result is composed of an image and a
    `mask <https://zulko.github.io/moviepy/ref/AudioClip.html?highlight=mask#moviepy.audio.AudioClip.AudioClip.set_ismask>`_
    that lets the underlying image show through when this video is
    overlaid on another one.

    @param      video_or_file   string or :epkg:`VideoClip`
    @param      resize          see :meth:`predict <code_beatrix.ai.image_segmentation.DLImageSegmentation.predict>`
    @param      fps             see @see fn video_enumerate_frames
    @param      with_times      see @see fn video_enumerate_frames
    @param      logger          see @see fn video_enumerate_frames
    @param      dtype           see @see fn video_enumerate_frames
    @param      class_to_keep   class to keep from the image, it can
                                a number (15 for the background, a list of classes,
                                a function which takes an image and the prediction
                                and returns an image)
    @param      fLOG            logging function
    @param      kwargs          see @see cl DLImageSegmentation
    @return                     :epkg:`VideoClip`

    .. warning:: A couple of errors may occur: timeout, out of memory...
        The following processes might be quite time consuming
        or memory consuming. If that is the case, you should think
        of reducing the resolution or the number of frames per second
        (*fps*). You can also split the video, process each piece
        independently, and finally concatenate the results.

    .. exref::
        :title: Extract characters from a video.

        The following example shows how to extract a movie with
        people and without the background. It works better
        if the contrast between the characters and the background is
        high.

        ::

            from code_beatrix.art.video import video_extract_video, video_save
            from code_beatrix.art.videodl import video_map_images

            vide = video_extract_video("something.mp4", 0, 5)
            vid2 = video_map_images(vide, fps=10, name="people", logger='bar')
            video_save(vid2, "people.mp4")

        The function returns something like the following.
        The character is wearing black and the background is quite
        dark too. That explains the kind of large halo
        around the character.

        .. video:: videodl.mp4
    """
    if isinstance(class_to_keep, int):

        def local_mask(img, pred):
            img[pred != class_to_keep] = 0
            return img
    elif isinstance(class_to_keep, (set, tuple, list)):

        def local_mask(img, pred):
            dist = set(pred.ravel())
            rem = set(class_to_keep)
            for cl in dist:
                if cl not in rem:
                    img[pred == cl] = 0
            return img
    elif callable(class_to_keep):
        local_mask = class_to_keep
    else:
        raise TypeError(
            "class_to_keep should be an int, a list or a function not {0}".
            format(type(class_to_keep)))

    if fLOG:
        fLOG('[video_map_images_people] loads deep learning model')
    model = DLImageSegmentation(fLOG=fLOG, **kwargs)
    iter = video_enumerate_frames(video_or_file,
                                  fps=fps,
                                  with_times=with_times,
                                  logger=logger,
                                  dtype=dtype,
                                  clean=False)
    if fLOG is not None:
        if fps is not None:
            every = max(fps, 1)
            unit = 's'
        else:
            every = 20
            unit = 'i'

    if fLOG:
        fLOG('[video_map_images_people] starts extracting characters')
    seq = []
    for i, img in enumerate(iter):
        if not logger and fLOG is not None and i % every == 0:
            fLOG('[video_map_images_people] process %d%s images' % (i, unit))
        if resize is not None and isinstance(resize[0], str):
            if len(img.shape) == 2:
                resize = DLImageSegmentation._new_size(img.shape, resize)
            else:
                resize = DLImageSegmentation._new_size(img.shape[:2], resize)
        img, pred = model.predict(img, resize=resize)
        img2 = local_mask(img, pred)
        seq.append(img2)
    if fLOG:
        fLOG('[video_map_images_people] done.')

    return ImageSequenceClip(seq, fps=fps)