Example #1
    def animate(self, zs, time_range=(None,), max_frames=75, image_width=7, fps=15, filename=None):
        print("Rendering animation ...", file=sys.stderr)
        fig, space_step, Q, P, T, scatter_scale = self.plot(0, zs, image_width=image_width, show_plot=False)

        if not hasattr(zs, "__iter__"):
            zs = [zs]

        time_slice = slice(*time_range)
        data = self.datas[time_slice]
        time_step = (len(data) + max_frames - 1) // max_frames if max_frames > 0 else 1
        data = data[::time_step]
        times = self.times[time_slice][::time_step]

        steps = len(data)
        with progressbar.ProgressBar(max_value=steps) as bar:
            def do_steps(i):
                for j, z in enumerate(zs):
                    mx, my, mz  = data[i][:,::space_step,::space_step,z]
                    Q[j].set_UVC(mx, my)
                    P[j].set_array(mz.reshape(-1))
                    P[j].set_sizes(mz.reshape(-1) ** 2 * scatter_scale)
                    T[j].set_text(r"$z = {:3.2e}$ m, $t={:3.2e} $s".format(z * self.lengths[2] / self.dims[2], times[i]))
                    bar.update(i + 1)
                return Q,

            anim = animation.FuncAnimation(fig, do_steps, range(steps), interval=1000/fps)
            if filename is not None:
                anim.save(filename, fps=fps)
                plt.close()
                display(Video(filename))
            else:
                plt.close()
                display(anim)
Example #2
def notebook_video(video):
    """Display IPython Video object in the notebook

    :param video: path to the video file
    """
    if video:
        display(Video(video))
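
For context, a minimal sketch of the call this helper wraps (file name illustrative, not from the source): by default IPython's Video references the file by URL relative to the notebook, while embed=True inlines the data into the .ipynb itself, making it larger but self-contained.

from IPython.display import Video, display

display(Video("clips/demo.mp4"))              # referenced by relative URL; small notebook file
display(Video("clips/demo.mp4", embed=True))  # base64-embedded; survives notebook export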
Example #3
def create_video(self,
                 genome,
                 config,
                 fout="./out.mp4",
                 fps=30,
                 quality=7,
                 render=False,
                 i_seed=0):
    net = self.make_net(genome, config)
    env = gym.make(self._env_name)
    if i_seed is not None:
        env.seed(self._seed[i_seed])

    state = env.reset()
    done = False
    imgs = []

    while not done:
        if render:
            env.render()
        action = self.activate_net(net, state)
        state, _, done, _ = env.step(action)
        imgs.append(state)

    env.close()

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        imageio.mimwrite(fout, imgs, fps=fps, quality=quality)

    return Video(fout)
Example #4
def show_simple_run(x0,
                    ca,
                    num_steps=20,
                    batch=0,
                    only_alive_cells=False,
                    white_background=False):
    x = x0
    with VideoWriter("_autoplay.mp4") as vid:
        for i in range(num_steps):
            x = ca(x)
            if only_alive_cells:
                res = utils.zoom(
                    (x[batch, :, :, 3].numpy() > 0.1).astype(np.float32))
                vid.add(res)
            else:
                vid.add(
                    utils.zoom(
                        rgba_to_rgb(ca.classify(x)[batch],
                                    white_background=white_background)))
    run = Video("./_autoplay.mp4",
                width=320,
                height=220,
                html_attributes="autoplay controls",
                embed=True)
    return run
Example #5
    def _repr_html_(self):
        """TODO: codec"""
        from IPython.display import Video

        embed = True
        if urlparse(self.uri).scheme.lower().startswith("http"):
            embed = False
        return Video(self.uri, embed=embed, width=480, height=320)._repr_html_()
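
This pattern works because Jupyter's rich display protocol calls _repr_html_ on the last expression of a cell. A minimal self-contained sketch of the same idea (the MediaFile class and URI are illustrative, not from the source):

from urllib.parse import urlparse

class MediaFile:
    def __init__(self, uri):
        self.uri = uri

    def _repr_html_(self):
        from IPython.display import Video
        # Embed local files; let http(s) URIs stream from their own server.
        embed = not urlparse(self.uri).scheme.lower().startswith("http")
        return Video(self.uri, embed=embed, width=480, height=320)._repr_html_()

MediaFile("clip.mp4")  # as the last expression of a cell, this renders as a <video> element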
Example #6
def make_training_video(folder_dir):
    files = sorted([
        os.path.join(folder_dir, f) for f in os.listdir(folder_dir)
        if f.endswith('.png')
    ])
    frames = [mpy.ImageClip(f).set_duration(1) for f in files]
    clip = mpy.concatenate_videoclips(frames, method="compose")
    clip.write_videofile("movie.mp4", fps=15)
    return Video("movie.mp4")
Example #7
def save_video(video_frames, video_preds, fps, output_path):

    ani = animated_landmarks(video_frames, video_preds, fps=fps)
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=fps, metadata=dict(artist='StanfordFacialNerveCenter'))
    print(f'output video: {output_path}')
    ani.save(output_path, writer=writer)

    return Video(output_path)
Example #8
    def displayCanvas(self,
                      canvas,
                      filename=None,
                      width="100%",
                      loop=True,
                      autoplay=True,
                      controls=True,
                      center=False):
        """ displayCanvas """

        if canvas is None:
            return None

        if self.animation == 'none':
            #
            # Just create a frame from the last state and display it
            #
            AnimationDisabledError = "Note: Canvas animation has been disabled - showing final frame"

            im = canvas.getLastFrame(AnimationDisabledError)
            display(im)
            return

        if self.animation == 'spacetime':
            #
            # Get the spacetime diagrams
            #

            print("Spacetime")

            for image in canvas.getLastFrame():
                display(image)

            return

        if filename is None:
            basename = Path(self._random_string(10) + ".mp4")
            filename = self.tmpdir / basename

        posix_filename = filename.as_posix()
        canvas.saveMovie(posix_filename)

        # TBD: Actually pay attention to width and centering
        final_width = "" if width is None else " width=\"{0}\"".format(width)
        final_center = "" if not center else " style=\"display:block; margin: 0 auto;\""

        final_loop = "" if not loop else " loop"
        final_autoplay = "" if not autoplay else " autoplay"
        final_controls = "" if not controls else " controls"

        final_attributes = f"{final_loop}{final_autoplay}{final_controls}"

        video = Video(f"./{posix_filename}",
                      html_attributes=final_attributes,
                      width=800)
        display(video)
Example #9
 def show(self):
     try:
         if os.path.splitext(self.uri)[-1].lower() == ".gif":
             from IPython.display import Image
             display(Image(self.uri))
         else:
             from IPython.display import Video
             display(Video(self.uri, html_attributes="loop autoplay muted controls"))
     except:
         pass
Example #10
    def animation_scan_mcp(self, scan_num, v_range, fig_size, font_size, show=1, clean=1):
        '''
            Generate animations with specified MCP scan_num.
            v_range = [v_min, v_max]
            v_max = -1: use highest count as vmax
        '''
        start_time = time.time()

        # read data
        (imgs_data, positions_data) = self.img_data_scan_mcp(scan_num)
        [v_min, v_max] = v_range
        if v_max == -1: # use highest count as vmax
            v_max = np.amax(imgs_data)

        # prepare directory
        DIR = './' + self.PROJECT_NAME + '/Data/MCP_images/scan_' + str(scan_num).zfill(3)
        if not os.path.exists(DIR):
            os.makedirs(DIR)
        DIR_movie = './' + self.PROJECT_NAME + '/Data/MCP_images/movie_scan_' + str(scan_num).zfill(3) + '.mp4'

        # plotting
        print("Start generating images...")
        for snap_no in range(len(imgs_data)):
            fig = plt.figure(figsize=fig_size)
            plt.rcParams.update({'font.size': font_size})

            ax = fig.add_subplot(111)
            ax.set_title('snap ' + str(snap_no))
            img = plt.imshow(imgs_data[snap_no], vmin=v_min, vmax=v_max)
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="5%", pad=0.1)
            plt.colorbar(img, cax)

            fig.tight_layout()
            plt.savefig(DIR + '/snap_' + str(snap_no).zfill(2) + '.png', dpi = 150, format = 'png')
            plt.close()
        print("Done.")

        # generate animation using ffmpeg
        print("Start generating animation...")
        os.system("ffmpeg -r 5 -i " + DIR + "/snap_%02d.png" 
                  + " -vcodec mpeg4 -y " + DIR_movie)
        print("Done.")
        # delete all figures
        if clean == 1:
            shutil.rmtree(DIR)
            print("All images are deleted.")

        elapsed_time = time.time() - start_time
        print("Time consuming: {0:.3f}s.".format(elapsed_time))

        if show == 1:
            return Video(DIR_movie, width=600, height=600)

        return
Example #11
def show_dataset(data):
    ''' Shows dataset in a video (time is the slice dimension) '''

    if len(data.shape) == 4:
        _data = combine_channels(data)
    elif len(data.shape) == 3:
        _data = data
    else:
        return None

    _data = (np.abs(_data) * 255).astype(np.uint8)
    imageio.mimwrite('_.mp4', _data, fps=30); 
    return Video('_.mp4', width=_data.shape[2], height=_data.shape[1])
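
A hypothetical usage sketch (random data; shapes illustrative): 60 frames of 128x128 magnitudes in [0, 1) become a two-second clip at 30 fps.

import numpy as np

frames = np.random.rand(60, 128, 128)  # (time, height, width)
show_dataset(frames)                   # writes _.mp4 and returns an IPython Video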
Example #12
 def play(self, player = None, **kwargs):
     from IPython.display import Video, HTML
     if player:
         subprocess.call([player + " " + self.vid_file], shell=True)
         return
     try:  # if running in a notebook, get_ipython exists and a Video object is returned
         get_ipython
         return Video(self.vid_file, embed=True)
     except NameError:
         # Running from a terminal: `get_ipython` is undefined,
         # so fall back to the configured external player.
         subprocess.call([globalVariables.video_player + " " + self.vid_file],
                         shell=True)
Example #13
def load_video(video_path, output_size=(270, 480)):

    cap = cv2.VideoCapture(video_path)
    frames_list = []
    while True:
        success, frame = cap.read()
        if not success:
            break
        frame = cv2.cvtColor(cv2.resize(frame, output_size), cv2.COLOR_BGR2RGB)
        frames_list.append(frame)
    print(
        f'Video {video_path} loaded: {len(frames_list)} frames with shape {np.shape(frames_list[0])}'
    )
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()

    return frames_list, fps, Video(video_path, width=output_size[0])
Example #14
def display_video(selection):
    from IPython.display import Video
    from pathlib import Path

    # TODO: Handle when selection is an abs-path
    try:
        # selection is an absolute path and Video expects
        # a path relative to the current working directory
        rel_path = str(Path(selection).relative_to(Path.cwd()))
    except ValueError:
        # a relative path was given
        rel_path = selection
    
    display(f'Displaying "{rel_path}"')

    return Video(rel_path)
Example #15
    def display(self, width: int = None, height: int = None, **kwargs):
        """
        Customize visualization in jupyter notebook

        Parameters
        ----------
        width: int, default None
            Width in pixels. Defaults to the original video width
        height: int, default None
            Height in pixels. Defaults to the original video height
        kwargs: dict
            See :py:class:`IPython.display.Video` doc for other kwargs

        Returns
        -------
        v: IPython.display.Video
        """
        from IPython.display import Video

        return Video(self.uri, width=width, height=height, **kwargs)
Example #16
def animate_decision_regions(X, Y, predict_history, *, Xhistory=None, ax=None, filename=None, interval=50):
    '''
    predict_history: sequence of fitted predict functions, one per animation frame
    Xhistory: optional sequence of training sets matching predict_history
    '''
    import matplotlib.pyplot as plt
    if ax is None: f, ax = plt.subplots()
    f = ax.figure

    # HACK HACK comment this
    if Xhistory is None:
        Xhistory = [X] * len(predict_history)

    kw = dict(force_matplotlib_output_png_hack=False)

    plot_decision_regions(predict_history[0], Xhistory[0], Y, ax=ax, **kw)

    def update(t):
        for a in ax.lines + ax.collections:
            a.remove()
        plot_decision_regions(predict_history[t], Xhistory[t], Y, ax=ax, **kw)
        return []

    a = animation.FuncAnimation(
        f, update, frames=len(predict_history), interval=interval, blit=True, repeat=False)
    plt.close()

    if filename is not None:
        assert filename.endswith('.gif') or filename.endswith('.mp4'), 'Only supports exporting to .gif or .mp4'
        if filename.endswith('.mp4'):
            Writer = animation.writers['ffmpeg']
            writer = Writer(fps=1000./interval, bitrate=1800)
            a.save(filename, writer=writer)
            from IPython.display import Video
            return Video(filename)
        else:
            a.save(filename, writer='imagemagick')
            from IPython.display import Image
            return Image(filename)

    from IPython.display import HTML
    return HTML(a.to_jshtml())
Example #17
    def show(file, width=640, height=480, mode='windowed', title='Untitled'):
        """
        Helper function which actually does the "showing".

        Args:
            file (str): Path to the file.
            width (int, optional): The width of the window. Defaults to 640.
            height (int, optional): The height of the window. Defaults to 480.
            mode (str, optional): 'windowed' will use ffplay (in a separate window), while 'notebook' will use Image or Video from IPython.display. Defaults to 'windowed'.
            title (str, optional): The title of the window. Defaults to 'Untitled'.
        """
        if mode.lower() == 'windowed':
            cmd = f'ffplay "{file}" -x {width} -y {height} -window_title "{title}"'
            # os.system(cmd)
            show_async(cmd)
        elif mode.lower() == 'notebook':
            video_formats = ['.avi', '.mp4', '.mov', '.mkv', '.mpg',
                             '.mpeg', '.webm', '.ogg', '.ts', '.wmv', '.3gp']
            image_formats = ['.jpg', '.png', '.jpeg', '.tiff', '.gif', '.bmp']
            file_extension = os.path.splitext(file)[1].lower()

            if file_extension in video_formats:
                file_type = 'video'
            elif file_extension in image_formats:
                file_type = 'image'
            else:
                file_type = None

            if file_type == 'image':
                display(Image(file))
            elif file_type == 'video':
                if file_extension not in ['.mp4', '.webm', '.ogg']:
                    from musicalgestures._utils import convert_to_mp4
                    print(
                        'Only mp4, webm and ogg videos are supported in notebook mode.')
                    video_to_display = convert_to_mp4(file)
                else:
                    video_to_display = file

                display(Video(video_to_display, width=800))

        else:
            print(
                f'Unrecognized mode: "{mode}". Try "windowed" or "notebook".')
Example #18
 def show_data(self, interactive):
     self.get_data_path()  #Extract path to video clip
     if (interactive):
         display(Video(self.data_path, embed=True))
     else:
         # Create a VideoCapture object and read from input file
         cap = cv2.VideoCapture(self.data_path)
         # Check if video is correctly open
         assert cap.isOpened(), "Not able to open video"
         # Read until video is completed
         while (cap.isOpened()):
             ret, frame = cap.read()
             if ret:
                 # Display the resulting frame
                 cv2.imshow('Frame', frame)
                 # Press 'q' to stop playback early
                 if cv2.waitKey(25) & 0xFF == ord('q'):
                     break
             else:
                 break
         cap.release()
         cv2.destroyAllWindows()
Example #19
        def manim(self, line, cell=None, local_ns=None):
            r"""Render Manim scenes contained in IPython cells.
            Works as a line or cell magic.

            .. note::

                This line and cell magic works best when used in a JupyterLab
                environment: while all of the functionality is available for
                classic Jupyter notebooks as well, it is possible that videos
                sometimes don't update on repeated execution of the same cell
                if the scene name stays the same.

                This problem does not occur when using JupyterLab.

            Please refer to `<https://jupyter.org/>`_ for more information about JupyterLab
            and Jupyter notebooks.

            Usage in line mode::

                %manim MyAwesomeScene [CLI options]

            Usage in cell mode::

                %%manim MyAwesomeScene [CLI options]

                class MyAwesomeScene(Scene):
                    def construct(self):
                        ...

            Run ``%manim -h`` for possible command line interface options.
            """
            if cell:
                exec(cell, local_ns)

            args = line.split()
            if not args or "-h" in args or "--help" in args or "--version" in args:
                main(args, standalone_mode=False, prog_name="manim")
                return
            modified_args = ["--jupyter"] + args[:-1] + [""] + [args[-1]]
            args = main(modified_args,
                        standalone_mode=False,
                        prog_name="manim")
            with tempconfig(local_ns.get("config", {})):
                config.digest_args(args)
                exec(f"{config['scene_names'][0]}().render()", local_ns)
                local_path = Path(config["output_file"]).relative_to(
                    Path.cwd())
                tmpfile = (Path(config["media_dir"]) / "jupyter" /
                           f"{_video_hash(local_path)}{local_path.suffix}")

                if local_path in self.rendered_files:
                    self.rendered_files[local_path].unlink()
                self.rendered_files[local_path] = tmpfile
                os.makedirs(tmpfile.parent, exist_ok=True)
                shutil.copy(local_path, tmpfile)

                file_type = mimetypes.guess_type(config["output_file"])[0]
                if file_type.startswith("image"):
                    display(Image(filename=config["output_file"]))
                    return

                # videos need to be embedded when running in google colab
                video_embed = "google.colab" in str(get_ipython())

                display(
                    Video(
                        tmpfile,
                        html_attributes=
                        f'controls autoplay loop style="max-width: {config["media_width"]};"',
                        embed=video_embed,
                    ))
Example #20
    point_conv.set_data(frame+1, current_ratio[frame])
    return ln_in, ln_out, line_conv, point_conv


ani = animation.FuncAnimation(fig, update, frames=n_samples, interval=150,
                              init_func=init, blit=True)
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
ani.save('pi.mp4', writer=writer)
plt.show()


# %%

from IPython.display import Video
Video("pi.mp4")  # Need ffmpeg installed on your machine


# %%


# Reminder: orange area is:
np.pi/4
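# (Why pi/4: for points drawn uniformly in the unit square, the probability of
# landing inside the quarter disc x**2 + y**2 <= 1 equals its area, pi/4 ~ 0.785,
# so 4 * (hits / n_samples) is the Monte Carlo estimate of pi computed below.)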


# %%


@jit(nopython=True)
def monte_carlo_pi(n_samples=1000):
    acc = 0
Example #21
    def run(self, mode=None, directory=None):

        env = self.make_env(mode=mode, directory=directory)

        self.reset_data()
        self.model.model.reset_states()
        state = env.reset()

        resting = 0
        x_pos = 0
        prev_state = state
        done = False
        last_x_pos = 24

        prev_action = self.take_action()

        for step in range(self.max_steps):

            if step % self.fps == 0:
                action = self.take_action()

            next_state, reward, done, info = env.step(action)

            reward += self.button_penalty * list_dist(prev_action, action)
            prev_action = action

            #advancing check
            if info['x_pos'] > x_pos:
                x_pos = info['x_pos']
                resting = 0

            #sub-area entry check
            if abs(last_x_pos - info['x_pos']) > 10:
                resting = 0
                x_pos = info['x_pos']

            last_x_pos = info['x_pos']

            resting += 1

            if info['life'] < 2:
                break

            if resting > self.patience * 60:
                break

            self.gather_data(step, state, reward, done, info, next_state)
            prev_state = state
            state = next_state

            if mode == 'render':
                env.render()

            if done:
                break

        if mode == 'monitor':
            file_name = directory + 'openaigym.video.%s.video000000.mp4' % env.file_infix
            mp4 = Video(file_name, width=400, height=300)
            self.video = mp4
        else:
            env.close()
Example #22
 def addVideo(self,path,width=None):
     self.addmd(Video(path)._repr_html_(),width=width)
Example #23
sim.run(until=200)

eps_data = sim.get_array(center=mp.Vector3(),
                         size=cell,
                         component=mp.Dielectric)
plt.figure(dpi=100)
sim.plot2D()
#plt.show()
plt.savefig('PLM-Box.png')

ez_data = sim.get_array(center=mp.Vector3(), size=cell, component=mp.Ez)
plt.figure(dpi=100)
sim.plot2D(fields=mp.Ez)
#plt.show()
plt.savefig('EzComponent.png')

#sim.reset_meep()
f = plt.figure(dpi=100)
Animate = mp.Animate2D(sim, fields=mp.Ez, f=f, realtime=False, normalize=True)
plt.close()

sim.run(mp.at_every(1, Animate), until=100)
plt.close()

filename = "straight_waveguide.mp4"
Animate.to_mp4(20, filename)

from IPython.display import Video
Video(filename)
Example #24
        def manim(
            self,
            line: str,
            cell: str = None,
            local_ns: dict[str, Any] = None,
        ) -> None:
            r"""Render Manim scenes contained in IPython cells.
            Works as a line or cell magic.

            .. hint::

                This line and cell magic works best when used in a JupyterLab
                environment: while all of the functionality is available for
                classic Jupyter notebooks as well, it is possible that videos
                sometimes don't update on repeated execution of the same cell
                if the scene name stays the same.

                This problem does not occur when using JupyterLab.

            Please refer to `<https://jupyter.org/>`_ for more information about JupyterLab
            and Jupyter notebooks.

            Usage in line mode::

                %manim [CLI options] MyAwesomeScene

            Usage in cell mode::

                %%manim [CLI options] MyAwesomeScene

                class MyAwesomeScene(Scene):
                    def construct(self):
                        ...

            Run ``%manim --help`` and ``%manim render --help`` for possible command line interface options.

            .. note::

                The maximal width of the rendered videos that are displayed in the notebook can be
                configured via the ``media_width`` configuration option. The default is set to ``25vw``,
                which is 25% of your current viewport width. To allow the output to become as large
                as possible, set ``config.media_width = "100%"``.

            Examples
            --------

            First make sure to put ``import manim``, or even ``from manim import *``
            in a cell and evaluate it. Then, a typical Jupyter notebook cell for Manim
            could look as follows::

                %%manim -v WARNING --disable_caching -qm BannerExample

                config.media_width = "75%"

                class BannerExample(Scene):
                    def construct(self):
                        self.camera.background_color = "#ece6e2"
                        banner_large = ManimBanner(dark_theme=False).scale(0.7)
                        self.play(banner_large.create())
                        self.play(banner_large.expand())

            Evaluating this cell will render and display the ``BannerExample`` scene defined in the body of the cell.

            .. note::

                In case you want to hide the red box containing the output progress bar, the ``progress_bar`` config
                option should be set to ``None``. This can also be done by passing ``--progress_bar None`` as a
                CLI flag.

            """
            if cell:
                exec(cell, local_ns)

            args = line.split()
            if not len(args) or "-h" in args or "--help" in args or "--version" in args:
                main(args, standalone_mode=False, prog_name="manim")
                return
            modified_args = self.add_additional_args(args)
            args = main(modified_args, standalone_mode=False, prog_name="manim")
            with tempconfig(local_ns.get("config", {})):
                config.digest_args(args)

                renderer = None
                if config.renderer == "opengl":
                    # Check if the imported mobjects extend the OpenGLMobject class
                    # meaning ConvertToOpenGL did its job
                    if "OpenGLMobject" in map(lambda cls: cls.__name__, Group.mro()):
                        from manim.renderer.opengl_renderer import OpenGLRenderer

                        renderer = OpenGLRenderer()
                    else:
                        logger.warning(
                            "Renderer must be set to OpenGL in the configuration file "
                            "before importing Manim! Using cairo renderer instead.",
                        )
                        config.renderer = "cairo"

                try:
                    SceneClass = local_ns[config["scene_names"][0]]
                    scene = SceneClass(renderer=renderer)
                    scene.render()
                finally:
                    # Shader cache becomes invalid as the context is destroyed
                    shader_program_cache.clear()

                    # Close OpenGL window here instead of waiting for the main thread to
                    # finish causing the window to stay open and freeze
                    if renderer is not None and renderer.window is not None:
                        renderer.window.close()

                if config["output_file"] is None:
                    logger.info("No output file produced")
                    return

                local_path = Path(config["output_file"]).relative_to(Path.cwd())
                tmpfile = (
                    Path(config["media_dir"])
                    / "jupyter"
                    / f"{_generate_file_name()}{local_path.suffix}"
                )

                if local_path in self.rendered_files:
                    self.rendered_files[local_path].unlink()
                self.rendered_files[local_path] = tmpfile
                os.makedirs(tmpfile.parent, exist_ok=True)
                shutil.copy(local_path, tmpfile)

                file_type = mimetypes.guess_type(config["output_file"])[0]
                if file_type.startswith("image"):
                    display(Image(filename=config["output_file"]))
                    return

                # videos need to be embedded when running in google colab
                video_embed = "google.colab" in str(get_ipython())

                display(
                    Video(
                        tmpfile,
                        html_attributes=f'controls autoplay loop style="max-width: {config["media_width"]};"',
                        embed=video_embed,
                    ),
                )
Example #25
image_prediction.query("input == @image_to_plot").plot_detections()

# # Detecting FEX from videos
# Detecting facial expressions in videos is also easy by using the `detect_video()` method. This sample video is by [Wolfgang Langer](https://www.pexels.com/@wolfgang-langer-1415383?utm_content=attributionCopyText&utm_medium=referral&utm_source=pexels) from [Pexels](https://www.pexels.com/video/a-woman-exhibits-different-emotions-through-facial-expressions-3063838/).

# In[21]:

# Find the file you want to process.
from feat.tests.utils import get_test_data_path
import os, glob
test_data_dir = get_test_data_path()
test_video = os.path.join(test_data_dir, "WolfgangLanger_Pexels.mp4")

# Show video
from IPython.display import Video
Video(test_video, embed=True)

# Let's predict facial expressions from the video using the `detect_video()` method.

# In[22]:

video_prediction = detector.detect_video(test_video, skip_frames=24)
video_prediction.head()

# You can also plot the detection results from a video. The frames are not extracted from the video (that would result in thousands of images), so the visualization only shows the detected face without the underlying image.
#
# The video has 24 fps; the actress shows sadness around 0:02 and happiness around 0:14.

# In[23]:

video_prediction.loc[[48]].plot_detections()
Example #26
    pickled_puzzle = pickle.load(f)
pickled_puzzle.show()

# We have now opened the previously generated puzzle data.

# ---
# ## (Bonus) Animating the solution trajectory
# Let's animate the trajectory of the solution.
# Using the puzzle's rewind and fast-forward features, we render the work history
# as images in order from the beginning, then turn them into a video with an
# external script (running this cell may take several minutes).

for p in glob.glob("fig/animation/*.png"):
    if os.path.isfile(p):
        os.remove(p)
# jump back to the initial state of the history
tmpPuzzle = pickled_puzzle.jump(0)
tmpPuzzle.saveAnswerImage(f"fig/animation/0000.png")
# save all history as image file
for histNum in range(len(tmpPuzzle.baseHistory)):
    tmpPuzzle = tmpPuzzle.getNext()
    tmpPuzzle.saveAnswerImage(f"fig/animation/{str(histNum+1).zfill(4)}.png")

# movie_maker.py is used to turn the images into a video; the directory containing the images and the FPS are passed as command-line arguments.

# !python ../python/script/movie_maker.py "fig/animation/" -o "fig/animation/out.mp4" -f 10 -c mp4v

# A video file named out.mp4 has now been created in fig/animation.
# Let's play it.

Video("fig/animation/out.mp4", width=960, height=480)
Example #27
def convert_jpg_video(video_root,
                      start_frame,
                      dst_path,
                      n_frames=-1,
                      fps=60,
                      insert_pause=-1,
                      show_vid=False,
                      frame_number_display=False,
                      ignore_if_exist=True,
                      store_frames=[]):
    '''
    Helper for Epic and SynthEpic visualization.
    ignore_if_exist: If True, don't write any files if the .mp4 already exists. Set False if store_frames changed.
    '''

    if not (os.path.exists(dst_path)) or not (ignore_if_exist):

        if not (os.path.exists(Path(dst_path).parent)):
            os.makedirs(Path(dst_path).parent)
        if n_frames <= 0:
            n_frames = len(os.listdir(video_root))

        if store_frames != []:
            dst_frame_path = 'single_video_frames/' + str(
                video_root.split('/')[-1]) + '/' + str(start_frame)
            if not (os.path.exists(dst_frame_path)):
                os.makedirs(dst_frame_path)

        # Write all frames
        writer = FFmpegWriter(dst_path, inputdict={'-r': str(fps)})
        for i in range(n_frames):
            cur_frame = start_frame + i
            file_name = 'frame_{:010d}.jpg'.format(cur_frame + 1)
            video_path = os.path.join(video_root, file_name)
            if not (os.path.exists(video_path)):
                break  # n_frames could be overestimated due to extra files

            frame = plt.imread(video_path)
            if cur_frame in store_frames:
                frame_still = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                cv2.imwrite(os.path.join(dst_frame_path, file_name),
                            frame_still)

            if frame_number_display:
                font = cv2.FONT_HERSHEY_SIMPLEX
                bottomLeftCornerOfText = (250, 250)
                fontScale = 1
                fontColor = (255, 255, 255)
                lineType = 2
                frame_annotated = cv2.putText(frame, str(start_frame + i),
                                              bottomLeftCornerOfText, font,
                                              fontScale, fontColor, lineType)

                frame_annotated = frame_annotated.copy()
                writer.writeFrame(frame_annotated)
            else:
                frame = frame.copy()
                writer.writeFrame(frame)

            # Insert pause (with orange borders) just as predictions start to happen
            if insert_pause >= 0 and i == insert_pause:
                alert_frame = frame.copy()
                alert_frame[:20, :] = [255, 128, 0]
                alert_frame[-20:, :] = [255, 128, 0]
                alert_frame[:, :20] = [255, 128, 0]
                alert_frame[:, -20:] = [255, 128, 0]
                for j in range(60):
                    writer.writeFrame(alert_frame)

        writer.close()

    if show_vid:
        # For use in jupyter notebook
        display(Video(dst_path, embed=True, width=480, height=320))
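
A hypothetical invocation (paths and frame numbers illustrative): render 300 JPEG frames starting at frame 100 into an mp4, insert the orange-bordered pause 50 frames in, and preview the result inline.

convert_jpg_video('epic_frames/P01_01', start_frame=100,
                  dst_path='vis/P01_01_clip.mp4', n_frames=300,
                  insert_pause=50, show_vid=True)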
Example #28
fig, ax = plt.subplots(1,1, figsize=(15, 15))
sns.heatmap(ML.corr().iloc[0:10,10:], cmap=sns.color_palette("coolwarm"), center=0,square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.show()
q19_cnt = multiple_choice['Q19'].value_counts()[:-1]

fig = go.Figure([go.Bar(x=q19_cnt.index, y=q19_cnt, marker_color='crimson')])
fig.update_layout(title="Programming Language Ranking")
fig.show()
q18_df, q18_sub = q_list('Q18')
q18_df.drop(0, axis=0, inplace=True)
q18_convert = {b:a for a, b in zip(q18_sub.values, q18_df.columns)}
q18_df.rename(columns=q18_convert, inplace=True)
msno.matrix(q18_df, color=(0.37, 0.29, 0.48))
from IPython.display import Video

Video("https://thumbs.gfycat.com/UnderstatedAnotherAntelope-mobile.mp4")
data = multiple_choice[['Q19', 'Q23']]
data.drop(0, axis=0, inplace=True)
career = data['Q23'].value_counts()

fig = go.Figure([go.Bar(x=career.index, y=career, marker_color='#F6CD8B')])
fig.update_layout(title="Distribution of ML Career")
fig.show()
data.groupby('Q23')['Q19'].value_counts().unstack().fillna(0).T[[ '< 1 years', '1-2 years', '2-3 years', 
       '3-4 years', '4-5 years', '5-10 years', '10-15 years', '20+ years']].drop(['None','Other'], axis=0)
fig = px.histogram(data.dropna(), x='Q23', y='Q23', color='Q19', template='ggplot2')
fig.update_layout()
fig.show()
data = multiple_choice[['Q19', 'Q15']]
data.drop(0, axis=0, inplace=True)
Example #29
import numpy as np
import imageio
from skimage import img_as_ubyte, img_as_float32

def play(video, filepath="tmp.mp4"):
    from IPython.display import Video, display
    # Clip to [0, 1] before the uint8 conversion to avoid wrap-around artifacts
    video = img_as_ubyte(np.clip(img_as_float32(np.asarray(video)), 0, 1))
    imageio.mimwrite(filepath, video, fps=24, macro_block_size=None)
    display(Video(filepath, width=800, height=400))
Example #30
Z = Z1

#%%
N = np.zeros(Z.shape, dtype=int)
N[1:-1, 1:-1] += (Z[:-2, :-2] + Z[:-2, 1:-1] + Z[:-2, 2:] + Z[1:-1, :-2] +
                  Z[1:-1, 2:] + Z[2:, :-2] + Z[2:, 1:-1] + Z[2:, 2:])

# A dead cell with exactly three live neighbours becomes alive (reproduction rule).
birth = (N == 3)[1:-1, 1:-1] & (Z[1:-1, 1:-1] == 0)
# A live cell with two or three live neighbours survives; all others die.
survive = ((N == 2) | (N == 3))[1:-1, 1:-1] & (Z[1:-1, 1:-1] == 1)
Z[...] = 0
Z[1:-1, 1:-1][birth | survive] = 1

#%%
Video("game-of-life.mp4")
