Example #1
    def animate(self,
                fps=10,
                W=1000,
                H=618,
                duration=20,
                scale='auto',
                fname=None):
        if fname is None:
            import tempfile
            fname = tempfile.mktemp(suffix='.mp4')
        import matplotlib.pyplot as plt
        self.dt = 1. / fps
        inches_per_pt = 1.0 / 72.27
        from moviepy.video.io.bindings import mplfig_to_npimage
        import moviepy.editor as mpy

        def make_frame_mpl(t):
            self.t = t
            self.update()
            fig = plt.figure(figsize=(W * inches_per_pt, H * inches_per_pt))
            fig, ax = self.plot_structure(fig=fig, ax=None, scale=scale)
            ax.axis('off')
            frame = mplfig_to_npimage(fig)  # RGB image of the figure
            plt.close(fig)  # close the per-frame figure to avoid leaking memory
            return frame

        animation = mpy.VideoClip(make_frame_mpl, duration=duration)
        plt.close('all')
        animation.write_videofile(fname, fps=fps)
        return mpy.ipython_display(fname, fps=fps, loop=1, autoplay=1, width=W)
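For reference, here is a minimal standalone sketch of the same matplotlib-to-MoviePy pipeline the method above uses (the sine-wave content is illustrative, not from the original class):

import numpy as np
import matplotlib.pyplot as plt
import moviepy.editor as mpy
from moviepy.video.io.bindings import mplfig_to_npimage

def make_frame(t):
    # Draw whatever corresponds to time t, then grab the canvas as an RGB array.
    fig, ax = plt.subplots(figsize=(4, 3))
    x = np.linspace(0, 2 * np.pi, 200)
    ax.plot(x, np.sin(x + t))
    ax.axis('off')
    frame = mplfig_to_npimage(fig)
    plt.close(fig)  # one figure per frame; close it or memory grows
    return frame

clip = mpy.VideoClip(make_frame, duration=2)
clip.write_videofile('sine.mp4', fps=10)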
Example #2

def vid(my_frames):
    import numpy as np
    import moviepy.editor as mpy
    from IPython.display import display

    def frame(t):
        i = int(t * 10.)  # 10 frames per second
        if i >= len(my_frames):
            i = len(my_frames) - 1
        # MoviePy expects 8-bit RGB frames; np.float has been removed from NumPy
        return (my_frames[i] * 255).astype(np.uint8)

    # mp4_file is expected to be defined at module level (the output path)
    clip = mpy.VideoClip(frame, duration=len(my_frames) / 10)
    clip.write_videofile(mp4_file, fps=10.)
    display(mpy.ipython_display(mp4_file, height=400, maxduration=100.))
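A hypothetical call, assuming frames are float arrays in [0, 1] and mp4_file has been set to a writable path:

# mp4_file = 'frames.mp4'
# vid([np.random.rand(64, 64, 3) for _ in range(30)])  # 3 s at 10 fps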
Example #3
    def animate(self, file_path=None, frame_duration=0.1, do_display=True):
        clips = self.to_video(lambda img: ImageClip(img).set_duration(frame_duration))

        clip = concatenate_videoclips(clips, method="compose", bg_color=(255, 255, 255))

        if file_path is not None:
            clip.write_gif(file_path, fps=24, verbose=False, logger=None)

        if do_display:
            src = clip if file_path is None else file_path
            ipd.display(ipython_display(src, fps=24, rd_kwargs=dict(logger=None), autoplay=1, loop=1))
Example #4
    def render(self, fps=10, W=1000, H=618, location=[0, 1.75, -5], head_size=.4, light_intensity=1.2, reflection=1., 
               look_at=[0, 1.5, 0], fov=75, antialiasing=0.001, duration=5, fname='/tmp/temp.webm'):

        def scene(t):
            """ 
            Returns the scene at time 't' (in seconds) 
            """

            head_location = np.array(location) - np.array([0, 0, head_size])
            import vapory
            light = vapory.LightSource([15, 15, 1], 'color', [light_intensity]*3)
            background = vapory.Box([0, 0, 0], [1, 1, 1], 
                     vapory.Texture(vapory.Pigment(vapory.ImageMap('png', '"../files/VISUEL_104.png"', 'once')),
                             vapory.Finish('ambient', 1.2) ),
                     'scale', [self.background_depth, self.background_depth, 0],
                     'translate', [-self.background_depth/2, -.45*self.background_depth, -self.background_depth/2])
            me = vapory.Sphere( head_location, head_size, vapory.Texture( vapory.Pigment( 'color', [1, 0, 1] )))
            self.t = t
            self.update()
            objects = [background, me, light]

            for i_lame in range(self.N_lame):
                #print(i_lame, self.lame_length[i_lame], self.lame_width[i_lame])
                objects.append(vapory.Box([-self.lame_length[i_lame]/2, 0, -self.lame_width[i_lame]/2], 
                                          [self.lame_length[i_lame]/2, self.lames_height,  self.lame_width[i_lame]/2], 
                                           vapory.Pigment('color', [1, 1, 1]),
                                           vapory.Finish('phong', 0.8, 'reflection', reflection),
                                           'rotate', (0, -self.lames[2, i_lame]*180/np.pi, 0), #HACK?
                                           'translate', (self.lames[0, i_lame], 0, self.lames[1, i_lame])
                                          )
                              )

            return vapory.Scene( vapory.Camera('angle', fov, "location", location, "look_at", look_at),
                           objects = objects,
                           included=["glass.inc"] )
        import moviepy.editor as mpy
        if not os.path.isfile(fname):
            self.dt = 1./fps
            def make_frame(t):
                return scene(t).render(width=W, height=H, antialiasing=antialiasing)

            clip = mpy.VideoClip(make_frame, duration=duration)
            clip.write_videofile(fname, fps=fps)
        return mpy.ipython_display(fname, fps=fps, loop=1, autoplay=1)
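The same render-then-encode pattern in miniature, as a hedged sketch (requires POV-Ray plus the vapory package; the bouncing sphere is illustrative):

import numpy as np
import vapory
import moviepy.editor as mpy

def make_frame(t):
    # Build a tiny POV-Ray scene for time t and render it to an RGB array.
    camera = vapory.Camera('location', [0, 1, -4], 'look_at', [0, 0, 0])
    light = vapory.LightSource([2, 4, -3], 'color', [1, 1, 1])
    ball = vapory.Sphere([0, abs(np.sin(t)), 0], 0.5,
                         vapory.Texture(vapory.Pigment('color', [1, 0, 1])))
    scene = vapory.Scene(camera, objects=[light, ball])
    return scene.render(width=320, height=200)

clip = mpy.VideoClip(make_frame, duration=2)
clip.write_videofile('bounce.mp4', fps=10)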
Example #5
import numpy as np
import IPython.display as ipd
from moviepy.editor import ImageClip, concatenate_videoclips, ipython_display


def to_gif(img_list, file_path=None, frame_duration=0.1, do_display=True):
    clips = [
        ImageClip(np.array(img)).set_duration(frame_duration)
        for img in img_list
    ]

    clip = concatenate_videoclips(clips,
                                  method="compose",
                                  bg_color=(255, 255, 255))

    if file_path is not None:
        clip.write_gif(file_path, fps=24, verbose=False, logger=None)

    if do_display:
        src = clip if file_path is None else file_path
        ipd.display(
            ipython_display(src,
                            fps=24,
                            rd_kwargs=dict(logger=None),
                            autoplay=1,
                            loop=1))
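A hypothetical call, assuming a list of PIL images (the two solid-color frames are illustrative):

from PIL import Image
frames = [Image.new('RGB', (64, 64), c) for c in ('red', 'blue')]
to_gif(frames, file_path='flash.gif', frame_duration=0.5, do_display=False)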
Example #6
    def animate(self, fps=10, W=1000, H=618, duration=20, scale='auto', fname=None):
        if fname is None:
            import tempfile
            fname = tempfile.mktemp(suffix='.webm')
        import matplotlib.pyplot as plt
        self.dt = 1./fps
        inches_per_pt = 1.0/72.27
        from moviepy.video.io.bindings import mplfig_to_npimage
        import moviepy.editor as mpy
        def make_frame_mpl(t):
            self.t = t
            self.update()
            fig = plt.figure(figsize=(W*inches_per_pt, H*inches_per_pt))
            fig, ax = self.plot_structure(fig=fig, ax=None, scale=scale)
            ax.axis('off')
            frame = mplfig_to_npimage(fig)  # RGB image of the figure
            plt.close(fig)  # close the per-frame figure to avoid leaking memory
            return frame

        animation = mpy.VideoClip(make_frame_mpl, duration=duration)
        plt.close('all')
        animation.write_videofile(fname, fps=fps)
        return mpy.ipython_display(fname, fps=fps, loop=1, autoplay=1, width=W)
Example #7
def wfc_execute(WFC_VISUALIZE=False, WFC_PROFILE=False, WFC_LOGGING=False):

    solver_to_use = "default"  #"minizinc"

    wfc_stats_tracking = {
        "observations": 0,
        "propagations": 0,
        "time_start": None,
        "time_end": None,
        "choices_before_success": 0,
        "choices_per_run": [],
        "success": False
    }
    wfc_stats_data = []
    stats_file_name = f"output/stats_{time.time()}.tsv"

    with open(stats_file_name, "a+") as stats_file:
        stats_file.write(
            "id\tname\tsuccess?\tattempts\tobservations\tpropagations\tchoices_to_solution\ttotal_observations_before_solution_in_last_restart\ttotal_choices_before_success_across_restarts\tbacktracking_total\ttime_passed\ttime_start\ttime_end\tfinal_time_end\tgenerated_size\tpattern_count\tseed\tbacktracking?\tallowed_restarts\tforce_the_use_of_all_patterns?\toutput_filename\n"
        )

    default_backtracking = False
    default_allowed_attempts = 10
    default_force_use_all_patterns = False

    xdoc = ET.ElementTree(file="samples_original.xml")
    counter = 0
    choices_before_success = 0
    for xnode in xdoc.getroot():
        counter += 1
        choices_before_success = 0
        if ("#comment" == xnode.tag):
            continue

        name = xnode.get('name', "NAME")
        global hackstring
        hackstring = name
        print("< {0} ".format(name), end='')
        if "backtracking_on" == xnode.tag:
            default_backtracking = True
        if "backtracking_off" == xnode.tag:
            default_backtracking = False
        if "one_allowed_attempts" == xnode.tag:
            default_allowed_attempts = 1
        if "ten_allowed_attempts" == xnode.tag:
            default_allowed_attempts = 10
        if "force_use_all_patterns" == xnode.tag:
            default_force_use_all_patterns = True
        if "overlapping" == xnode.tag:
            choices_before_success = 0
            print("beginning...")
            print(xnode.attrib)
            current_output_file_number = 97000 + (counter * 10)
            wfc_ns = types.SimpleNamespace(
                output_path="output/",
                img_filename="samples/" + xnode.get('name', "NAME") +
                ".png",  # name of the input file
                output_file_number=current_output_file_number,
                operation_name=xnode.get('name', "NAME"),
                output_filename="output/" + xnode.get('name', "NAME") + "_" +
                str(current_output_file_number) + "_" + str(time.time()) +
                ".png",  # name of the output file
                debug_log_filename="output/" + xnode.get('name', "NAME") +
                "_" + str(current_output_file_number) + "_" +
                str(time.time()) + ".log",
                seed=11975,  # seed for random generation, can be any number
                tile_size=int(xnode.get('tile_size',
                                        1)),  # size of tile, in pixels
                pattern_width=int(
                    xnode.get('N', 2)
                ),  # Size of the patterns we want. 2x2 is the minimum, larger scales get slower fast.
                channels=3,  # Color channels in the image (usually 3 for RGB)
                symmetry=int(xnode.get('symmetry', 8)),
                ground=int(xnode.get('ground', 0)),
                adjacency_directions=dict(
                    enumerate([
                        CoordXY(x=0, y=-1),
                        CoordXY(x=1, y=0),
                        CoordXY(x=0, y=1),
                        CoordXY(x=-1, y=0)
                    ])
                ),  # The list of adjacencies that we care about - these will be turned into the edges of the graph
                periodic_input=string2bool(xnode.get(
                    'periodicInput', True)),  # Does the input wrap?
                periodic_output=string2bool(
                    xnode.get('periodicOutput',
                              False)),  # Do we want the output to wrap?
                generated_size=(int(xnode.get('width', 48)),
                                int(xnode.get('height',
                                              48))),  #Size of the final image
                screenshots=int(
                    xnode.get('screenshots', 3)
                ),  # Number of times to run the algorithm, will produce this many distinct outputs
                iteration_limit=int(
                    xnode.get('iteration_limit', 0)
                ),  # After this many iterations, time out. 0 = never time out.
                allowed_attempts=int(
                    xnode.get('allowed_attempts', default_allowed_attempts)
                ),  # Give up after this many contradictions
                stats_tracking=wfc_stats_tracking.copy(),
                backtracking=string2bool(
                    xnode.get('backtracking', default_backtracking)),
                force_use_all_patterns=default_force_use_all_patterns,
                force_fail_first_solution=False)
            wfc_ns.stats_tracking[
                "choices_before_success"] += choices_before_success
            wfc_ns.stats_tracking["time_start"] = time.time()
            pr = cProfile.Profile()
            pr.enable()
            wfc_ns = find_pattern_center(wfc_ns)
            wfc_ns = wfc.wfc_utilities.load_visualizer(wfc_ns)
            ##
            ## Load image and make tile data structures
            ##
            wfc_ns.img = load_source_image(wfc_ns.img_filename)
            wfc_ns.channels = wfc_ns.img.shape[
                -1]  # detect if it uses channels other than RGB...
            wfc_ns.tiles = image_to_tiles(wfc_ns.img, wfc_ns.tile_size)
            wfc_ns.tile_catalog, wfc_ns.tile_grid, wfc_ns.code_list, wfc_ns.unique_tiles = make_tile_catalog(
                wfc_ns)
            wfc_ns.tile_ids = {
                tile: idx
                for idx, tile in enumerate(wfc_ns.unique_tiles[0])
            }
            wfc_ns.tile_weights = dict(
                zip(wfc_ns.unique_tiles[0], wfc_ns.unique_tiles[1]))

            if WFC_VISUALIZE:
                show_input_to_output(wfc_ns)
                show_extracted_tiles(wfc_ns)
                show_false_color_tile_grid(wfc_ns)

            wfc_ns.pattern_catalog, wfc_ns.pattern_weights, wfc_ns.patterns, wfc_ns.pattern_grid = make_pattern_catalog_with_symmetry(
                wfc_ns.tile_grid, wfc_ns.pattern_width, wfc_ns.symmetry,
                wfc_ns.periodic_input)
            if WFC_VISUALIZE:
                show_pattern_catalog(wfc_ns)
            adjacency_relations = adjacency_extraction_consistent(
                wfc_ns, wfc_ns.patterns)
            if WFC_VISUALIZE:
                show_adjacencies(wfc_ns, adjacency_relations[:256])
            wfc_ns = wfc.wfc_patterns.detect_ground(wfc_ns)
            pr.disable()

            screenshots_collected = 0
            while screenshots_collected < wfc_ns.screenshots:
                wfc_logger.info(f"Starting solver #{screenshots_collected}")
                screenshots_collected += 1
                wfc_ns.seed += 100

                wfc_ns.stats_tracking["time_start"] = time.time()
                wfc_ns.stats_tracking["final_time_end"] = None

                # update output name so each iteration has a unique filename
                output_filename = "output/" + xnode.get(
                    'name', "NAME"
                ) + "_" + str(current_output_file_number) + "_" + str(
                    time.time()) + "_" + str(
                        wfc_ns.seed) + ".png"  # name of the output file

                profile_filename = "" + str(
                    wfc_ns.output_path) + "setup_" + str(
                        wfc_ns.output_file_number) + "_" + str(
                            wfc_ns.seed) + "_" + str(time.time()) + "_" + str(
                                wfc_ns.seed) + ".profile"
                if WFC_PROFILE:
                    with open(profile_filename, 'w') as profile_file:
                        ps = pstats.Stats(pr, stream=profile_file)
                        ps.sort_stats('cumtime', 'ncalls')
                        ps.print_stats(20)
                solution = None

                if "minizinc" == solver_to_use:
                    attempt_count = 0
                    #while attempt_count < wfc_ns.allowed_attempts:
                    #    attempt_count += 1
                    #    solution = mz_run(wfc_ns)
                    #    solution.wfc_ns.stats_tracking["attempt_count"] = attempt_count
                    #    solution.wfc_ns.stats_tracking["choices_before_success"] += solution.wfc_ns.stats_tracking["observations"]

                else:
                    if True:
                        attempt_count = 0
                        #print("allowed attempts: " + str(wfc_ns.allowed_attempts))
                        attempt_wfc_ns = copy.deepcopy(wfc_ns)
                        attempt_wfc_ns.stats_tracking[
                            "time_start"] = time.time()
                        attempt_wfc_ns.stats_tracking[
                            "choices_before_success"] = 0
                        attempt_wfc_ns.stats_tracking[
                            "total_observations_before_success"] = 0
                        wfc.wfc_solver.reset_backtracking_count(
                        )  # reset the count of how many times we've backtracked, because multiple attempts are handled here instead of there
                        while attempt_count < wfc_ns.allowed_attempts:
                            attempt_count += 1
                            print(attempt_count, end=' ')
                            attempt_wfc_ns.seed += 7  # change seed for each attempt...
                            solution = wfc_run(attempt_wfc_ns,
                                               visualize=WFC_VISUALIZE,
                                               logging=WFC_LOGGING)
                            solution.wfc_ns.stats_tracking[
                                "attempt_count"] = attempt_count
                            solution.wfc_ns.stats_tracking[
                                "choices_before_success"] += solution.wfc_ns.stats_tracking[
                                    "observations"]
                            attempt_wfc_ns.stats_tracking[
                                "total_observations_before_success"] += solution.wfc_ns.stats_tracking[
                                    'total_observations']
                            wfc_logger.info("result: {} is {}".format(
                                attempt_count, solution.result))
                            if solution.result == -2:
                                attempt_count = wfc_ns.allowed_attempts
                                solution.wfc_ns.stats_tracking[
                                    "time_end"] = time.time()
                            wfc_stats_data.append(
                                solution.wfc_ns.stats_tracking.copy())
                    solution.wfc_ns.stats_tracking[
                        "final_time_end"] = time.time()
                    print("tracking choices before success...")
                    choices_before_success = solution.wfc_ns.stats_tracking[
                        "choices_before_success"]
                    time_passed = None
                    if solution.wfc_ns.stats_tracking["time_end"] is not None:
                        time_passed = solution.wfc_ns.stats_tracking[
                            "time_end"] - solution.wfc_ns.stats_tracking[
                                "time_start"]
                    elif solution.wfc_ns.stats_tracking[
                            "final_time_end"] is not None:
                        time_passed = solution.wfc_ns.stats_tracking[
                            "final_time_end"] - solution.wfc_ns.stats_tracking[
                                "time_start"]

                    print("...finished calculating time passed")
                    #print(wfc_stats_data)
                    print("writing stats...", end='')

                    with open(stats_file_name, "a+") as stats_file:
                        stats_file.write(
                            f"{solution.wfc_ns.output_file_number}\t{solution.wfc_ns.operation_name}\t{solution.wfc_ns.stats_tracking['success']}\t{solution.wfc_ns.stats_tracking['attempt_count']}\t{solution.wfc_ns.stats_tracking['observations']}\t{solution.wfc_ns.stats_tracking['propagations']}\t{solution.wfc_ns.stats_tracking['choices_before_success']}\t{solution.wfc_ns.stats_tracking['total_observations']}\t{attempt_wfc_ns.stats_tracking['total_observations_before_success']}\t{solution.backtracking_total}\t{time_passed}\t{solution.wfc_ns.stats_tracking['time_start']}\t{solution.wfc_ns.stats_tracking['time_end']}\t{solution.wfc_ns.stats_tracking['final_time_end']}\t{solution.wfc_ns.generated_size}\t{len(solution.wfc_ns.pattern_weights.keys())}\t{solution.wfc_ns.seed}\t{solution.wfc_ns.backtracking}\t{solution.wfc_ns.allowed_attempts}\t{solution.wfc_ns.force_use_all_patterns}\t{solution.wfc_ns.output_filename}\n"
                        )
                    print("done")

                if WFC_VISUALIZE:
                    print("visualize")
                    if solution is None:
                        print("n u l l")
                    #print(solution)
                    print(1)
                    solution_vis = wfc.wfc_solver.render_recorded_visualization(
                        solution.recorded_vis)
                    #print(solution)
                    print(2)

                    video_fn = f"{solution.wfc_ns.output_path}/crystal_example_{solution.wfc_ns.output_file_number}_{time.time()}.mp4"
                    wfc_logger.info("*****************************")
                    wfc_logger.warning(video_fn)
                    print(
                        f"solver recording stack len - {len(solution_vis.solver_recording_stack)}"
                    )
                    if len(solution_vis.solver_recording_stack) > 0:
                        print(solution_vis.solver_recording_stack[0].shape)
                        wfc_logger.info(
                            solution_vis.solver_recording_stack[0].shape)
                        # FFMPEG_VideoWriter takes size as (width, height);
                        # the ndarray shape is (height, width, channels)
                        writer = FFMPEG_VideoWriter(video_fn, [
                            solution_vis.solver_recording_stack[0].shape[1],
                            solution_vis.solver_recording_stack[0].shape[0]
                        ], 12.0)
                        for img_data in solution_vis.solver_recording_stack:
                            writer.write_frame(img_data)
                        print('!', end='')
                        writer.close()
                        mpy.ipython_display(video_fn, height=700)
                print("recording done")
                if WFC_VISUALIZE:
                    solution = wfc_partial_output(solution)
                    show_rendered_patterns(solution, True)
                print("render to output")
                render_patterns_to_output(solution, True, False)
                print("completed")
                print("\n{0} >".format(name))

        elif "simpletiled" == xnode.tag:
            print("> ", end="\n")
            continue
        else:
            continue
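The configuration pattern above in miniature: each <overlapping> node is turned into a types.SimpleNamespace of typed fields read from XML attributes with defaults. A sketch, using a made-up node:

import types
import xml.etree.ElementTree as ET

node = ET.fromstring('<overlapping name="Maze" N="3"/>')
cfg = types.SimpleNamespace(
    operation_name=node.get('name', 'NAME'),
    pattern_width=int(node.get('N', 2)),  # falls back to the default when absent
)
print(cfg.operation_name, cfg.pattern_width)  # Maze 3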
Example #8

models = []
for i in [100, 500, 1000, 4000]:
  ca = CAModel()
  ca.load_weights('train_log/%04d'%i)
  models.append(ca)

out_fn = 'train_steps_damage_%d.mp4'%DAMAGE_N
x = np.zeros([len(models), 72, 72, CHANNEL_N], np.float32)
x[..., 36, 36, 3:] = 1.0
with VideoWriter(out_fn) as vid:
  for i in tqdm.trange(500):
    vis = np.hstack(to_rgb(x))
    vid.add(zoom(vis, 2))
    for ca, xk in zip(models, x):
      xk[:] = ca(xk[None,...])[0]
mvp.ipython_display(out_fn)
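The update line batches each 72x72 grid as a singleton batch: xk[None, ...] adds a leading batch axis before the model call, and [0] drops it again. The same idiom in plain NumPy:

import numpy as np
x = np.zeros((72, 72, 16), np.float32)
batched = x[None, ...]            # shape (1, 72, 72, 16)
assert batched[0].shape == x.shape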

#@title Training Progress (Batches)
frames = sorted(glob.glob('train_log/batches_*.jpg'))
mvp.ImageSequenceClip(frames, fps=10.0).write_videofile('batches.mp4')
mvp.ipython_display('batches.mp4')

#@title Pool Contents
frames = sorted(glob.glob('train_log/*_pool.jpg'))[:80]
mvp.ImageSequenceClip(frames, fps=20.0).write_videofile('pool.mp4')
mvp.ipython_display('pool.mp4')

"""## Pretrained Models and Figures

Please run the cell below to download pretrained models that are used to generate the subsequent figures. The figures generated after this are generated using the pretrained CAs.
"""
Example #9
    def render(self,
               fps=10,
               W=1000,
               H=618,
               location=[0, 1.75, -5],
               head_size=.4,
               light_intensity=1.2,
               reflection=1.,
               look_at=[0, 1.5, 0],
               fov=75,
               antialiasing=0.001,
               duration=5,
               fname='/tmp/temp.mp4'):
        def scene(t):
            """
            Returns the scene at time 't' (in seconds)
            """

            head_location = np.array(location) - np.array([0, 0, head_size])
            import vapory
            light = vapory.LightSource([15, 15, 1], 'color',
                                       [light_intensity] * 3)
            background = vapory.Box(
                [0, 0, 0], [1, 1, 1],
                vapory.Texture(
                    vapory.Pigment(
                        vapory.ImageMap('png',
                                        '"../files/VISUEL_104.png"', 'once')),
                    vapory.Finish('ambient', 1.2)), 'scale',
                [self.background_depth, self.background_depth, 0], 'translate',
                [
                    -self.background_depth / 2, -.45 * self.background_depth,
                    -self.background_depth / 2
                ])
            me = vapory.Sphere(
                head_location, head_size,
                vapory.Texture(vapory.Pigment('color', [1, 0, 1])))
            self.t = t
            self.update()
            objects = [background, me, light]

            for i_lame in range(self.N_lame):
                #print(i_lame, self.lame_length[i_lame], self.lame_width[i_lame])
                objects.append(
                    vapory.Box(
                        [
                            -self.lame_length[i_lame] / 2, 0,
                            -self.lame_width[i_lame] / 2
                        ],
                        [
                            self.lame_length[i_lame] / 2, self.lames_height,
                            self.lame_width[i_lame] / 2
                        ],
                        vapory.Pigment('color', [1, 1, 1]),
                        vapory.Finish('phong', 0.8, 'reflection', reflection),
                        'rotate',
                        (0, -self.lames[2, i_lame] * 180 / np.pi, 0),  #HACK?
                        'translate',
                        (self.lames[0, i_lame], 0, self.lames[1, i_lame])))

            return vapory.Scene(vapory.Camera('angle', fov, "location",
                                              location, "look_at", look_at),
                                objects=objects,
                                included=["glass.inc"])

        import moviepy.editor as mpy
        if not os.path.isfile(fname):
            self.dt = 1. / fps

            def make_frame(t):
                return scene(t).render(width=W,
                                       height=H,
                                       antialiasing=antialiasing)

            clip = mpy.VideoClip(make_frame, duration=duration)
            clip.write_videofile(fname, fps=fps)
        return mpy.ipython_display(fname, fps=fps, loop=1, autoplay=1)
Example #10
def showarray_saved(img, **kw):
    path = save_vid(img, **kw)
    display(mpy.ipython_display(str(path)))
Example #11
print(solution.solver_recording_stack[0].shape)
# FFMPEG_VideoWriter takes size as (width, height); the ndarray shape is
# (height, width, channels), so swap the first two entries
h, w = solution.solver_recording_stack[0].shape[:2]
writer = FFMPEG_VideoWriter(video_fn, (w, h), 12.0)
# for i in range(24):
#    writer.write_frame(solution.solver_recording_stack[0])
for img_data in solution.solver_recording_stack:
    writer.write_frame(img_data)
    # print(_normalize_array(img_data))
    print("!", end="")

writer.close()

mpy.ipython_display(video_fn, height=700)
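For reference, a minimal self-contained sketch of this low-level writer (the noise frames are illustrative):

import numpy as np
from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter

writer = FFMPEG_VideoWriter('noise.mp4', (64, 48), 12.0)  # size is (width, height)
for _ in range(24):
    writer.write_frame(np.random.randint(0, 256, (48, 64, 3), dtype=np.uint8))
writer.close()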

solution = wfc_partial_output(solution)
show_rendered_patterns(solution, True)
render_patterns_to_output(solution, True)
# show_crystal_time(solution, True)
# show_rendered_patterns(solution, False)
# render_patterns_to_output(solution, False)

Example #12

# %% Use to spawn in the neurons

spawn_neuron(world, 1, (size // 3, size // 3))
spawn_neuron(world, 2, (size // 3, size - (size // 3)))
spawn_neuron(world, 3, (size - (size // 3), size - (size // 3)))
spawn_neuron(world, 4, (size - (size // 3), size // 3))

# %% Use to observe a single step of the update loop
world = next_state(world)
print(world)
pl.imshow(to_rgb(matrix_to_png()))

# %%
for i in range(0, 20):
    world = next_state(world)
pl.imshow(to_rgb(matrix_to_png()))

# %%
with VideoWriter('teaser.mp4') as vid:
    # spawn
    # spawn_neuron(world, 1, (size//3,size//3))
    # spawn_neuron(world, 2, (size-(size//3),size-(size//3)))
    # grow
    for i in tqdm.trange(500):
        world = next_state(world)
        vid.add(to_rgb(matrix_to_png()))

mvp.ipython_display('teaser.mp4', maxduration=200, loop=True)
Example #13
    def show(self, **kw):
        self.close()
        fn = self.params['filename']
        display(mvp.ipython_display(fn, **kw))