Example #1
def main(export):

    resultdir = "./mandelbox_result"
    video_manager = ti.VideoManager(output_dir=resultdir,
                                    framerate=24,
                                    automatic_build=False)

    gui = ti.GUI("Mandelbox", (w, h))
    rng = range(50) if export else range(10000000)

    for ts in rng:

        # Mouse wheel zooms the camera: scroll up moves in (clamped at distance 4), scroll down moves out.
        while gui.get_event(ti.GUI.MOTION):
            if gui.event.key == ti.GUI.WHEEL:
                if gui.event.delta[1] > 0:
                    camdis[None] = max(camdis[None] - 0.5, 4)
                elif gui.event.delta[1] < 0:
                    camdis[None] += 0.5

        paint(ts)
        gui.set_image(pixels)

        gui.text(f"t:{ts}", (0.02, 0.99), color=0x000000)
        gui.text(f"scale:{scale[None]:.2}", (0.02, 0.95), color=0x000000)
        gui.text(f"min_r:{min_radius[None]:.2}", (0.02, 0.90), color=0x000000)
        gui.text(f"fix_r:{fix_radius[None]:.2}", (0.02, 0.85), color=0x000000)

        gui.show()
        if export:
            video_manager.write_frame(pixels.to_numpy())

    if export:
        print("Exporting gif result...")
        video_manager.make_video(gif=True, mp4=False)
        print("Finish.")
Example #2
    def set_output_video(self, path, framerate=24):
        '''
        Export frames painted in the GUI to a video.

        FIXME: Only works for ``self.img`` rendering; ``self.circles`` is not supported yet.
        As a workaround, set ``self.screenshot_dir = '/tmp'``, then run ``cd /tmp && ti video``.
        '''
        output_dir = os.path.dirname(path)
        output_file = os.path.basename(path)
        # Derive the output container from the file extension; anything other
        # than gif/mp4 falls back to the VideoManager default.
        output_ext = output_file.split(os.path.extsep)[-1]
        if output_ext not in ['gif', 'mp4']:
            output_ext = None
        self.video_manager = ti.VideoManager(output_dir,
                                             framerate=framerate,
                                             automatic_build=False)
        # Remember the requested extension so callers can pick gif vs. mp4 later.
        self.video_manager.taichi_glsl_output_ext = output_ext

        # Monkey-patch get_output_filename so the video lands exactly at `path`
        # (plus the suffix chosen by VideoManager) instead of its default name.
        def _get_output_filename(suffix):
            if output_ext is not None:
                return path[:-(len(output_ext) + 1)] + suffix
            else:
                return path + suffix

        self.video_manager.get_output_filename = _get_output_filename
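
A minimal usage sketch for the method above, assuming a host instance `anim` with an `img` field and a `render()` step (both hypothetical names) and assuming frames are pushed manually through the patched manager; the extension of `path` decides whether the final build is a GIF or an MP4.

anim.set_output_video('/tmp/out.gif')        # '.gif' selects taichi_glsl_output_ext == 'gif'
for frame in range(120):
    anim.render()                            # draws into anim.img
    anim.video_manager.write_frame(anim.img.to_numpy())
# Builds /tmp/out.gif through the patched get_output_filename.
anim.video_manager.make_video(gif=True, mp4=False)
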
Example #3
    def __init__(self, dump):
        self.display_mode = 0
        self.is_active = True
        self.dump = dump
        self.frame = 0

        if self.dump:
            result_dir = "./results"
            self.video_manager = ti.VideoManager(output_dir=result_dir,
                                                 framerate=24,
                                                 automatic_build=False)
Example #4
def video_taichi_logo(result_dir):
    from taichi.examples.rendering.taichi_logo import n, paint, x
    video_manager = ti.VideoManager(output_dir=result_dir,
                                    framerate=24,
                                    automatic_build=False)
    paint()
    gui = ti.GUI('Logo', (n, n), show_gui=False)
    for i in range(FRAMES):
        gui.set_image(x)
        video_manager.write_frame(gui.get_image())
        gui.clear()

    video_manager.make_video(mp4=True, gif=False)
Example #5
    def __init__(self,
                 grid_res,
                 res,
                 mode,
                 result_dir='./results',
                 video_rate=24,
                 auto=True):
        super().__init__(grid_res, res)

        self.mode = mode
        self.video_manager = ti.VideoManager(output_dir=result_dir,
                                             framerate=video_rate,
                                             automatic_build=auto)

        self.pixels = ti.field(ti.u8, shape=(res, res, 3))
Example #6
    def __init__(self,
                 screen_res,
                 scene: World,
                 dump=True,
                 result_dir="./results"):
        self.dump = dump
        self.scene = scene
        self.screen_res = (screen_res,
                           screen_res * scene.height // scene.width)
        self.buffer = ti.Vector.field(3, dtype=ti.f32, shape=self.screen_res)
        self.gui = ti.GUI("fluid", self.screen_res)
        if self.dump:
            self.video_manager = ti.VideoManager(output_dir=result_dir,
                                                 framerate=24,
                                                 automatic_build=False)
Example #7
    def solve(self):

        gui = ti.GUI("Lattice Boltzmann Method (D2Q9)", (lbm.nx, lbm.ny))
        gui.fps_limit = None
        result_dir = "./result"
        video_manager = ti.VideoManager(output_dir=result_dir,
                                        framerate=60,
                                        automatic_build=False)

        lbm.init()

        for i in range(self.steps):

            self.stream_and_collision()
            self.compute_macro_var()
            self.apply_bc()

            # rho = (self.rho.to_numpy()-1.0)*2.0

            # mask = self.mask.to_numpy()*1.0

            vel = self.vel.to_numpy()
            # vel_mag = (vel[:, :, 0]**2.0+vel[:, :, 1]**2.0)**0.5
            # vel_img = cm.plasma(vel_mag / 0.15)

            # Vorticity from finite-difference gradients of the 2D velocity field.
            ugrad = np.gradient(vel[:, :, 0])
            vgrad = np.gradient(vel[:, :, 1])
            vor = ugrad[1] - vgrad[0]
            # vor_img = abs(ugrad[1] - vgrad[0]) * 50
            # color map
            colors = [(1, 1, 0), (0.953, 0.490, 0.016), (0, 0, 0),
                      (0.176, 0.976, 0.529), (0, 1, 1)]
            my_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
                'my_cmap', colors)
            vor_img = cm.ScalarMappable(norm=matplotlib.colors.Normalize(
                vmin=-0.02, vmax=0.02),
                                        cmap=my_cmap).to_rgba(vor)

            # img = np.concatenate((vor_img, vel_img), axis=1)

            gui.set_image(vor_img)

            # if i % 1000 == 0:
            #     video_manager.write_frame(gui.get_image())

            gui.show()
Example #8
def video_cornell_box(result_dir):
    from taichi.examples.rendering.cornell_box import (render, tonemap,
                                                       tonemapped_buffer)
    video_manager = ti.VideoManager(output_dir=result_dir,
                                    framerate=24,
                                    automatic_build=False)
    gui = ti.GUI("Taichi Cornell Box",
                 res=800,
                 background_color=0x112F41,
                 show_gui=False)
    for i in range(FRAMES):
        render()
        interval = 10
        if i % interval == 0:
            tonemap(i)

        gui.set_image(tonemapped_buffer)
        video_manager.write_frame(gui.get_image())
        gui.clear()
    video_manager.make_video(mp4=True, gif=False)
Example #9
def video_mpm99(result_dir):
    from taichi.examples.simulation.mpm99 import (dt, initialize, material,
                                                  substep, x)

    video_manager = ti.VideoManager(output_dir=result_dir,
                                    framerate=24,
                                    automatic_build=False)
    initialize()
    gui = ti.GUI("Taichi MLS-MPM-99",
                 res=512,
                 background_color=0x112F41,
                 show_gui=False)
    for i in range(FRAMES):
        for s in range(int(2e-3 // dt)):
            substep()
        gui.circles(x.to_numpy(),
                    radius=1.5,
                    palette=[0x068587, 0xED553B, 0xEEEEF0],
                    palette_indices=material)
        video_manager.write_frame(gui.get_image())
        gui.clear()
    video_manager.make_video(mp4=True, gif=False)
Example #10






init_grid()
init_particle()


gui = ti.GUI("APIC", (res, res))


result_dir = "./result"
video_manager = ti.VideoManager(output_dir=result_dir, framerate=30, automatic_build=False)


for frame in range(450):

	gui.clear(0xFFFFFF)

	for i in range(substep):
		step()


	# break
	if debug:
		for i in range(m_g):
			for j in range(m_g):
				color = 0
Example #11
run_scheme = SchemeType.Advection_Projection

from advection import MacCormackSolver
advection_solver = MacCormackSolver

from projection import RedBlackGaussSedialProjectionSolver
projection_solver = RedBlackGaussSedialProjectionSolver
p_jacobi_iters = 30
dye_decay = 0.99

# Save output to a video (GIF); a sketch of the consuming loop follows this snippet.
bool_save = False
save_frame_length = 240
save_root = './tmp_result'
file_name = 'Projection-MacCormack-GuassSedial-RK2'
save_path = os.path.join(save_root, file_name)
video_manager = ti.VideoManager(output_dir=save_path,
                                framerate=24,
                                automatic_build=False)

# if __name__ == '__main__':
#     import sys
#     thismodule = sys.modules[__name__]
#     print(thismodule.__dict__.items())
#     # print(get_variable_from_module('projection_config'))
#     # print(config.default_config.__dict__.items())
#     for k, v in config.default_config.__dict__.items():
#         if (k.startswith('m_')):
#             print(k, v)
#             vars()[k[2:]] = v
#     print(projection_solver)
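
A minimal sketch, not part of the original config module, of a driver loop consuming `bool_save`, `save_frame_length`, and `video_manager` defined above; `step()` and `pixels` are assumed names for the scheme's update function and the display field.

gui = ti.GUI(file_name, res=(512, 512))
for frame in range(1000000):
    step()                                   # advection + projection with the solvers configured above
    gui.set_image(pixels)
    if bool_save and frame < save_frame_length:
        video_manager.write_frame(gui.get_image())
        if frame == save_frame_length - 1:
            video_manager.make_video(gif=True, mp4=False)
    gui.show()
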
Example #12
      cfl=0.5,
      smoke_alpha=1.0,
      smoke_beta=1000,
      temperature_decay=0.05,
      pressure_tolerance=1e-6,
      density_scaling=2,
      initial_speed=(0, 0, 0),
      tracker_generation=20,
      perturbation=0,
      pressure_solver='mgpcg',
      num_threads=2,
      open_boundary=True,
      maximum_pressure_iterations=200,
      super_sampling=20)

  video_manager = tc.VideoManager(output_dir='smoke_3d')
  images = []
  for i in range(600):
    print('frame', i)
    generation = tc.Texture('sphere', radius=0.08, center=(0.25, 0.1, 0.25)) + \
        tc.Texture('sphere', radius=0.08, center=(0.25, 0.1, 0.75)) + \
        tc.Texture('sphere', radius=0.08, center=(0.75, 0.1, 0.25)) + \
        tc.Texture('sphere', radius=0.08, center=(0.75, 0.1, 0.75))
    smoke.update(
        generation=generation * 100,
        color=tc.Texture(
            'const', value=colorsys.hls_to_rgb(0.02 * i + 0.0, 0.7, 1.0)),
        temperature=tc.Texture('const', value=(1, 0, 0, 0)),
        initial_velocity=tc.Texture('const', value=(0, 100, 0, 0)))
    smoke.step(0.03)
    particles = smoke.c.get_render_particles()
Example #13
    vr = VolumeRaycaster(volume_resolution=vol.shape,
                         max_samples=args.max_samples,
                         render_resolution=RESOLUTION,
                         tf_resolution=TF_RESOLUTION)
    t = np.pi * 1.5

    if args.task == 'backward':
        gui_bw = ti.GUI("Volume Raycaster (Backward)",
                        res=RESOLUTION,
                        fast_gui=True)
        gui_fw = ti.GUI("Volume Raycaster (Forward)",
                        res=RESOLUTION,
                        fast_gui=True)
        gui_tf = ti.GUI("Transfer Function Comparison", res=(640, 480))
        render_video = ti.VideoManager(output_dir='results/bw_render',
                                       framerate=24,
                                       automatic_build=False)
        render_video_fw = ti.VideoManager(output_dir='results/fw_render',
                                          framerate=24,
                                          automatic_build=False)
        tf_video = ti.VideoManager(output_dir='results/tf',
                                   framerate=24,
                                   automatic_build=False)
        # Setup Raycaster
        vr.set_volume(vol)
        vr.cam_pos[None] = tl.vec3(*in_circles(t))
        # Create Reference if necessary
        if args.ref:  # Generate new reference
            vr.set_tf_tex(tf)
            vr.forward(args.fw_sampling_rate, jitter=False)
            plot_tf(vr.tf_tex.to_torch().permute(
Example #14
                      cfl=0.5,
                      smoke_alpha=1.0,
                      smoke_beta=1000,
                      temperature_decay=0.05,
                      pressure_tolerance=1e-6,
                      density_scaling=2,
                      initial_speed=(0, 0, 0),
                      tracker_generation=20,
                      perturbation=0,
                      pressure_solver='mgpcg',
                      num_threads=2,
                      open_boundary=True,
                      maximum_pressure_iterations=200,
                      super_sampling=20)

    video_manager = tc.VideoManager(output_dir='new_year')
    images = []
    for i in range(600):
        print('frame', i)
        generation = tc.Texture('sphere', radius=0.08, center=(0.25, 0.1, 0.25)) + \
            tc.Texture('sphere', radius=0.08, center=(0.25, 0.1, 0.75)) + \
            tc.Texture('sphere', radius=0.08, center=(0.75, 0.1, 0.25)) + \
            tc.Texture('sphere', radius=0.08, center=(0.75, 0.1, 0.75))
        smoke.update(generation=generation * 100,
                     color=tc.Texture('const',
                                      value=colorsys.hls_to_rgb(
                                          0.02 * i + 0.0, 0.7, 1.0)),
                     temperature=tc.Texture('const', value=(1, 0, 0, 0)),
                     initial_velocity=tc.Texture('const',
                                                 value=(0, 100, 0, 0)))
        smoke.step(0.03)
Example #15
                                   filepath=tc.settings.get_asset_path(
                                       'envmaps/schoenbrunn-front_hd.hdr'))
        envmap.set_transform(
            tc.core.Matrix4(1.0).rotate_euler(tc.Vector(0, -30, 0)))
        scene.set_environment_map(envmap)

    return scene


def render_frame(t):
    renderer = tc.Renderer()
    renderer.initialize(preset='pt', sampler='sobol', scene=create_scene(t))
    renderer.set_post_processor(
        tc.post_process.LDRDisplay(exposure=1.0, bloom_radius=0.00))
    renderer.render(spp)
    return renderer.get_output()


if __name__ == '__main__':
    for i in range(res[0]):
        for j in range(res[1]):
            particles.append(Particle(i, j))
    video_manager = tc.VideoManager(output_dir='particles')
    images = []
    for i in range(frames + 1):
        print('frame', i)
        images.append(render_frame(12.0 * i / frames))

    video_manager.write_frames(images)
    video_manager.make_video()
Example #16
    for i in range(5):
      with tc.transform_scope(translate=(1.4 * (i - 2), 0.6, 0)):
        with tc.transform_scope(scale=(0.3, 1, 0.5), rotation=(90, 0, 0)):
          mesh = tc.Mesh('plane',
                         tc.SurfaceMaterial(
                             'emissive',
                             color=colorsys.hls_to_rgb(i * 0.2, 0.5, 1.0)))
          scene.add_mesh(mesh)

  return scene


if __name__ == '__main__':
  frames = 40
  video_manager = tc.VideoManager(output_dir='microfacet_anim')
  images = []
  for i in range(frames + 1):
    renderer = tc.Renderer()
    renderer.initialize(
        preset='pt',
        scene=create_scene(0.5 * (1 - math.cos(math.pi * i / frames))))
    renderer.set_post_processor(
        tc.post_process.LDRDisplay(exposure=1.0, bloom_radius=0.00))
    renderer.render(200)
    images.append(renderer.get_output())

  images = images + images[1:-1][::-1]  # ping-pong: append reversed frames for a seamless loop

  video_manager.write_frames(images)
  video_manager.make_video()