def test_image_io():
    pixel = (np.random.rand(128, 128, 3) * 255).astype(np.uint8)
    for ext in ['bmp', 'png']:  # jpg is also supported but hard to test here since it's lossy
        fn = 'taichi-image-io-test.' + ext
        ti.imwrite(pixel, fn)
        pixel_r = ti.imread(fn)
        assert (pixel_r == pixel).all()
        os.remove(fn)
def verify_image(window, image_name):
    if REGENERATE_GROUNDTRUTH_IMAGES:
        ground_truth_name = f"tests/python/expected/{image_name}.png"
        window.write_image(ground_truth_name)
    else:
        ground_truth_name = str(
            pathlib.Path(__file__).parent) + f"/expected/{image_name}.png"
        actual_name = get_temp_png()
        window.write_image(actual_name)
        ground_truth_np = ti.imread(ground_truth_name)
        actual_np = ti.imread(actual_name)
        assert len(ground_truth_np.shape) == len(actual_np.shape)
        for i in range(len(ground_truth_np.shape)):
            assert ground_truth_np.shape[i] == actual_np.shape[i]
        diff = ground_truth_np - actual_np
        mse = np.mean(diff * diff)
        assert mse <= 0.1  # the pixel values are 0~255
        os.remove(actual_name)
def texture_as_field(filename):
    img_np = np.float32(ti.imread(filename) / 255)
    img = ti.Vector.field(3, float, img_np.shape[:2])

    @ti.materialize_callback
    def init_texture():
        img.from_numpy(img_np)

    return img
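# Usage sketch for texture_as_field above (not from the original source):
# 'wood.jpg' is a hypothetical RGB image; any file ti.imread can load works.
import taichi as ti

ti.init(arch=ti.cpu)

tex = texture_as_field('wood.jpg')

# to_numpy() forces materialization, which runs init_texture and fills the field.
arr = tex.to_numpy()
print(arr.shape, arr.min(), arr.max())  # (H, W, 3), values roughly in [0, 1]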
def test_image_io_vector(resx, resy, comp, ext, dt):
    shape = (resx, resy)
    pixel = np.random.rand(*shape, comp).astype(to_numpy_type(dt))
    pixel_t = ti.Vector.field(comp, dt, shape)
    pixel_t.from_numpy(pixel)
    fn = make_temp_file(suffix='.' + ext)
    ti.imwrite(pixel_t, fn)
    pixel_r = (ti.imread(fn).astype(to_numpy_type(dt)) + 0.5) / 256.0
    assert np.allclose(pixel_r, pixel, atol=2e-2)
    os.remove(fn)
def test_image_io_uint(resx, resy, comp, ext, dt):
    shape = (resx, resy)
    np_type = to_numpy_type(dt)
    # When saving to disk, pixel data will be truncated into 8 bits.
    # Be careful here if you want lossless saving.
    np_max = np.iinfo(np_type).max // 256
    pixel = np.random.randint(256, size=(*shape, comp), dtype=np_type) * np_max
    pixel_t = ti.Vector.field(comp, dt, shape)
    pixel_t.from_numpy(pixel)
    fn = make_temp_file(suffix='.' + ext)
    ti.imwrite(pixel_t, fn)
    pixel_r = ti.imread(fn).astype(np_type) * np_max
    assert (pixel_r == pixel).all()
    os.remove(fn)
def test_image_io(resx, resy, comp, ext, is_field, dt):
    if comp != 1:
        shape = (resx, resy, comp)
    else:
        shape = (resx, resy)
    if is_field:
        pixel_t = ti.field(dt, shape)
    pixel = np.random.randint(256, size=shape, dtype=to_numpy_type(dt))
    if is_field:
        pixel_t.from_numpy(pixel)
    fn = make_temp_file(suffix='.' + ext)
    if is_field:
        ti.imwrite(pixel_t, fn)
    else:
        ti.imwrite(pixel, fn)
    pixel_r = ti.imread(fn)
    if comp == 1:
        # from (resx, resy, 1) to (resx, resy)
        pixel_r = pixel_r.reshape((resx, resy))
    assert (pixel_r == pixel).all()
    os.remove(fn)
def test_save_image_without_window(dtype):
    n = 255
    pixels = ti.field(dtype=dtype, shape=(n, n, 3))

    @ti.kernel
    def paint(c: dtype):
        for i, j, k in pixels:
            pixels[i, j, k] = c

    gui = ti.GUI("Test", res=(n, n), show_gui=False)
    for i in [0, 32, 64, 128, 255]:
        if dtype is ti.u8:
            paint(i)
        else:
            paint(i * 1.0 / n)
        gui.set_image(pixels)
        image_path = make_temp_file(suffix='.png')
        gui.show(image_path)
        image = ti.imread(image_path)
        delta = (image - i).sum()
        assert delta == 0, "Expected image difference to be 0 but got {} instead.".format(
            delta)
def __init__(self, texture, scale=None):
    super().__init__()
    if isinstance(texture, str):
        texture = ti.imread(texture)

    # convert UInt8 into Float32 for storage:
    if texture.dtype == np.uint8:
        texture = texture.astype(np.float32) / 255
    elif texture.dtype == np.float64:
        texture = texture.astype(np.float32)

    if len(texture.shape) == 3 and texture.shape[2] == 1:
        texture = texture.reshape(texture.shape[:2])

    # either RGB or greyscale
    if len(texture.shape) == 2:
        self.texture = ti.field(float, texture.shape)
    else:
        assert len(texture.shape) == 3, texture.shape
        texture = texture[:, :, :3]
        assert texture.shape[2] == 3, texture.shape
        if scale is not None:
            if callable(scale):
                texture = scale(texture)
            else:
                texture *= np.array(scale)[None, None, ...]
        # TODO: use create_field for this
        self.texture = ti.Vector.field(3, float, texture.shape[:2])

    @ti.materialize_callback
    def init_texture():
        self.texture.from_numpy(texture)
import taichi as ti
import numpy as np
import math

ti.init(arch=ti.cpu)

h = 512
w = 512
I = ti.field(dtype=ti.f32, shape=(h, w))
read_np = np.ndarray(dtype=np.uint8, shape=(h, w))
read_np = ti.imread('target.png')


@ti.kernel
def test():
    # for i, j in ti.ndrange(h, w):
    #     if i > 50 and i < 461:
    #         I[i, j] = 0.5
    print('test')


test()

gui = ti.GUI("RTE_DR")
while gui.running:
    while gui.get_event(ti.GUI.PRESS):
        if gui.event.key == ti.GUI.ESCAPE:
            exit()
    gui.set_image(read_np)
    gui.show()
import taichi as ti
import taichi_three as t3
import numpy as np

ti.init(ti.cpu)

scene = t3.Scene()
obj = t3.readobj('assets/torus.obj', scale=0.8)
model = t3.Model(t3.Mesh.from_obj(obj))
model.material = t3.Material(
    t3.CookTorrance(color=t3.Texture(ti.imread('assets/cloth.jpg')), ))
scene.add_model(model)
camera = t3.Camera()
camera.ctl = t3.CameraCtl(pos=[0, 1, -1.8])
scene.add_camera(camera)
light = t3.Light([0.4, -1.5, 1.8])
scene.add_light(light)

gui = ti.GUI('Model', camera.res)
while gui.running:
    gui.get_event(None)
    gui.running = not gui.is_pressed(ti.GUI.ESCAPE)
    camera.from_mouse(gui)
    scene.render()
    gui.set_image(camera.img)
    gui.show()
import taichi as ti
import taichi_three as t3
import numpy as np

ti.init(ti.cpu)

scene = t3.Scene()
model = t3.Model(obj=t3.readobj('assets/torus.obj'),
                 tex=ti.imread('assets/cloth.jpg'))
camera = t3.Camera()
scene.add_model(model)
scene.add_camera(camera)
light = t3.Light([0.4, -1.5, 1.8])
scene.add_light(light)

gui = ti.GUI('Model', camera.res)
while gui.running:
    gui.get_event(None)
    gui.running = not gui.is_pressed(ti.GUI.ESCAPE)
    camera.from_mouse(gui)
    scene.render()
    gui.set_image(camera.img)
    gui.show()
import taichi as ti
import numpy as np

ti.init()

N = 320
img = ti.Vector(3, dt=ti.f32, shape=(N, N))
canvas = ti.Vector(3, dt=ti.f32, shape=(N * 2, N))
F = ti.Matrix(2, 2, dt=ti.f32, shape=())
cursor_rest = ti.Vector(2, dt=ti.f32, shape=())
cursor_deformed = ti.Vector(2, dt=ti.f32, shape=())

img.from_numpy(
    ti.imread('D:\\My_Learning\\taichi\\demos-lec3\\bob.png')[:, :, :3].astype(
        np.float32) / 255)
print(img.shape)


@ti.kernel
def paint():
    for i, j in canvas:
        if i < N:
            canvas[i, j] = img[i, j]  #pass
        else:
            x_deformed = ti.Vector([(i - N) / N - 0.5, j / N - 0.5])
            Finv = F[None].inverse()
            x_rest = Finv @ x_deformed
            p = min(N - 1, max(0, int((x_rest[0] + 0.5) * N)))
            q = min(N - 1, max(0, int((x_rest[1] + 0.5) * N)))
import taichi as ti
import os

ti.init(arch=ti.cpu)

pixel = ti.field(ti.u8, shape=(512, 512, 3))


@ti.kernel
def paint():
    for I in ti.grouped(pixel):
        pixel[I] = ti.random() * 255


paint()
pixel = pixel.to_numpy()
ti.imshow(pixel, 'Random Generated')
for ext in ['bmp', 'png', 'jpg']:
    fn = 'taichi-example-random-img.' + ext
    ti.imwrite(pixel, fn)
    pixel_r = ti.imread(fn)
    if ext != 'jpg':
        assert (pixel_r == pixel).all()
    else:
        ti.imshow(pixel_r, 'JPEG Read Result')
    os.remove(fn)
def read_image(fname):
    img.from_numpy(ti.imread(fname)[:, :, :3].astype(np.float32) / 255.0)
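# Sketch of the context read_image above relies on (an assumption, not original
# code): a module-level RGB field whose resolution matches the image on disk.
import taichi as ti
import numpy as np

ti.init(arch=ti.cpu)

img = ti.Vector.field(3, dtype=ti.f32, shape=(512, 512))  # hypothetical 512x512 image


def read_image(fname):
    img.from_numpy(ti.imread(fname)[:, :, :3].astype(np.float32) / 255.0)


read_image('picture.png')  # hypothetical file name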
                         main_res=args.res,
                         n_views=args.views,
                         sty_res=args.viewres)
    else:
        if args.method == "mpm":
            from nssim import MPMSimulator2D
            sim = MPMSimulator2D("cuda",
                                 grid_size=args.gridsize,
                                 num_particles=args.npart,
                                 dt=args.dt,
                                 lr=args.lr)
        else:
            from nssim import SPHSimulator2D
            sim = SPHSimulator2D("cuda",
                                 grid_size=args.gridsize,
                                 num_particles=args.npart,
                                 dt=args.dt,
                                 lr=args.lr)
        from nssim import CanvasRenderer
        renderer = CanvasRenderer(sim, main_res=args.res)

    sim.initialize()
    gui = ti.GUI('Fluid', args.res, background_color=0x00000)

    target_img = ti.imread(args.style_img[0]).astype(np.float32) / 255  # initial color field
    # resize image if necessary
    if target_img.shape[0] != args.viewres:
        target_img = resize(target_img, (args.viewres, args.viewres),
                            anti_aliasing=True)
    if args.dim == 3:
        target_img = np.tile(target_img, (args.views, 1, 1, 1))

    main_tag = TAG_START  # tag to send/recv stylized images
    switch_tag = 1  # tag to switch images
    ready = True  # the ready flag controls whether to send a new input to styler
    paused = False
    # keyboard response may take a few frames, a flag is used to avoid repetitive events
    in_event = False
    request_recv = None

    sim.set_target(target_img)
import taichi as ti
import taichi_three as t3
import numpy as np

ti.init(ti.cpu)

scene = t3.Scene()
texture = ti.imread("assets/cloth.jpg")
model = t3.Model(obj=t3.readobj('assets/monkey.obj', scale=0.6), tex=texture)
scene.add_model(model)
camera = t3.Camera(res=(512, 512), pos=[0, 0, -2], target=[0, 0, 0])
scene.add_camera(camera)
light = t3.Light(dir=[0, 0, 1], color=[1.0, 1.0, 1.0])
scene.add_light(light)
light2 = t3.Light(dir=[0, -1, 0], color=[1.0, 1.0, 1.0])
scene.add_light(light2)

gui = ti.GUI('Model', camera.res)
while gui.running:
    gui.running = not gui.get_event(ti.GUI.ESCAPE)
    camera.from_mouse(gui)
    scene.render()
    gui.set_image(camera.img)
    gui.show()
    vr.set_volume(vol)
    vr.cam_pos[None] = tl.vec3(*in_circles(t))

    # Create Reference if necessary
    if args.ref:  # Generate new reference
        vr.set_tf_tex(tf)
        vr.forward(args.fw_sampling_rate, jitter=False)
        plot_tf(vr.tf_tex.to_torch().permute(
            1, 0).contiguous()).savefig('temp_tf_reference.png')
        gui_fw.set_image(vr.output_rgb)
        gui_fw.show()
        gui_bw.set_image(vr.output_rgb)
        gui_bw.show()
        ti.imwrite(vr.output_rgba, 'temp_reference.png')
        vr.set_reference(vr.output_rgba.to_numpy())
    else:  # Use old reference
        vr.set_reference(ti.imread('temp_reference.png') / 255.0)

    # Optimize for Transfer Function
    vr.set_tf_tex(tf_init)  # initial tf
    lr = args.lr
    for i in range(args.iterations):
        # Optimization
        vr.backward(args.bw_sampling_rate, jitter=args.bw_jitter)  # computes grads
        vr.apply_grad(lr, args.mom, args.clip_grads)  # Gradient descent with momentum
        lr *= args.lr_decay  # decay

        # Log Backward Pass
        gui_bw.set_image(vr.output_rgb)
        render_video.write_frame(vr.output_rgb)
        gui_bw.show()
import taichi as ti
import taichi_three as t3
import numpy as np

ti.init(ti.cpu)

scene = t3.Scene()
obj = t3.readobj('assets/torus.obj', scale=0.8)
model = t3.Model(t3.Mesh.from_obj(obj))
model.material = t3.Material(t3.CookTorrance(
    color=t3.Texture(ti.imread('assets/cloth.jpg')),
))
scene.add_model(model)
camera = t3.Camera()
camera.ctl = t3.CameraCtl(pos=[0, 1, -1.8])
scene.add_camera(camera)
light = t3.Light([0.4, -1.5, 1.8])
scene.add_light(light)

gui = ti.GUI('Textures', camera.res)
while gui.running:
    gui.get_event(None)
    gui.running = not gui.is_pressed(ti.GUI.ESCAPE)
    camera.from_mouse(gui)
    scene.render()
    gui.set_image(camera.img)
    gui.show()
import taichi as ti
import taichi_three as t3
import numpy as np

ti.init(ti.cpu)

scene = t3.Scene()
model = t3.Model(t3.Mesh.from_obj(t3.readobj('assets/torus.obj', scale=0.8)))
model.material = t3.Material(
    t3.CookTorrance(
        color=t3.Texture(ti.imread('assets/cloth.jpg')),
        roughness=t3.Texture(ti.imread('assets/pattern.jpg')),
        metallic=t3.Constant(0.5),
    ))
scene.add_model(model)
camera = t3.Camera()
camera.ctl = t3.CameraCtl(pos=[0.8, 0, 2.5])
scene.add_camera(camera)
light = t3.Light([0, -0.5, -1])
scene.add_light(light)
ambient = t3.AmbientLight(0.3)
scene.add_light(ambient)

gui = ti.GUI('PBR demo', camera.res)
while gui.running:
    gui.get_event(None)
    gui.running = not gui.is_pressed(ti.GUI.ESCAPE)
    camera.from_mouse(gui)
    model.L2W[None] = t3.rotateX(angle=t3.get_time())
    scene.render()
    gui.set_image(camera.img)
import taichi as ti
import os

images = []
frames = 0
while True:
    file = f'{frames + 1:04d}.png'
    if not os.path.exists(file):
        break
    print('Loading', file)
    images.append(ti.imread(file))
    frames += 1

res = images[0].shape[:2]
gui = ti.GUI('imseqshow', res)
while gui.running:
    gui.set_image(images[gui.frame % frames])
    gui.show()
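# Companion sketch (not from the original source): write a short numbered PNG
# sequence named 0001.png, 0002.png, ... that the viewer above can replay.
import taichi as ti
import numpy as np

ti.init(arch=ti.cpu)

for frame in range(8):
    img = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
    ti.imwrite(img, f'{frame + 1:04d}.png')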
import taichi as ti
import taichi_three as t3
import numpy as np

ti.init(ti.cpu)

scene = t3.Scene()
obj = t3.readobj('assets/torus.obj', scale=0.8)
model = t3.Model(t3.Mesh.from_obj(obj))
model.material = t3.Material(
    t3.CookTorrance(
        color=t3.Texture(ti.imread('assets/cloth.jpg')),
        ambient=t3.Texture(ti.imread('assets/pattern.jpg')),
    ))
scene.add_model(model)
camera = t3.Camera()
camera.ctl = t3.CameraCtl(pos=[0, 1, -1.8])
scene.add_camera(camera)
ambient_light = t3.AmbientLight()
scene.add_light(ambient_light)

gui = ti.GUI('Model', camera.res)
while gui.running:
    gui.get_event(None)
    gui.running = not gui.is_pressed(ti.GUI.ESCAPE)
    camera.from_mouse(gui)
    scene.render()
    gui.set_image(camera.img)
    gui.show()
import taichi as ti
import numpy as np

ti.init()

img = ti.imread('noise.png')
#img = ti.imread('/opt/cuda/samples/3_Imaging/imageDenoising/data/portrait_noise.bmp')
img = np.float32(img / 255)
w, h, chans = img.shape

src = ti.Vector.field(chans, float, (w, h))
dst = ti.Vector.field(chans, float, (w, h))
src.from_numpy(img)

noise = 1 / 1.45**2
lerp_c = 0.2
win_rad = 3
blk_rad = 3
win_area = (2 * win_rad + 1)**2
wei_thres = 0.1
lerp_thres = 0.1


@ti.kernel
def denoise():
    for x, y in src:
        cnt = 0.0
        wei = 0.0
        clr = ti.Vector([0.0, 0.0, 0.0])
        for i, j in ti.ndrange((-win_rad, win_rad + 1), (-win_rad, win_rad + 1)):
import taichi as ti
import numpy as np

ti.init()

N = 320
img = ti.Vector(3, dt=ti.f32, shape=(N, N))
# canvas
canvas = ti.Vector(3, dt=ti.f32, shape=(N * 2, N))
F = ti.Matrix(2, 2, dt=ti.f32, shape=())
cursor_rest = ti.Vector(2, dt=ti.f32, shape=())
cursor_deformed = ti.Vector(2, dt=ti.f32, shape=())

img.from_numpy(ti.imread('bob.png')[:, :, :3].astype(np.float32) / 255)


@ti.kernel
def paint():
    for i, j in canvas:
        if i < N:
            canvas[i, j] = img[i, j]
        else:
            x_deformed = ti.Vector([(i - N) / N - 0.5, j / N - 0.5])
            Finv = F[None].inverse()
            x_rest = Finv @ x_deformed
            p = min(N - 1, max(0, int((x_rest[0] + 0.5) * N)))
            q = min(N - 1, max(0, int((x_rest[1] + 0.5) * N)))
            canvas[i, j] = img[p, q]
import taichi as ti

ti.init()

## Load the image:
input_file_name = input('Enter the input image file name: ')
input_image = ti.imread(input_file_name)

## Process the image:
image = ti.field(ti.u8, input_image.shape)
image.from_numpy(input_image)


@ti.kernel
def process():
    for i, j, k in image:
        image[i, j, k] = 255 - image[i, j, k]  # invert color


process()

## Save the image:
output_image = image.to_numpy()
ti.imshow(output_image)

output_file_name = input('Enter the image file name to save: ')
ti.imwrite(output_image, output_file_name)
import taichi as ti
import taichi_three as t3
import numpy as np

ti.init(ti.cpu)

scene = t3.Scene()
obj = t3.readobj('assets/cube.obj', scale=0.6)
model = t3.Model(t3.Mesh.from_obj(obj))
model.material = t3.Material(
    t3.CookTorrance(
        color=t3.Texture(ti.imread('assets/cloth.jpg')),
        normal=t3.NormalMap(
            texture=t3.Texture(ti.imread('assets/normal.png'))),
    ))
scene.add_model(model)
camera = t3.Camera()
camera.ctl = t3.CameraCtl(pos=[0, 1, 1.8])
scene.add_camera(camera)
light = t3.Light([0.4, -0.8, -1.7])
scene.add_light(light)

gui = ti.GUI('Normal map', camera.res)
while gui.running:
    gui.get_event(None)
    gui.running = not gui.is_pressed(ti.GUI.ESCAPE)
    camera.from_mouse(gui)
    scene.render()
    gui.set_image(camera.img)
    #gui.set_image(camera.fb['normal'].to_numpy() * 0.5 + 0.5)
    gui.show()
def show(self, arguments: list = sys.argv[2:]):
    """Visualize an OBJ/NPZ model using Taichi THREE"""
    parser = argparse.ArgumentParser(prog='t3 show',
                                     description=f"{self.show.__doc__}")
    parser.add_argument(
        'filename',
        help='File name of the OBJ/NPZ model to visualize, e.g. monkey.obj')
    parser.add_argument('-s', '--scale', default=0.75, type=float,
                        help='Specify a scale parameter')
    parser.add_argument('-u', '--resx', default=512, type=int,
                        help='Specify window width')
    parser.add_argument('-v', '--resy', default=512, type=int,
                        help='Specify window height')
    parser.add_argument('-A', '--ambient', default=0, type=float,
                        help='Specify ambient light strength')
    parser.add_argument('-o', '--ortho', action='store_true',
                        help='Display in orthogonal mode')
    parser.add_argument('-l', '--lowp', action='store_true',
                        help='Shade faces by interpolation')
    parser.add_argument('-x', '--flipx', action='store_true',
                        help='Flip X axis of model when display')
    parser.add_argument('-y', '--flipy', action='store_true',
                        help='Flip Y axis of model when display')
    parser.add_argument('-z', '--flipz', action='store_true',
                        help='Flip Z axis of model when display')
    parser.add_argument('-f', '--flipface', action='store_true',
                        help='Flip face culling direction')
    parser.add_argument('-F', '--flipnorm', action='store_true',
                        help='Flip face normal direction')
    parser.add_argument('-b', '--bothface', action='store_true',
                        help='Including both face, no culling')
    parser.add_argument('-N', '--renorm', action='store_true',
                        help='Reset normal vectors to flat')
    parser.add_argument('-S', '--showhints', action='store_true',
                        help='Show information about pixel under cursor')
    parser.add_argument('-T', '--taa', default=True, action='store_true',
                        help='Enable temporal anti-aliasing')
    parser.add_argument('-t', '--texture', type=str,
                        help='Path to texture to bind')
    parser.add_argument('-n', '--normtex', type=str,
                        help='Path to normal map to bind')
    parser.add_argument('-m', '--metallic', type=str,
                        help='Path to metallic map to bind')
    parser.add_argument('-r', '--roughness', type=str,
                        help='Path to roughness map to bind')
    parser.add_argument('-a', '--arch', default='cpu', type=str,
                        help='Backend to use for rendering')
    args = parser.parse_args(arguments)

    import taichi as ti
    import taichi_three as t3
    import numpy as np

    ti.init(getattr(ti, args.arch))

    scene = t3.Scene()
    obj = t3.readobj(args.filename, scale=args.scale if args.scale != 0 else 1)
    t3.objflipaxis(obj, args.flipx, args.flipy, args.flipz)
    if args.scale == 0:
        t3.objautoscale(obj)
    if args.flipface:
        t3.objflipface(obj)
    if args.flipnorm:
        t3.objflipnorm(obj)
    if args.renorm:
        t3.objmknorm(obj)
    if args.bothface:
        t3.objbothface(obj)

    model = (t3.ModelLow if args.lowp else t3.Model).from_obj(obj)
    if args.texture is not None:
        model.add_texture('color', ti.imread(args.texture))
    if args.normtex is not None:
        model.add_texture('normal', ti.imread(args.normtex))
    if args.metallic is not None:
        model.add_texture('metallic', ti.imread(args.metallic))
    if args.roughness is not None:
        model.add_texture('roughness', ti.imread(args.roughness))
    scene.add_model(model)

    camera = t3.Camera(res=(args.resx, args.resy), taa=args.taa)
    if args.showhints:
        camera.fb.add_buffer('pos', 3)
        camera.fb.add_buffer('texcoor', 2)
        camera.fb.add_buffer('normal', 3)
    if args.ortho:
        camera.type = camera.ORTHO
    scene.add_camera(camera)

    if args.ambient:
        light = t3.AmbientLight(args.ambient)
    else:
        light = t3.Light([0.4, -1.5, 0.8])
    scene.add_light(light)

    gui = ti.GUI('Model', camera.res)
    while gui.running:
        gui.get_event(None)
        gui.running = not gui.is_pressed(ti.GUI.ESCAPE)
        camera.from_mouse(gui)
        scene.render()
        gui.set_image(camera.img)
        if args.showhints:
            coor = gui.get_cursor_pos()
            pos = camera.fb.fetchpixelinfo('pos', coor)
            color = camera.fb.fetchpixelinfo('img', coor)
            texcoor = camera.fb.fetchpixelinfo('texcoor', coor)
            normal = camera.fb.fetchpixelinfo('normal', coor)
            gui.text(f'color: [{color.x:.2f} {color.y:.2f} {color.z:.2f}]; '
                     f'pos: [{pos.x:+.2f} {pos.y:+.2f} {pos.z:+.2f}]', (0, 1))
            gui.text(f'texcoor: [{texcoor.x:.2f} {texcoor.y:.2f}]; '
                     f'normal: [{normal.x:+.2f} {normal.y:+.2f} {normal.z:+.2f}]',
                     (0, 1 - 16 / camera.res[1]))
        gui.show()
def __init__(self, path):
    self.img = ti.imread(path)
    tex_w, tex_h = self.img.shape[0:2]
    self.field = ti.Vector.field(3, dtype=ti.u8, shape=(tex_w, tex_h))
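# Sketch of how the constructor above might be completed (an assumption, not
# the original code): upload the pixels into the field, dropping any alpha channel.
import taichi as ti

ti.init(arch=ti.cpu)


class Texture:
    def __init__(self, path):
        self.img = ti.imread(path)
        tex_w, tex_h = self.img.shape[0:2]
        self.field = ti.Vector.field(3, dtype=ti.u8, shape=(tex_w, tex_h))
        # ti.imread returns uint8 pixels, matching the u8 field; keep RGB only.
        self.field.from_numpy(self.img[:, :, :3])


# tex = Texture('bricks.png')  # hypothetical texture file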