Example #1
def test_cpu_debug_snode_writer_out_of_bound():
    ti.set_gdb_trigger(False)

    x = ti.var(ti.f32, shape=3)

    with pytest.raises(RuntimeError):
        x[3] = 10.0
Example #2
def test_cpu_debug_snode_reader_out_of_bound():
    ti.set_gdb_trigger(False)

    x = ti.field(ti.f32, shape=3)

    with pytest.raises(RuntimeError):
        a = x[3]
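Out-of-bound checks on field reads and writes are generated only in debug mode; Examples #1 and #2 omit the ti.init call (compare Example #27, which runs the same test after ti.init(arch=ti.x64, debug=True)). A self-contained sketch of both checks, assuming a Taichi version in which ti.field has replaced the older ti.var:

import taichi as ti
import pytest

ti.init(arch=ti.cpu, debug=True)  # bound checks exist only in debug mode
ti.set_gdb_trigger(False)  # raise an exception instead of waiting for gdb

x = ti.field(ti.f32, shape=3)  # valid indices: 0, 1, 2

with pytest.raises(RuntimeError):
    x[3] = 10.0  # writer out of bound

with pytest.raises(RuntimeError):
    a = x[-1]  # reader out of bound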
Example #3
def test_loop_grad():
    ti.set_gdb_trigger()
    ti.cfg.print_ir = True
    x = ti.var(ti.f32)

    n = 16
    m = 8

    @ti.layout
    def place():
        ti.root.dense(ti.ij, (n, m)).place(x)
        ti.root.lazy_grad()

    @ti.kernel
    def func():
        for k in range(n):
            for i in range(m - 1):
                x[k, i + 1] = x[k, i] * 2

    for k in range(n):
        x[k, 0] = k
    func()

    for k in range(n):
        x.grad[k, m - 1] = 1
    func.grad()
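    # forward: x[k, i] = 2**i * x[k, 0] = 2**i * k
    # backward: d x[k, m-1] / d x[k, i] = 2**(m - 1 - i), seeded by x.grad[k, m-1] = 1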

    for k in range(n):
        for i in range(m):
            assert x[k, i] == 2**i * k
            assert x.grad[k, i] == 2**(m - 1 - i)
Example #4
def test_not_out_of_bound():
    ti.set_gdb_trigger(False)
    x = ti.field(ti.i32, shape=(8, 16))

    @ti.kernel
    def func():
        x[7, 15] = 1

    func()
Example #5
def test_assert_minimal():
    ti.set_gdb_trigger(False)

    @ti.kernel
    def func():
        assert 0

    with pytest.raises(RuntimeError):
        func()
Example #6
def test_assert_minimal():
    ti.init(debug=True)
    ti.set_gdb_trigger(False)

    @ti.kernel
    def func():
        assert 0

    func()
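A kernel-side assert is compiled in only when debug mode is enabled. Variants shown here that trigger a failure without a pytest.raises guard (#6, #8, #9, #16) presumably relied on a test-harness decorator (old Taichi had @ti.must_throw(RuntimeError)) that this scrape dropped. A self-contained sketch, assuming a Taichi version contemporary with these tests:

import taichi as ti
import pytest

ti.init(arch=ti.cpu, debug=True)  # kernel asserts are no-ops outside debug mode
ti.set_gdb_trigger(False)

@ti.kernel
def always_fails():
    assert 0

with pytest.raises(RuntimeError):
    always_fails()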
Example #7
def test_assert_ok():
    ti.set_gdb_trigger(False)

    @ti.kernel
    def func():
        x = 20
        assert 10 <= x <= 20

    func()
Example #8
def test_assert_basic():
    ti.init(debug=True)
    ti.set_gdb_trigger(False)

    @ti.kernel
    def func():
        x = 20
        assert 10 <= x < 20

    func()
Example #9
def test_out_of_bound_with_offset():
    ti.init(debug=True)
    ti.set_gdb_trigger(False)
    x = ti.var(ti.i32, shape=(8, 16), offset=(-8, -8))

    @ti.kernel
    def func():
        x[0, 0] = 1

    func()
Example #10
def test_out_of_bound():
    ti.set_gdb_trigger(False)
    x = ti.field(ti.i32, shape=(8, 16))

    @ti.kernel
    def func():
        x[3, 16] = 1

    with pytest.raises(RuntimeError):
        func()
Example #11
def test_assert_basic():
    ti.set_gdb_trigger(False)

    @ti.kernel
    def func():
        x = 20
        assert 10 <= x < 20

    with pytest.raises(RuntimeError):
        func()
Example #12
def test_not_out_of_bound_with_offset():
    ti.set_gdb_trigger(False)
    x = ti.field(ti.i32, shape=(8, 16), offset=(-4, -8))
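    # offset=(-4, -8) with shape=(8, 16) makes i valid in [-4, 4) and j in [-8, 8),
    # so both writes below hit the corners of the valid range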

    @ti.kernel
    def func():
        x[-4, -8] = 1
        x[3, 7] = 2

    func()
Example #13
def test_not_out_of_bound():
    ti.init(debug=True)
    ti.set_gdb_trigger(False)
    x = ti.var(ti.i32, shape=(8, 16))

    @ti.kernel
    def func():
        x[7, 15] = 1

    func()
Example #14
def test_not_out_of_bound_dynamic():
    ti.set_gdb_trigger(False)
    x = ti.field(ti.i32)

    ti.root.dynamic(ti.i, 16, 4).place(x)

    @ti.kernel
    def func():
        x[3] = 1

    func()
Example #15
def test_out_of_bound_dynamic():
    ti.set_gdb_trigger(False)
    x = ti.field(ti.i32)

    ti.root.dynamic(ti.i, 16, 4).place(x)
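    # dynamic SNode along i: maximum length 16 (chunk size 4), so valid indices are 0..15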

    @ti.kernel
    def func():
        x[17] = 1

    with pytest.raises(RuntimeError):
        func()
Example #16
def test_out_of_bound_dynamic():
    ti.init(debug=True)
    ti.set_gdb_trigger(False)
    x = ti.var(ti.i32)

    ti.root.dynamic(ti.i, 16, 4).place(x)

    @ti.kernel
    def func():
        x[17] = 1

    func()
Example #17
def test_out_of_bound_with_offset():
    ti.init(debug=True)
    ti.set_gdb_trigger(False)
    x = ti.field(ti.i32, shape=(8, 16), offset=(-8, -8))
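    # offset=(-8, -8) with shape=(8, 16) makes i valid only in [-8, 0),
    # so x[0, 0] below is out of bound along i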

    @ti.kernel
    def func():
        x[0, 0] = 1

    with pytest.raises(RuntimeError):
        func()
Example #18
def main():
  tc.set_gdb_trigger()
  # initialization
  scene = Scene()
  # fish(scene)
  robot(scene)
  scene.finalize()
  
  for i in range(n_actuators):
    for j in range(n_sin_waves):
      weights[i, j] = np.random.randn() * 0.01

  for i in range(scene.n_particles):
    x[0, i] = scene.x[i]
    F[0, i] = [[1, 0], [0, 1]]
    actuator_id[i] = scene.actuator_id[i]
    particle_type[i] = scene.particle_type[i]

  vec = tc.vec
  losses = []
  for iter in range(100):
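    # one optimization step: clear old gradients, run the forward simulation,
    # seed d(loss)/d(loss) = 1, then backpropagate through the whole simulation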
    ti.clear_all_gradients()
    l = forward()
    losses.append(l)
    loss.grad[None] = 1
    backward()
    print('i=', iter, 'loss=', l)
    learning_rate = 0.1

    for i in range(n_actuators):
      for j in range(n_sin_waves):
        # print(weights.grad[i, j])
        weights[i, j] -= learning_rate * weights.grad[i, j]
      bias[i] -= learning_rate * bias.grad[i]


    if iter % 10 == 0:
      # visualize
      forward(1500)
      for s in range(63, 1500, 16):
        visualize(s, 'diffmpm/iter{:03d}/'.format(iter))

  # ti.profiler_print()
  plt.title("Optimization of Initial Velocity")
  plt.ylabel("Loss")
  plt.xlabel("Gradient Descent Iterations")
  plt.plot(losses)
  plt.show()
Example #19
def main():
    tc.set_gdb_trigger()
    # initialization
    scene = Scene()
    # fish(scene)
    robot(scene)
    # scene.add_rect(0.4, 0.4, 0.2, 0.1, 0.3, 0.1, -1, 1)
    scene.finalize()

    for i in range(n_actuators):
        for j in range(n_sin_waves):
            weights[i, j] = np.random.randn() * 0.01

    for i in range(scene.n_particles):
        x[0, i] = scene.x[i]
        F[0, i] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        actuator_id[i] = scene.actuator_id[i]
        particle_type[i] = scene.particle_type[i]

    fig = plt.figure()
    plt.ion()
    ax = fig.add_subplot(111, projection='3d')

    losses = []
    for iter in range(100):
        ti.clear_all_gradients()
        l = forward()
        losses.append(l)
        loss.grad[None] = 1
        backward()
        print('i=', iter, 'loss=', l)
        learning_rate = 30

        for i in range(n_actuators):
            for j in range(n_sin_waves):
                # print(weights.grad[i, j])
                weights[i, j] -= learning_rate * weights.grad[i, j]
            bias[i] -= learning_rate * bias.grad[i]

        if iter % 20 == 0 and iter > 0:
            # visualize
            forward()
            for s in range(7, steps, 2):
                '''
        print(s)
        img = np.zeros((res[1] * res[0] * 3,), dtype=np.float32)
        splat(s)
        copy_back_and_clear(img)
        img = img.reshape(res[1], res[0], 3)
        img = np.sqrt(img)
        cv2.imshow('img', img)
        cv2.waitKey(1)
        '''
                '''
        xs, ys, zs = [], [], []
        aas, bs, cs = [], [], []
        for i in range(n_particles):
          if particle_type[i] == 0:
            xs.append(x[s, i][0])
            ys.append(x[s, i][2])
            zs.append(x[s, i][1])
          else:
            aas.append(x[s, i][0])
            bs.append(x[s, i][2])
            cs.append(x[s, i][1])

        ax.scatter(aas, bs, cs, marker='o')
        ax.scatter(xs, ys, zs, marker='o')
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)
        ax.set_zlim(0, 1)
        plt.draw()
        plt.pause(0.001)
        plt.cla()
        '''
                def to255(x):
                    return int(max(min(x * 255, 255), 0))

                xs, ys, zs = [], [], []
                us, vs, ws = [], [], []
                cs = []
                folder = 'mpm3d/iter{:04d}/'.format(iter)
                os.makedirs(folder, exist_ok=True)
                for i in range(n_particles):
                    xs.append(x[s, i][0])
                    ys.append(x[s, i][1])
                    zs.append(x[s, i][2])
                    us.append(v[s, i][0])
                    vs.append(v[s, i][1])
                    ws.append(v[s, i][2])

                    if particle_type[i] == 0:
                        # fluid
                        r = 0.3
                        g = 0.3
                        b = 1.0
                    else:
                        # neohookean
                        if actuator_id[i] != -1:
                            # actuated
                            act = actuation[s, actuator_id[i]] * 0.5
                            r = 0.5 - act
                            g = 0.5 - abs(act)
                            b = 0.5 + act
                        else:
                            r, g, b = 0.4, 0.4, 0.4

                    color = to255(r) * 65536 + 256 * to255(g) + to255(b)
                    cs.append(color)
                data = np.array(xs + ys + zs + us + vs + ws + cs,
                                dtype=np.float32)
                data.tofile(open('{}/{:04}.bin'.format(folder, s), 'wb'))

    # ti.profiler_print()
    plt.title("Optimization of Initial Velocity")
    plt.ylabel("Loss")
    plt.xlabel("Gradient Descent Iterations")
    plt.plot(losses)
    plt.show()
Example #20
def main():
  tc.set_gdb_trigger()
  # initialization
  scene = Scene()
  # fish(scene)
  robot(scene)
  # scene.add_rect(0.4, 0.4, 0.2, 0.1, 0.3, 0.1, -1, 1)
  scene.finalize()

  for i in range(n_actuators):
    for j in range(n_sin_waves):
      weights[i, j] = np.random.randn() * 0.01

  for i in range(scene.n_particles):
    x[0, i] = scene.x[i]
    F[0, i] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    actuator_id[i] = scene.actuator_id[i]
    particle_type[i] = scene.particle_type[i]

  fig = plt.figure()
  plt.ion()
  ax = fig.add_subplot(111, projection='3d')

  losses = []
  for iter in range(501):
    ti.clear_all_gradients()
    l = forward()
    losses.append(l)
    loss.grad[None] = 1
    backward()
    print('i=', iter, 'loss=', l)
    learning_rate = 10

    for i in range(n_actuators):
      for j in range(n_sin_waves):
        # print(weights.grad[i, j])
        weights[i, j] -= learning_rate * weights.grad[i, j]
      bias[i] -= learning_rate * bias.grad[i]

    if iter % 50 == -1:  # never true: the particle dump below is effectively disabled
      # visualize
      print("Dumping particles...")
      for s in range(7, steps, 2):

        def to255(x):
          return int(max(min(x * 255, 255), 0))

        xs, ys, zs = [], [], []
        us, vs, ws = [], [], []
        cs = []
        folder = 'mpm3d/iter{:04d}/'.format(iter)
        os.makedirs(folder, exist_ok=True)
        for i in range(n_particles):
          xs.append(x[s, i][0])
          ys.append(x[s, i][1])
          zs.append(x[s, i][2])
          us.append(v[s, i][0])
          vs.append(v[s, i][1])
          ws.append(v[s, i][2])

          if particle_type[i] == 0:
            # fluid
            r = 0.3
            g = 0.3
            b = 1.0
          else:
            # neohookean
            if actuator_id[i] != -1:
              # actuated
              act = actuation[s, actuator_id[i]] * 0.5
              r = 0.5 - act
              g = 0.5 - abs(act)
              b = 0.5 + act
            else:
              r, g, b = 0.4, 0.4, 0.4

          color = to255(r) * 65536 + 256 * to255(g) + to255(b)
          cs.append(color)
        data = np.array(xs + ys + zs + us + vs + ws + cs, dtype=np.float32)
        data.tofile(open('{}/{:04}.bin'.format(folder, s), 'wb'))
      print("Particles dumped")
Example #21
def init(arch=None,
         default_fp=None,
         default_ip=None,
         _test_mode=False,
         **kwargs):
    import taichi as ti

    # Make a deepcopy in case these args reference to items from ti.cfg, which are
    # actually references. If no copy is made and the args are indeed references,
    # ti.reset() could override the args to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    ti.reset()

    spec_cfg = _SpecialConfig()
    env_comp = _EnvironmentConfigurator(kwargs, ti.cfg)
    env_spec = _EnvironmentConfigurator(kwargs, spec_cfg)

    # configure default_fp/ip:
    # TODO: move these stuff to _SpecialConfig too:
    env_default_fp = os.environ.get("TI_DEFAULT_FP")
    if env_default_fp:
        if default_fp is not None:
            core.warn(
                f'ti.init argument "default_fp" overridden by environment variable TI_DEFAULT_FP={env_default_fp}'
            )
        if env_default_fp == '32':
            default_fp = f32
        elif env_default_fp == '64':
            default_fp = f64
        elif env_default_fp is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_FP={env_default_fp}, should be 32 or 64')

    env_default_ip = os.environ.get("TI_DEFAULT_IP")
    if env_default_ip:
        if default_ip is not None:
            core.warn(
                f'ti.init argument "default_ip" overridden by environment variable TI_DEFAULT_IP={env_default_ip}'
            )
        if env_default_ip == '32':
            default_ip = i32
        elif env_default_ip == '64':
            default_ip = i64
        elif env_default_ip is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_IP={env_default_ip}, should be 32 or 64')

    if default_fp is not None:
        ti.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        ti.get_runtime().set_default_ip(default_ip)

    # submodule configurations (spec_cfg):
    env_spec.add('print_preprocessed')
    env_spec.add('log_level', str)
    env_spec.add('gdb_trigger')
    env_spec.add('excepthook')

    # compiler configurations (ti.cfg):
    for key in dir(ti.cfg):
        if key in ['arch', 'default_fp', 'default_ip']:
            continue
        cast = type(getattr(ti.cfg, key))
        if cast is bool:
            cast = None
        env_comp.add(key, cast)

    unexpected_keys = kwargs.keys()
    if len(unexpected_keys):
        raise KeyError(
            f'Unrecognized keyword argument(s) for ti.init: {", ".join(unexpected_keys)}'
        )

    # dispatch configurations that are not in ti.cfg:
    if not _test_mode:
        ti.set_gdb_trigger(spec_cfg.gdb_trigger)
        ti.get_runtime().print_preprocessed = spec_cfg.print_preprocessed
        ti.set_logging_level(spec_cfg.log_level.lower())
        if spec_cfg.excepthook:
            # TODO(#1405): add a way to restore old excepthook
            ti.enable_excepthook()

    # select arch (backend):
    env_arch = os.environ.get('TI_ARCH')
    if env_arch is not None:
        ti.info(f'Following TI_ARCH setting up for arch={env_arch}')
        arch = ti.core.arch_from_name(env_arch)
    ti.cfg.arch = adaptive_arch_select(arch)
    print(f'[Taichi] Starting on arch={ti.core.arch_name(ti.cfg.arch)}')

    if _test_mode:
        return spec_cfg

    # create a new program:
    ti.get_runtime().create_program()
Example #22
def main():
    tc.set_gdb_trigger()
    # initialization
    scene = Scene()
    # fish(scene)
    robot(scene)
    # scene.add_rect(0.4, 0.4, 0.2, 0.1, 0.3, 0.1, -1, 1)
    scene.finalize()

    for i in range(n_actuators):
        for j in range(n_sin_waves):
            weights[i, j] = np.random.randn() * 0.01

    for i in range(scene.n_particles):
        x[0, i] = scene.x[i]
        F[0, i] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        actuator_id[i] = scene.actuator_id[i]
        particle_type[i] = scene.particle_type[i]

    losses = []
    for iter in range(100):
        t = time.time()
        ti.clear_all_gradients()
        l = forward()
        losses.append(l)
        loss.grad[None] = 1
        backward()
        per_iter_time = time.time() - t
        print('i=', iter, 'loss=', l, F' per iter {per_iter_time:.2f}s')
        learning_rate = 30

        for i in range(n_actuators):
            for j in range(n_sin_waves):
                weights[i, j] -= learning_rate * weights.grad[i, j]
            bias[i] -= learning_rate * bias.grad[i]

        if iter % 20 == 19:
            print('Writing particle data to disk...')
            print('(Please be patient)...')
            # visualize
            forward()
            x_ = x.to_numpy()
            v_ = v.to_numpy()
            particle_type_ = particle_type.to_numpy()
            actuation_ = actuation.to_numpy()
            actuator_id_ = actuator_id.to_numpy()
            folder = 'mpm3d/iter{:04d}/'.format(iter)
            os.makedirs(folder, exist_ok=True)
            for s in range(7, steps, 2):
                xs, ys, zs = [], [], []
                us, vs, ws = [], [], []
                cs = []
                for i in range(n_particles):
                    xs.append(x_[s, i][0])
                    ys.append(x_[s, i][1])
                    zs.append(x_[s, i][2])
                    us.append(v_[s, i][0])
                    vs.append(v_[s, i][1])
                    ws.append(v_[s, i][2])

                    if particle_type_[i] == 0:
                        # fluid
                        r = 0.3
                        g = 0.3
                        b = 1.0
                    else:
                        # neohookean
                        if actuator_id_[i] != -1:
                            # actuated
                            act = actuation_[s, actuator_id_[i]] * 0.5
                            r = 0.5 - act
                            g = 0.5 - abs(act)
                            b = 0.5 + act
                        else:
                            r, g, b = 0.4, 0.4, 0.4

                    cs.append(ti.rgb_to_hex((r, g, b)))
                data = np.array(xs + ys + zs + us + vs + ws + cs,
                                dtype=np.float32)
                fn = '{}/{:04}.bin'.format(folder, s)
                data.tofile(open(fn, 'wb'))
                print('.', end='')
            print()

    plt.title("Optimization of Initial Velocity")
    plt.ylabel("Loss")
    plt.xlabel("Gradient Descent Iterations")
    plt.plot(losses)
    plt.show()
Example #23
import taichi as ti
import taichi as tc  # both names alias the same module in this snippet
import matplotlib.pyplot as plt
import random
import numpy as np

tc.set_gdb_trigger(True)

number_coeffs = 4
learning_rate = 1e-4

N = 32
x, y = ti.field(ti.f32, shape=N, needs_grad=True), ti.field(ti.f32,
                                                            shape=N,
                                                            needs_grad=True)
coeffs = ti.field(ti.f32, shape=number_coeffs, needs_grad=True)
loss = ti.field(ti.f32, shape=(), needs_grad=True)


@ti.kernel
def regress():
    for i in x:
        v = x[i]
        est = 0.0
        for j in ti.static(range(number_coeffs)):
            est += coeffs[j] * (v**j)
        loss[None] += 0.5 * (y[i] - est)**2


@ti.kernel
def update():
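    # body truncated in the scrape; a plausible completion, assuming the usual
    # gradient-descent update on the polynomial coefficients:
    for i in ti.static(range(number_coeffs)):
        coeffs[i] -= learning_rate * coeffs.grad[i]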
Example #24
import taichi as tc

cdf = False

r = 125
tc.set_gdb_trigger()

if __name__ == '__main__':
    res = (r, r, r)
    mpm = tc.dynamics.MPM(res=res,
                          base_delta_t=1e-2,
                          gravity=0,
                          clean_boundary=False,
                          frame_dt=1e-2,
                          benchmark_resample=False,
                          num_threads=1,
                          optimized=True)

    mpm.add_particles(
        #benchmark=125,
        benchmark=8000,
        type='linear',
        initial_velocity=(0, 0, 0),
        E=1e2)

    mpm.simulate(clear_output_directory=True, print_profile_info=True)
Example #25
def init(arch=None,
         default_fp=None,
         default_ip=None,
         print_preprocessed=None,
         debug=None,
         **kwargs):
    # Make a deepcopy in case these args reference to items from ti.cfg, which are
    # actually references. If no copy is made and the args are indeed references,
    # ti.reset() could override the args to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    import taichi as ti
    ti.reset()

    if default_fp is None:  # won't override
        dfl_fp = os.environ.get("TI_DEFAULT_FP")
        if dfl_fp == '32':  # os.environ values are strings, not ints
            default_fp = core.DataType.f32
        elif dfl_fp == '64':
            default_fp = core.DataType.f64
        elif dfl_fp is not None:
            raise ValueError(
                f'Unrecognized TI_DEFAULT_FP: {dfl_fp}, should be 32 or 64')
    if default_ip is None:
        dfl_ip = os.environ.get("TI_DEFAULT_IP")
        if dfl_ip == '32':
            default_ip = core.DataType.i32
        elif dfl_ip == '64':
            default_ip = core.DataType.i64
        elif dfl_ip is not None:
            raise ValueError(
                f'Unrecognized TI_DEFAULT_IP: {dfl_ip}, should be 32 or 64')

    if print_preprocessed is None:  # won't override
        print_preprocessed = os.environ.get("TI_PRINT_PREPROCESSED")
        if print_preprocessed is not None:
            print_preprocessed = bool(int(print_preprocessed))

    if default_fp is not None:
        ti.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        ti.get_runtime().set_default_ip(default_ip)
    if print_preprocessed is not None:
        ti.get_runtime().print_preprocessed = print_preprocessed

    if debug is None:
        debug = bool(int(os.environ.get('TI_DEBUG', '0')))
    if debug:
        ti.set_logging_level(ti.TRACE)
    ti.cfg.debug = debug

    unified_memory = os.environ.get('TI_USE_UNIFIED_MEMORY', '')
    if unified_memory != '':
        use_unified_memory = bool(int(unified_memory))
        ti.cfg.use_unified_memory = use_unified_memory
        if not use_unified_memory:
            ti.trace(
                'Unified memory disabled (env TI_USE_UNIFIED_MEMORY=0). This is experimental.'
            )

    for k, v in kwargs.items():
        setattr(ti.cfg, k, v)

    def bool_int(x):
        return bool(int(x))

    def environ_config(key, cast=bool_int):
        name = 'TI_' + key.upper()
        value = os.environ.get(name, '')
        if len(value):
            setattr(ti.cfg, key, cast(value))

        # TI_ASYNC=   : no effect (empty value is ignored)
        # TI_ASYNC=0  : False
        # TI_ASYNC=1  : True

    # does override
    environ_config("print_ir")
    environ_config("verbose")
    environ_config("fast_math")
    environ_config("async")
    environ_config("print_benchmark_stat")
    environ_config("device_memory_fraction", float)
    environ_config("device_memory_GB", float)

    # Q: Why not environ_config("gdb_trigger")?
    # A: We don't have ti.cfg.gdb_trigger yet.
    # Discussion: https://github.com/taichi-dev/taichi/pull/879
    gdb_trigger = os.environ.get('TI_GDB_TRIGGER', '')
    if len(gdb_trigger):
        ti.set_gdb_trigger(bool(int(gdb_trigger)))

    # Q: Why not environ_config("arch", ti.core.arch_from_name)?
    # A: We need adaptive_arch_select for all.
    env_arch = os.environ.get("TI_ARCH")
    if env_arch is not None:
        print(f'Following TI_ARCH setting up for arch={env_arch}')
        arch = ti.core.arch_from_name(env_arch)

    ti.cfg.arch = adaptive_arch_select(arch)

    log_level = os.environ.get("TI_LOG_LEVEL")
    if log_level is not None:
        ti.set_logging_level(log_level.lower())

    ti.get_runtime().create_program()
Example #26
def init(default_fp=None,
         default_ip=None,
         print_preprocessed=None,
         debug=None,
         **kwargs):
    # Make a deepcopy in case these args reference to items from ti.cfg, which are
    # actually references. If no copy is made and the args are indeed references,
    # ti.reset() could override the args to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    import taichi as ti
    ti.reset()

    if default_fp is None:  # won't override
        dfl_fp = os.environ.get("TI_DEFAULT_FP")
        if dfl_fp == '32':  # os.environ values are strings, not ints
            default_fp = core.DataType.f32
        elif dfl_fp == '64':
            default_fp = core.DataType.f64
        elif dfl_fp is not None:
            raise ValueError(
                f'Unrecognized TI_DEFAULT_FP: {dfl_fp}, should be 32 or 64')
    if default_ip is None:
        dfl_ip = os.environ.get("TI_DEFAULT_IP")
        if dfl_ip == '32':
            default_ip = core.DataType.i32
        elif dfl_ip == '64':
            default_ip = core.DataType.i64
        elif dfl_ip is not None:
            raise ValueError(
                f'Unrecognized TI_DEFAULT_IP: {dfl_ip}, should be 32 or 64')

    if print_preprocessed is None:  # won't override
        print_preprocessed = os.environ.get("TI_PRINT_PREPROCESSED")
        if print_preprocessed is not None:
            print_preprocessed = bool(int(print_preprocessed))

    if default_fp is not None:
        ti.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        ti.get_runtime().set_default_ip(default_ip)
    if print_preprocessed is not None:
        ti.get_runtime().print_preprocessed = print_preprocessed

    if debug is None:
        debug = bool(int(os.environ.get('TI_DEBUG', '0')))
    if debug:
        ti.set_logging_level(ti.TRACE)
    ti.cfg.debug = debug

    unified_memory = os.environ.get('TI_USE_UNIFIED_MEMORY', '')
    if unified_memory != '':
        use_unified_memory = bool(int(unified_memory))
        ti.cfg.use_unified_memory = use_unified_memory
        if not use_unified_memory:
            ti.trace(
                'Unified memory disabled (env TI_USE_UNIFIED_MEMORY=0). This is experimental.'
            )

    for k, v in kwargs.items():
        setattr(ti.cfg, k, v)

    def boolean_config(key, name=None):
        if name is None:
            name = 'TI_' + key.upper()
        value = os.environ.get(name)
        if value is not None:
            setattr(ti.cfg, key, len(value) and bool(int(value)))

    # does override
    boolean_config("print_ir")
    boolean_config("verbose")
    boolean_config("fast_math")
    boolean_config("async")
    gdb_trigger = os.environ.get("TI_GDB_TRIGGER")
    if gdb_trigger is not None:
        ti.set_gdb_trigger(len(gdb_trigger) and bool(int(gdb_trigger)))
    arch = os.environ.get("TI_ARCH")
    if arch is not None:
        print(f'Following TI_ARCH setting up for arch={arch}')
        ti.cfg.arch = ti.core.arch_from_name(arch)

    log_level = os.environ.get("TI_LOG_LEVEL")
    if log_level is not None:
        ti.set_logging_level(log_level.lower())

    ti.get_runtime().create_program()
Example #27
def test_cpu_debug_snode_writer_out_of_bound():
    ti.init(arch=ti.x64, debug=True)
    ti.set_gdb_trigger(False)

    x = ti.var(ti.f32, shape=3)
    x[3] = 10.0
Example #28
def test_cpu_debug_snode_reader_out_of_bound_negative():
    ti.init(arch=ti.x64, debug=True)
    ti.set_gdb_trigger(False)

    x = ti.var(ti.f32, shape=3)
    a = x[-1]
Example #29
def test_cpu_debug_snode_writer_out_of_bound_negative():
    ti.set_gdb_trigger(False)

    x = ti.field(ti.f32, shape=3)
    with pytest.raises(RuntimeError):
        x[-1] = 10.0
Example #30
def init(arch=None,
         default_fp=None,
         default_ip=None,
         _test_mode=False,
         enable_fallback=True,
         **kwargs):
    """Initializes the Taichi runtime.

    This should always be the entry point of your Taichi program. Most
    importantly, it sets the backend used throughout the program.

    Args:
        arch: Backend to use. This is usually :const:`~taichi.lang.cpu` or :const:`~taichi.lang.gpu`.
        default_fp (Optional[type]): Default floating-point type.
        default_ip (Optional[type]): Default integral type.
        **kwargs: Taichi provides highly customizable compilation through
            ``kwargs``, which allows for fine grained control of Taichi compiler
            behavior. Below we list some of the most frequently used ones. For a
            complete list, please check out
            https://github.com/taichi-dev/taichi/blob/master/taichi/program/compile_config.h.

            * ``cpu_max_num_threads`` (int): Sets the number of threads used by the CPU thread pool.
            * ``debug`` (bool): Enables the debug mode, under which Taichi does a few more things like boundary checks.
            * ``print_ir`` (bool): Prints the CHI IR of the Taichi kernels.
            * ``packed`` (bool): Enables the packed memory layout. See https://docs.taichi.graphics/lang/articles/advanced/layout.
    """
    # Check version for users every 7 days if not disabled by users.
    skip = os.environ.get("TI_SKIP_VERSION_CHECK")
    if skip != 'ON':
        try_check_version()

    # Make a deepcopy in case these args reference to items from ti.cfg, which are
    # actually references. If no copy is made and the args are indeed references,
    # ti.reset() could override the args to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    ti.reset()

    spec_cfg = _SpecialConfig()
    env_comp = _EnvironmentConfigurator(kwargs, ti.cfg)
    env_spec = _EnvironmentConfigurator(kwargs, spec_cfg)

    # configure default_fp/ip:
    # TODO: move these stuff to _SpecialConfig too:
    env_default_fp = os.environ.get("TI_DEFAULT_FP")
    if env_default_fp:
        if default_fp is not None:
            _ti_core.warn(
                f'ti.init argument "default_fp" overridden by environment variable TI_DEFAULT_FP={env_default_fp}'
            )
        if env_default_fp == '32':
            default_fp = ti.f32
        elif env_default_fp == '64':
            default_fp = ti.f64
        elif env_default_fp is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_FP={env_default_fp}, should be 32 or 64')

    env_default_ip = os.environ.get("TI_DEFAULT_IP")
    if env_default_ip:
        if default_ip is not None:
            _ti_core.warn(
                f'ti.init argument "default_ip" overridden by environment variable TI_DEFAULT_IP={env_default_ip}'
            )
        if env_default_ip == '32':
            default_ip = ti.i32
        elif env_default_ip == '64':
            default_ip = ti.i64
        elif env_default_ip is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_IP={env_default_ip}, should be 32 or 64')

    if default_fp is not None:
        impl.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        impl.get_runtime().set_default_ip(default_ip)

    # submodule configurations (spec_cfg):
    env_spec.add('print_preprocessed')
    env_spec.add('log_level', str)
    env_spec.add('gdb_trigger')
    env_spec.add('excepthook')
    env_spec.add('experimental_real_function')
    env_spec.add('short_circuit_operators')

    # compiler configurations (ti.cfg):
    for key in dir(ti.cfg):
        if key in ['arch', 'default_fp', 'default_ip']:
            continue
        _cast = type(getattr(ti.cfg, key))
        if _cast is bool:
            _cast = None
        env_comp.add(key, _cast)

    unexpected_keys = kwargs.keys()

    if len(unexpected_keys):
        raise KeyError(
            f'Unrecognized keyword argument(s) for ti.init: {", ".join(unexpected_keys)}'
        )

    # dispatch configurations that are not in ti.cfg:
    if not _test_mode:
        ti.set_gdb_trigger(spec_cfg.gdb_trigger)
        impl.get_runtime().print_preprocessed = spec_cfg.print_preprocessed
        impl.get_runtime().experimental_real_function = \
            spec_cfg.experimental_real_function
        impl.get_runtime().short_circuit_operators = \
            spec_cfg.short_circuit_operators
        ti.set_logging_level(spec_cfg.log_level.lower())
        if spec_cfg.excepthook:
            # TODO(#1405): add a way to restore old excepthook
            ti.enable_excepthook()

    # select arch (backend):
    env_arch = os.environ.get('TI_ARCH')
    if env_arch is not None:
        ti.info(f'Following TI_ARCH setting up for arch={env_arch}')
        arch = _ti_core.arch_from_name(env_arch)
    ti.cfg.arch = adaptive_arch_select(arch, enable_fallback, ti.cfg.use_gles)
    if ti.cfg.arch == cc:
        _ti_core.set_tmp_dir(locale_encode(prepare_sandbox()))
    print(f'[Taichi] Starting on arch={_ti_core.arch_name(ti.cfg.arch)}')

    # Torch based ndarray on opengl backend allocates memory on host instead of opengl backend.
    # So it won't work.
    if ti.cfg.arch == opengl and ti.cfg.ndarray_use_torch:
        ti.warn(
            'Opengl backend doesn\'t support torch based ndarray. Setting ndarray_use_torch to False.'
        )
        ti.cfg.ndarray_use_torch = False

    if _test_mode:
        return spec_cfg

    get_default_kernel_profiler().set_kernel_profiler_mode(
        ti.cfg.kernel_profiler)

    # create a new program:
    impl.get_runtime().create_program()

    ti.trace('Materializing runtime...')
    impl.get_runtime().prog.materialize_runtime()

    impl._root_fb = FieldsBuilder()

    if not os.environ.get("TI_DISABLE_SIGNAL_HANDLERS", False):
        impl.get_runtime()._register_signal_handlers()

    return None
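The docstring above names a few frequently used kwargs; a minimal usage sketch (the values are illustrative, not defaults):

import taichi as ti

ti.init(arch=ti.cpu,            # backend selection
        debug=True,             # enables bound checks and kernel asserts
        cpu_max_num_threads=4,  # CPU thread-pool size
        print_ir=False)         # whether to dump the CHI IR of each kernel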