Example #1
File: evp.py Project: fehomi/timemachine
def test0():

    onp.random.seed(2020)

    for trip in range(10):
        print("trip", trip)
        N = 50
        x_a = onp.random.rand(N, 3)

        a_com, a_tensor = inertia_tensor(x_a, onp.ones(N, dtype=np.float64))

        onp_res = onp.linalg.eigh(a_tensor)
        w = onp_res[0]
        Q = onp_res[1]
        for d in range(3):
            onp.testing.assert_almost_equal(np.matmul(a_tensor, Q[:, d]),
                                            w[d] * Q[:, d])

        jnp_res = np.linalg.eigh(a_tensor)
        evp_res = dsyevv3(a_tensor)

        np.set_printoptions(
            formatter={"float": lambda x: "{0:0.16f}".format(x)})

        onp.testing.assert_almost_equal(onp_res[0], jnp_res[0])
        onp.testing.assert_almost_equal(onp_res[1], jnp_res[1])

        onp.testing.assert_almost_equal(onp_res[0], evp_res[0])
        onp.testing.assert_almost_equal(onp.abs(onp_res[1]),
                                        onp.abs(evp_res[1]))
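Note: in this snippet np is jax.numpy and onp is vanilla NumPy. The final assertion compares absolute values because eigh determines eigenvectors only up to sign, so two correct implementations may return a negated column. A minimal standalone sketch of that ambiguity (not from the project):

import numpy as onp

A = onp.array([[2., 1.], [1., 2.]])
w, Q = onp.linalg.eigh(A)
# Q[:, 0] and -Q[:, 0] are equally valid unit eigenvectors for w[0]:
onp.testing.assert_almost_equal(A @ Q[:, 0], w[0] * Q[:, 0])
onp.testing.assert_almost_equal(A @ -Q[:, 0], w[0] * -Q[:, 0])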
Example #2
def main(_):
    jnp.set_printoptions(precision=3, suppress=True)

    shape = (15, 1)
    index_points = jnp.linspace(-3., 3., shape[0])[:, None]

    rng = random.PRNGKey(123)

    y = (jnp.sin(index_points)[:, 0] + 0.33 * random.normal(rng, (15, )))

    train_ds = {'index_points': index_points, 'y': y}

    optimizer = train(train_ds)

    if FLAGS.plot:
        import matplotlib.pyplot as plt
        model = optimizer.target

        def inducing_loc_init(key, shape):
            return random.uniform(key, shape, minval=-3., maxval=3.)

        xx_pred = jnp.linspace(-3., 5.)[:, None]

        _, vgp = model(xx_pred, inducing_loc_init)

        pred_m = vgp.mean_function(xx_pred)
        pred_v = jnp.diag(vgp.kernel_function(xx_pred, xx_pred))

        fig, ax = plt.subplots(figsize=(6, 4))

        ax.fill_between(xx_pred[:, 0],
                        pred_m - 2 * jnp.sqrt(pred_v),
                        pred_m + 2 * jnp.sqrt(pred_v),
                        alpha=0.5)
        ax.plot(xx_pred[:, 0],
                pred_m,
                '-',
                label=r'$\mathbb{E}_{f \sim q(f)}[f(x)]$')
        ax.plot(model.params['inducing_var']['locations'][:, 0],
                model.params['inducing_var']['mean'],
                '+',
                label=r'$E_{u \sim q(u)}[u]$')
        ax.plot(train_ds['index_points'][:, 0],
                train_ds['y'],
                'ks',
                label='observations')
        ax.legend()
        plt.show()
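Note: the shaded region is the pointwise mean ± 2 standard deviations, i.e. roughly a 95% Gaussian credible band. A minimal sketch of the same construction (illustrative values, not from the project):

import jax.numpy as jnp

pred_m = jnp.zeros(50)            # predictive mean
pred_v = jnp.full(50, 0.25)       # predictive variance (kernel diagonal)
lower = pred_m - 2 * jnp.sqrt(pred_v)
upper = pred_m + 2 * jnp.sqrt(pred_v)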
Example #3
def test_cube1():
    np.set_printoptions(precision=3)
    old_model = Model0(dim=2, bounds=np.array([3.0, 6.0]))  # this works
    model = FastRefCubeModel(dim=2, A_type='bi_value_A')
    import numpy

    q = np.array([10.0, 0.0])
    p = np.array([-1.0, -0.0])
    first_x, min_time_x, dim_x = model.first_discontinuity_x_tx_dimx(q=q, p=p)
    old_first_x, old_min_time_x, old_dim_x = old_model.first_discontinuity_x_tx_dimx(
        q=q, p=p)
    print('first_x: {},\t\t min_time_x: {},\t\t dim_x: {} '.format(
        first_x, min_time_x, dim_x))
    print('old_first_x: {},\t old_min_time_x: {},\t old_dim_x: {} '.format(
        old_first_x, old_min_time_x, old_dim_x))

    # exit()

    collision_q0 = []
    collision_q1 = []
    for i in tqdm(range(2000)):
        q = np.array([
            numpy.random.uniform(-30.30, 30.30),
            numpy.random.uniform(-30.30, 30.30)
        ])
        p = np.array(
            [numpy.random.uniform(-10, 10),
             numpy.random.uniform(-10, 10)])

        first_x, _, _ = model.first_discontinuity_x_tx_dimx(q=q, p=p)

        if first_x is not None:
            collision_q0.append(first_x[0])
            collision_q1.append(first_x[1])

    import matplotlib.pyplot as plt
    plt.scatter(numpy.array(collision_q0),
                numpy.array(collision_q1),
                alpha=0.5,
                s=1)
    plt.show()
Example #4
    ress = anp.divide(numerator, divider)

    return anp.log(ress)


ress = autograd.grad(funcc)

# arg = jax.random.normal(jax.random.PRNGKey(1), (NUM_ARG, 0))

arg = onp.random.rand(NUM_ARG, 1)
real_args = np.asarray(arg)
print("... ... ...Start calculating Gradient... ... ...")

# jit
np.set_printoptions(precision=16)
# anp.set_printoptions(precision=16)
start = time.time()
print(res(real_args))
print(type(res(real_args)))
end = time.time()
print("First Execution time using jit: ", end-start, " s ... ... ...")

# # autograd
# start = time.time()
# print(ress(arg))
# end = time.time()
# print("First Execution time using autograd: ", end-start, " s ... ... ...")

for step in range(10):
    arg = onp.random.rand(NUM_ARG, 1)
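Note: res here is presumably the jitted gradient defined above this excerpt (only the autograd version ress is visible). When timing jitted JAX code, the first call includes trace/compile time and dispatch is asynchronous, so reliable measurements should block on the result. A minimal sketch with an illustrative function (not the project's funcc):

import time
import jax
import jax.numpy as np

f = jax.jit(lambda x: np.log(x.sum()))
x = np.ones((1000, 1))
f(x).block_until_ready()              # warm-up: pay the compile cost once
start = time.time()
f(x).block_until_ready()              # steady-state timing
print("steady-state:", time.time() - start, "s")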
Example #5
from collections import namedtuple

import pytest

import jax.numpy as jnp

from numpyro.contrib.einstein.kernels import (
    GraphicalKernel,
    HessianPrecondMatrix,
    IMQKernel,
    LinearKernel,
    MixtureKernel,
    PrecondMatrixKernel,
    RandomFeatureKernel,
    RBFKernel
)

jnp.set_printoptions(precision=100)
T = namedtuple('TestSteinKernel', ['kernel', 'particle_info', 'loss_fn', 'kval'])

PARTICLES_2D = jnp.array([[1., 2.], [-10., 10.], [7., 3.], [2., -1]])

TPARTICLES_2D = (jnp.array([1., 2.]), jnp.array([10., 5.]))  # transformed particles

TEST_CASES = [
    T(RBFKernel,
      lambda d: {},
      lambda x: x,
      {'norm': 0.040711474,
       'vector': jnp.array([0.056071877, 0.7260586]),
       'matrix': jnp.array([[0.040711474, 0.],
                            [0., 0.040711474]])}
      ),
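The snippet is truncated after the first case. A typical consumption pattern for such a table (our assumption; the source's test functions are not shown) is pytest parametrization:

@pytest.mark.parametrize('case', TEST_CASES)
def test_kernel_case(case):
    # each T bundles a kernel class, its particle info, a loss_fn, and the
    # expected kernel values keyed by mode
    assert set(case.kval) <= {'norm', 'vector', 'matrix'}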
Example #6
from jax.config import config
config.update("jax_enable_x64", True)


@jax.jit
def rosenbrock(x):
    # standard Rosenbrock: sum_i 100*(x[i+1] - x[i]**2)**2 + (1 - x[i])**2
    res = jnp.sum(100 * (x[1:] - x[:-1]**2)**2 + (1 - x[:-1])**2)
    return res


def mccormick(v):
    res = jnp.sin(v[0] + v[1]) + (v[0] - v[1])**2 - 1.5 * v[0] + 2.5 * v[1] + 1
    return res


jnp.set_printoptions(precision=8, linewidth=200)

rng = jax.random.PRNGKey(12)
guess = jax.random.uniform(rng, shape=(6, ), minval=0., maxval=3.)
x_min, f_min = minimize(rosenbrock,
                        guess,
                        max_iter=500,
                        rel_tol=1e-9,
                        verbose=True)

x_min, f_min = minimize(mccormick,
                        jnp.array([0., 0.]),
                        max_iter=500,
                        rel_tol=1e-10,
                        verbose=True)
print(x_min, f_min)
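Note: config.update("jax_enable_x64", True) matters here because JAX arrays default to float32, whose roughly 7 significant digits cannot satisfy tolerances like rel_tol=1e-10. A quick check:

import jax.numpy as jnp

print(jnp.array([0.]).dtype)   # float64 with x64 enabled, float32 otherwise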
Example #7
    for msg in config.IGNORE_WARNINGS:
        warnings.filterwarnings("ignore", message=msg)
    import jax.numpy as np

    def array2string(array, max_length=config.NUMPY_THRESHOLD):
        """ array2string is not implemented in jax.numpy """
        flat = list(array)
        flat = flat if len(flat) <= max_length else\
            flat[:max_length // 2] + ["..."] + flat[1 - max_length // 2:]
        return "[{}]".format(", ".join(map(str, flat)))

    np.array2string = array2string
else:
    import numpy as np
    from numpy import array2string as _array2string
    np.set_printoptions(threshold=config.NUMPY_THRESHOLD)

    def array2string(array, **params):
        """ makes sure we get the same doctest with numpy and jax.numpy """
        return _array2string(array, **dict(params, separator=', '))\
            .replace('[ ', '[').replace('  ', ' ')

    np.array2string = array2string
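    # Quick sanity check of the patched formatter (illustrative, not from the
    # source); the output is identical under either backend:
    print(np.array2string(np.arange(3)))  # -> '[0, 1, 2]'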


class Dim(Ty):
    """ Implements dimensions as tuples of positive integers.
    Dimensions form a monoid with product @ and unit Dim(1).

    >>> Dim(1) @ Dim(2) @ Dim(3)
    Dim(2, 3)
    """
Example #8
    loss_list.append(current_loss)

    for step in range(num_iter):
        opt_state = update(step, opt_state, data)
        A = get_params(opt_state)[0][0]

        current_loss = cost_function_fidelity(get_params(opt_state), *data)
        print(step, 1. - current_loss)
        loss_list.append(current_loss)

    return loss_list


if __name__ == "__main__":
    onp.random.seed(1)
    np.set_printoptions(linewidth=2000, precision=5, threshold=4000)

    args = parse_args.parse_args()

    L = args.L
    J = 1.
    depth = args.depth
    N_iter = args.N_iter
    T = args.T  # the target state is corresponding to time T.

    save_each = 100
    tol = 1e-12
    cov_crit = tol * 0.1
    max_N_iter = N_iter

    Sz_list = [np.array([[1, 0.], [0., -1.]]) for i in range(L)]
Example #9
import jax.numpy as jnp
from jax.config import config
config.update('jax_enable_x64', True)
jnp.set_printoptions(precision=4)

import jax.scipy as jsc
from jax import jacfwd, jacrev, grad, jit, vmap, random, ops
import time
import numpy as np


def Tk(K, c, Shat, p):
    nc = [i for i in range(p) if i not in c]
    idx_cc = ([x for x in c for y in c], [y for x in c for y in c])
    Kcc = jnp.linalg.inv(Shat[c, :][:, c]) + K[c, :][:, nc] @ jsc.linalg.solve(
        K[nc, :][:, nc], K[nc, :][:, c])
    return ops.index_update(K, idx_cc, Kcc.flatten())
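# Note (not from the source): jax.ops.index_update has since been removed from
# JAX; the modern equivalent of the return line above is the .at[] syntax:
#     K = K.at[idx_cc].set(Kcc.flatten())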


def T(K, C, Shat):
    p = K.shape[0]
    for c in C:
        K = Tk(K, c, Shat, p)
    return K


def itpropscaling(C, Shat):
    p = Shat.shape[0]
    tol = 1e-8
    K = jnp.eye(p)
    err = 1
Example #10
try:  # pragma: no cover
    import warnings
    for msg in messages.IGNORE_WARNINGS:
        warnings.filterwarnings("ignore", message=msg)
    import jax.numpy as np
    def array2string(array, max_length=messages.NUMPY_THRESHOLD):
        """ array2string is not implemented in jax.numpy """
        ls = list(array)
        if len(ls) > max_length:
            ls = ls[:max_length // 2] + ["..."] + ls[1 - max_length // 2:]
        return "[{}]".format(", ".join(map(str, ls)))
    np.array2string = array2string
except ImportError:  # pragma: no cover
    import numpy as np
    from numpy import array2string as _array2string
    np.set_printoptions(threshold=messages.NUMPY_THRESHOLD)
    def array2string(array, **params):
        """ makes sure we get the same doctest with numpy and jax.numpy """
        return _array2string(array, separator=', ', **params)\
            .replace('[ ', '[').replace('  ', ' ')
    np.array2string = array2string


class Dim(Ty):
    """ Implements dimensions as tuples of positive integers.
    Dimensions form a monoid with product @ and unit Dim(1).

    >>> Dim(1) @ Dim(2) @ Dim(3)
    Dim(2, 3)
    """
    def __init__(self, *dims):
Example #11
def train_and_evaluate(config, workdir):
    """Runs a training and evaluation loop.

  Args:
    config: Configuration to use.
    workdir: Working directory for checkpoints and TF summaries. If this
      contains a checkpoint, training will be resumed from the latest one.
  """
    if config.dataset.batch_size % jax.device_count() != 0:
        raise ValueError(
            "Batch size must be divisible by the number of devices.")

    tf.io.gfile.makedirs(workdir)
    # Deterministic training.
    rng = jax.random.PRNGKey(config.seed)
    # Shift the numpy random seed by process_index() to shuffle data loaded
    # by different hosts
    np.random.seed(20201473 + jax.process_index())

    #----------------------------------------------------------------------------
    # Build input pipeline.
    rng, data_rng = jax.random.split(rng)
    data_rng = jax.random.fold_in(data_rng, jax.process_index())

    scene_path_list = train_utils.get_train_scene_list(config)

    train_ds = datasets.create_train_dataset(config, scene_path_list[0])
    _, eval_ds_dict = datasets.create_eval_dataset(config)
    _, eval_ds = eval_ds_dict.popitem()
    example_batch = train_ds.peek()

    #----------------------------------------------------------------------------
    # Learning rate schedule.
    num_train_steps = config.train.max_steps
    if num_train_steps == -1:
        num_train_steps = train_ds.size()
    steps_per_epoch = num_train_steps // config.train.num_epochs
    logging.info("num_train_steps=%d, steps_per_epoch=%d", num_train_steps,
                 steps_per_epoch)

    learning_rate_fn = train_utils.create_learning_rate_fn(config)

    #----------------------------------------------------------------------------
    # Initialize model.
    rng, model_rng = jax.random.split(rng)
    model, state = models.create_train_state(
        config,
        model_rng,
        learning_rate_fn=learning_rate_fn,
        example_batch=example_batch,
    )

    #----------------------------------------------------------------------------
    # Set up checkpointing of the model and the input pipeline.

    # check if the job was stopped and relaunched
    latest_ckpt = checkpoints.latest_checkpoint(workdir)
    if latest_ckpt is None:
        # No previous checkpoint. Then check for pretrained weights.
        if config.train.pretrain_dir:
            state = checkpoints.restore_checkpoint(config.train.pretrain_dir,
                                                   state)
    else:
        state = checkpoints.restore_checkpoint(workdir, state)

    initial_step = int(state.step) + 1
    step_per_scene = config.train.switch_scene_iter
    if config.dev_run:
        jnp.set_printoptions(precision=2)
        np.set_printoptions(precision=2)
        step_per_scene = 3

    #----------------------------------------------------------------------------
    # Distribute training.
    state = flax_utils.replicate(state)
    p_train_step = jax.pmap(
        functools.partial(
            train_step,
            model=model,
            learning_rate_fn=learning_rate_fn,
            weight_decay=config.train.weight_decay,
            config=config,
        ),
        axis_name="batch",
    )

    # Get distributed rendering function
    render_pfn = render_utils.get_render_function(
        model=model,
        config=config,
        randomized=False,  # No randomization for evaluation.
    )

    #----------------------------------------------------------------------------
    # Prepare Metric Writers
    writer = metric_writers.create_default_writer(
        workdir, just_logging=jax.process_index() > 0)
    if initial_step == 1:
        writer.write_hparams(dict(config))

    logging.info("Starting training loop at step %d.", initial_step)
    hooks = []
    report_progress = periodic_actions.ReportProgress(
        num_train_steps=num_train_steps, writer=writer)
    if jax.process_index() == 0:
        hooks += [
            report_progress,
        ]
    train_metrics = None

    # Prefetch_buffer_size = 6 x batch_size
    ptrain_ds = flax.jax_utils.prefetch_to_device(train_ds, 6)
    n_local_devices = jax.local_device_count()
    rng = rng + jax.process_index()  # Make random seed separate across hosts.
    keys = jax.random.split(rng, n_local_devices)  # For pmapping RNG keys.
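    # Note: one key per local device; p_train_step is pmapped over the leading
    # axis of `keys`, so each device draws independent randomness.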

    with metric_writers.ensure_flushes(writer):
        for step in range(initial_step, num_train_steps + 1):
            # `step` is a Python integer. `state.step` is JAX integer on the GPU/TPU
            # devices.
            if step % step_per_scene == 0:
                scene_idx = np.random.randint(len(scene_path_list))
                logging.info("Loading scene {}".format(
                    scene_path_list[scene_idx]))  # pylint: disable=logging-format-interpolation
                curr_scene = scene_path_list[scene_idx]
                if config.dataset.name == "dtu":
                    # lighting can take values between 0 and 6 (both included)
                    config.dataset.dtu_light_idx = np.random.randint(low=0,
                                                                     high=7)
                train_ds = datasets.create_train_dataset(config, curr_scene)
                ptrain_ds = flax.jax_utils.prefetch_to_device(train_ds, 6)

            is_last_step = step == num_train_steps
            with jax.profiler.StepTraceAnnotation("train", step_num=step):
                batch = next(ptrain_ds)
                state, metrics_update, keys = p_train_step(rng=keys,
                                                           state=state,
                                                           batch=batch)
                metric_update = flax_utils.unreplicate(metrics_update)
                train_metrics = (metric_update if train_metrics is None else
                                 train_metrics.merge(metric_update))
            # Quick indication that training is happening.
            logging.log_first_n(logging.INFO, "Finished training step %d.", 5,
                                step)
            for h in hooks:
                h(step)

            if step % config.train.log_loss_every_steps == 0 or is_last_step:
                writer.write_scalars(step, train_metrics.compute())
                train_metrics = None

            if step % config.train.render_every_steps == 0 or is_last_step:
                test_batch = next(eval_ds)
                test_pixels = model_utils.uint2float(
                    test_batch.target_view.rgb)  # extract for evaluation
                with report_progress.timed("eval"):
                    pred_color, pred_disp, pred_acc = eval_step(
                        state, keys[0], test_batch, render_pfn, config)
                #------------------------------------------------------------------
                # Log metrics and images for host 0
                #------------------------------------------------------------------
                if jax.process_index() == 0:
                    psnr = model_utils.compute_psnr(
                        ((pred_color - test_pixels)**2).mean())
                    ssim = 0.
                    writer.write_scalars(
                        step, {
                            "train_eval/test_psnr": psnr,
                            "train_eval/test_ssim": ssim,
                        })
                    writer.write_images(
                        step, {
                            "test_pred_color": pred_color[None, :],
                            "test_target": test_pixels[None, :]
                        })
                    if pred_disp is not None:
                        writer.write_images(
                            step, {"test_pred_disp": pred_disp[None, :]})
                    if pred_acc is not None:
                        writer.write_images(
                            step, {"test_pred_acc": pred_acc[None, :]})
                #------------------------------------------------------------------

            if (jax.process_index()
                    == 0) and (step % config.train.checkpoint_every_steps == 0
                               or is_last_step):
                # Write final metrics to file
                with file_utils.open_file(
                        os.path.join(workdir, "train_logs.json"), "w") as f:
                    log_dict = metric_update.compute()
                    for k, v in log_dict.items():
                        log_dict[k] = v.item()
                    f.write(json.dumps(log_dict))
                with report_progress.timed("checkpoint"):
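                    # `state` was replicated across local devices by
                    # flax_utils.replicate, so every leaf has a leading device
                    # axis; taking x[0] keeps a single un-replicated copy to save.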
                    state_to_save = jax.device_get(
                        jax.tree_map(lambda x: x[0], state))
                    checkpoints.save_checkpoint(workdir,
                                                state_to_save,
                                                step,
                                                keep=100)

    logging.info("Finishing training at step %d", num_train_steps)