Example 1
class ExperimentalOptimizersEquivalenceTest(chex.TestCase):
    def setUp(self):
        super().setUp()
        self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.]))
        self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.]))

    @chex.all_variants()
    @parameterized.named_parameters(
        ('sgd', alias.sgd(LR, 0.0), optimizers.sgd(LR), 1e-5),
        ('adam', alias.adam(LR, 0.9, 0.999,
                            1e-8), optimizers.adam(LR, 0.9, 0.999), 1e-4),
        ('rmsprop', alias.rmsprop(
            LR, decay=.9, eps=0.1), optimizers.rmsprop(LR, .9, 0.1), 1e-5),
        ('rmsprop_momentum', alias.rmsprop(LR, decay=.9, eps=0.1,
                                           momentum=0.9),
         optimizers.rmsprop_momentum(LR, .9, 0.1, 0.9), 1e-5),
        ('adagrad', alias.adagrad(LR, 0., 0.),
         optimizers.adagrad(LR, 0.), 1e-5),
        ('sgd_scheduled', alias.sgd(LR_SCHED, 0.0),
         optimizers.sgd(LR), 1e-5),
        ('adam_scheduled', alias.adam(LR_SCHED, 0.9, 0.999, 1e-8),
         optimizers.adam(LR, 0.9, 0.999), 1e-4),
        ('rmsprop_scheduled', alias.rmsprop(LR_SCHED, decay=.9, eps=0.1),
         optimizers.rmsprop(LR, .9, 0.1), 1e-5),
        ('rmsprop_momentum_scheduled',
         alias.rmsprop(LR_SCHED, decay=.9, eps=0.1, momentum=0.9),
         optimizers.rmsprop_momentum(LR, .9, 0.1, 0.9), 1e-5),
        ('adagrad_scheduled', alias.adagrad(LR_SCHED, 0., 0.),
         optimizers.adagrad(LR, 0.), 1e-5),
    )
    def test_jax_optimizer_equivalent(self, optax_optimizer, jax_optimizer,
                                      rtol):

        # experimental/optimizers.py
        jax_params = self.init_params
        opt_init, opt_update, get_params = jax_optimizer
        state = opt_init(jax_params)
        for i in range(STEPS):
            state = opt_update(i, self.per_step_updates, state)
            jax_params = get_params(state)

        # optax
        optax_params = self.init_params
        state = optax_optimizer.init(optax_params)

        @self.variant
        def step(updates, state):
            return optax_optimizer.update(updates, state)

        for _ in range(STEPS):
            updates, state = step(self.per_step_updates, state)
            optax_params = update.apply_updates(optax_params, updates)

        # Check equivalence.
        chex.assert_tree_all_close(jax_params, optax_params, rtol=rtol)
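These equivalence tests reference module-level names (LR, LR_SCHED, STEPS, the alias and update modules, and jax.experimental.optimizers) that the excerpt does not show. A minimal sketch of that preamble, with illustrative values rather than the original ones:

# Assumed test-module preamble for the excerpt above (values are illustrative).
from absl.testing import parameterized
import chex
import jax.numpy as jnp
# In newer JAX this module lives at jax.example_libraries.optimizers.
from jax.experimental import optimizers
from optax._src import alias
from optax._src import update

STEPS = 50
LR = 1e-2
LR_SCHED = lambda step: LR  # flat schedule, so it matches the constant LR above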
Example 2
class FlaxOptimizersEquivalenceTest(chex.TestCase):

  def setUp(self):
    super().setUp()
    self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.]))
    self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.]))

  @parameterized.named_parameters(
      ('sgd',
       alias.sgd(LR),
       optim.GradientDescent(LR)),
      ('momentum',
       alias.sgd(LR, momentum=0.9),
       optim.Momentum(LR, beta=0.9)),  # Different names.
      ('nesterov_momentum',
       alias.sgd(LR, momentum=0.9, nesterov=True),
       optim.Momentum(LR, beta=0.9, nesterov=True)),
      ('rmsprop',
       alias.rmsprop(LR),
       optim.RMSProp(LR)),
      ('centered_rmsprop',
       alias.rmsprop(LR, centered=True),
       optim.RMSProp(LR, centered=True)),
      ('adam',
       alias.adam(LR),
       optim.Adam(LR)),
      ('adam_w',
       alias.adamw(LR, weight_decay=1e-4),
       optim.Adam(LR, weight_decay=1e-4)),  # Different name.
      ('adagrad',
       alias.adagrad(LR, initial_accumulator_value=0.),  # Different default!
       optim.Adagrad(LR)),
      ('lamb',
       alias.lamb(LR),
       optim.LAMB(LR)),
  )
  def test_flax_optim_equivalence(self, optax_optimizer, flax_optimizer):

    # flax/optim
    flax_params = self.init_params
    flax_optimizer = flax_optimizer.create(flax_params)
    for _ in range(STEPS):
      flax_optimizer = flax_optimizer.apply_gradient(
          self.per_step_updates)
      flax_params = flax_optimizer.target

    # optax
    optax_params = self.init_params
    state = optax_optimizer.init(optax_params)
    for _ in range(STEPS):
      updates, state = optax_optimizer.update(
          self.per_step_updates, state, optax_params)
      optax_params = update.apply_updates(optax_params, updates)

    # Check equivalence.
    chex.assert_tree_all_close(flax_params, optax_params, rtol=1e-4)
Example 3
 def test_explicit_dtype(self, dtype):
     expected_dtype = jax.dtypes.canonicalize_dtype(
         dtype)  # None -> float32
     tx = alias.sgd(0.1, momentum=0.9, accumulator_dtype=dtype)
     trace_state, _ = tx.init(jnp.array([0.0, 0.0]))
     self.assertEqual(expected_dtype, trace_state.trace.dtype)
     tx = alias.adam(0.1, mu_dtype=dtype)
     adam_state, _ = tx.init(jnp.array([0.0, 0.0]))
     self.assertEqual(expected_dtype, adam_state.mu.dtype)
     tx = alias.adamw(0.1, mu_dtype=dtype)
     adam_state, _, _ = tx.init(jnp.array([0.0, 0.0]))
     self.assertEqual(expected_dtype, adam_state.mu.dtype)
Example 4
    @chex.all_variants(with_pmap=False)
    def test_multi_steps(self):
        batch_size = 32
        x_size = 7
        # Parameters should be updated only every `k_steps` optimisation steps.
        k_steps = 4
        data = jnp.ones([batch_size, x_size])

        def get_loss(x):
            loss = jnp.sum(hk.Linear(10)(x)**2)
            return loss

        loss_init, loss_apply = hk.without_apply_rng(hk.transform(get_loss))
        params = loss_init(jax.random.PRNGKey(1915), data)

        ms_opt = wrappers.MultiSteps(alias.adam(1e-4), k_steps)
        opt_init, opt_update = ms_opt.gradient_transformation()

        # Put the training in one function, to check that the update is indeed
        # jittable.
        def train_step(data, opt_state, params):
            grad = jax.grad(loss_apply)(params, data)
            updates, opt_state = opt_update(grad, opt_state, params)
            return updates, opt_state

        opt_state = opt_init(params)

        prev_loss = loss_apply(params, data)
        for idx in range(5 * k_steps):
            updates, opt_state = self.variant(train_step)(data, opt_state,
                                                          params)
            new_params = update.apply_updates(params, updates)
            new_loss = loss_apply(new_params, data)
            if idx % k_steps < k_steps - 1:
                # The parameters should not have changed and the loss should be
                # constant.
                jax.tree_multimap(np.testing.assert_array_equal, new_params,
                                  params)
                np.testing.assert_equal(new_loss, prev_loss)
                self.assertFalse(ms_opt.has_updated(opt_state))
            else:
                # This is a step where parameters should actually have been updated, and
                # the loss should accordingly go down.
                np.testing.assert_array_less(new_loss, prev_loss)
                prev_loss = new_loss
                self.assertTrue(ms_opt.has_updated(opt_state))
            params = new_params
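For reference, the wrapped transformation emits all-zero updates on accumulation steps and only produces a real update on every k_steps-th call, which is what the assertions above rely on. A minimal standalone sketch, assuming the same imports as the excerpt:

tx = wrappers.MultiSteps(alias.sgd(1.0), every_k_schedule=2)
opt = tx.gradient_transformation()
opt_state = opt.init(jnp.zeros(3))
grads = jnp.ones(3)
u1, opt_state = opt.update(grads, opt_state)  # accumulation step: u1 is all zeros
u2, opt_state = opt.update(grads, opt_state)  # k-th step: sgd applied to the mean gradient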
Example 5
    def test_labels_mismatch(self, use_extra_label, use_fn):
        # The labels from label_fn must be a subset of the transforms keys.
        params = {'a': 1., 'b': [2., 3.], 'c': {'d': 4., 'e': (5., 6.)}}
        params = jax.tree_map(jnp.asarray, params)
        label_tree = {'a': 0, 'b': [1, 0], 'c': 1}  # prefix of params

        if use_extra_label:
            label_tree['a'] = 3

        transforms = {
            0: alias.sgd(1.),
            1: alias.adam(1., b1=0., b2=0.),
            2: transform.trace(1.0)
        }
        init_fn, update_fn = combine.multi_transform(
            transforms, (lambda _: label_tree) if use_fn else label_tree)

        if use_extra_label:
            with self.assertRaises(ValueError):
                self.variant(init_fn)(params)
        else:
            state = self.variant(init_fn)(params)
            updates = jax.tree_map(lambda x: x / 10.0, params)
            self.variant(update_fn)(updates, state)
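multi_transform routes each parameter leaf to the transform whose key matches the leaf's label, so the extra label 3 above, which has no entry in transforms, fails at init time. A minimal well-formed usage sketch, assuming the same imports as the excerpt:

params = {'w': jnp.array([1., 2.]), 'b': jnp.array(0.)}
tx = combine.multi_transform(
    {'fast': alias.sgd(1.), 'slow': alias.sgd(0.1)},  # one transform per label
    {'w': 'fast', 'b': 'slow'})  # every label has a matching transform key
state = tx.init(params)
grads = jax.tree_map(jnp.ones_like, params)
updates, state = tx.update(grads, state)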
Example 6
def _build_simple_adam():
    # With b1=0. and b2=0., this adam's update is g / (|g| + eps), i.e. a
    # sign-SGD-like step, but the optimizer still carries (count, mu, nu) state.
    return alias.adam(1., b1=0., b2=0.)
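A quick check of that claim (a sketch; with learning rate 1 and b1 = b2 = 0 each update is -g / (|g| + eps), roughly -sign(g)):

tx = _build_simple_adam()
state = tx.init(jnp.zeros(2))
updates, state = tx.update(jnp.array([3.0, -0.5]), state)
# updates is approximately [-1., 1.]: the gradient normalised element-wise,
# while `state` still carries Adam's (count, mu, nu) bookkeeping.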
Example 7
class AliasTest(chex.TestCase):
    @parameterized.parameters(
        ('sgd', lambda: alias.sgd(1e-2, 0.0)),
        ('adam', lambda: alias.adam(1e-1)),
        ('adamw', lambda: alias.adamw(1e-1)),
        ('lamb', lambda: alias.adamw(1e-1)),
        ('rmsprop', lambda: alias.rmsprop(1e-1)),
        ('rmsprop_momentum', lambda: alias.rmsprop(5e-2, momentum=0.9)),
        ('fromage', lambda: alias.fromage(1e-2)),
        ('adabelief', lambda: alias.adabelief(1e-1)),
        ('radam', lambda: alias.radam(1e-1)), ('sm3', lambda: alias.sm3(1.0)),
        ('yogi', lambda: alias.yogi(1.0)),
        ('dpsgd', lambda: alias.dpsgd(1e-2, 10.0, 0.001, 0)))
    def test_parabel(self, opt_name, opt):
        opt = opt()

        initial_params = jnp.array([-1.0, 10.0, 1.0])
        final_params = jnp.array([1.0, -1.0, 1.0])

        @jax.grad
        def get_updates(params):
            return jnp.sum((params - final_params)**2)

        @jax.jit
        def step(params, state):
            updates = get_updates(params)
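            # dpsgd expects per-example gradients, so add a leading batch axis.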
            if opt_name == 'dpsgd':
                updates = updates[None]
            updates, state = opt.update(updates, state, params)
            params = update.apply_updates(params, updates)
            return params, state

        params = initial_params
        state = opt.init(params)
        for _ in range(1000):
            params, state = step(params, state)

        chex.assert_tree_all_close(params, final_params, rtol=1e-2, atol=1e-2)

    @parameterized.parameters(
        ('sgd', lambda: alias.sgd(2e-3, 0.2)),
        ('adam', lambda: alias.adam(1e-1)),
        ('adamw', lambda: alias.adamw(1e-1)),
        ('lamb', lambda: alias.lamb(1e-3)),
        ('rmsprop', lambda: alias.rmsprop(5e-3)),
        ('rmsprop_momentum', lambda: alias.rmsprop(5e-3, momentum=0.9)),
        ('fromage', lambda: alias.fromage(5e-3)),
        ('adabelief', lambda: alias.adabelief(1e-1)),
        ('radam', lambda: alias.radam(1e-3)), ('sm3', lambda: alias.sm3(1.0)),
        ('yogi', lambda: alias.yogi(1.0)),
        ('dpsgd', lambda: alias.dpsgd(2e-3, 10., 0.001, 0, 0.2)))
    def test_rosenbrock(self, opt_name, opt):
        opt = opt()

        a = 1.0
        b = 100.0
        initial_params = jnp.array([0.0, 0.0])
        final_params = jnp.array([a, a**2])

        @jax.grad
        def get_updates(params):
            return (a - params[0])**2 + b * (params[1] - params[0]**2)**2

        @jax.jit
        def step(params, state):
            updates = get_updates(params)
            if opt_name == 'dpsgd':
                updates = updates[None]
            updates, state = opt.update(updates, state, params)
            params = update.apply_updates(params, updates)
            return params, state

        params = initial_params
        state = opt.init(params)
        for _ in range(10000):
            params, state = step(params, state)

        chex.assert_tree_all_close(params, final_params, rtol=3e-2, atol=3e-2)
Example 8
class AliasTest(chex.TestCase):
    def setUp(self):
        super(AliasTest, self).setUp()
        self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.]))
        self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.]))

    @chex.all_variants()
    @parameterized.named_parameters(
        ('sgd', alias.sgd(LR, 0.0), optimizers.sgd(LR), 1e-5),
        ('adam', alias.adam(LR, 0.9, 0.999,
                            1e-8), optimizers.adam(LR, 0.9, 0.999), 1e-4),
        ('rmsprop', alias.rmsprop(LR, .9, 0.1), optimizers.rmsprop(
            LR, .9, 0.1), 1e-5),
        ('adagrad', alias.adagrad(LR, 0., 0.),
         optimizers.adagrad(LR, 0.), 1e-5),
    )
    def test_jax_optimizer_equivalent(self, optax_optimizer, jax_optimizer,
                                      rtol):

        # experimental/optimizers.py
        jax_params = self.init_params
        opt_init, opt_update, get_params = jax_optimizer
        state = opt_init(jax_params)
        for i in range(STEPS):
            state = opt_update(i, self.per_step_updates, state)
            jax_params = get_params(state)

        # optax
        optax_params = self.init_params
        state = optax_optimizer.init(optax_params)

        @self.variant
        def step(updates, state):
            return optax_optimizer.update(updates, state)

        for _ in range(STEPS):
            updates, state = step(self.per_step_updates, state)
            optax_params = update.apply_updates(optax_params, updates)

        # Check equivalence.
        chex.assert_tree_all_close(jax_params, optax_params, rtol=rtol)

    @parameterized.named_parameters(
        ('sgd', alias.sgd(1e-2, 0.0)),
        ('adam', alias.adam(1e-1)),
        ('adamw', alias.adamw(1e-1)),
        ('lamb', alias.adamw(1e-1)),
        ('rmsprop', alias.rmsprop(1e-1)),
        ('fromage', transform.scale_by_fromage(-1e-2)),
        ('adabelief', alias.adabelief(1e-1)),
    )
    def test_parabel(self, opt):
        initial_params = jnp.array([-1.0, 10.0, 1.0])
        final_params = jnp.array([1.0, -1.0, 1.0])

        @jax.grad
        def get_updates(params):
            return jnp.sum((params - final_params)**2)

        @jax.jit
        def step(params, state):
            updates, state = opt.update(get_updates(params), state, params)
            params = update.apply_updates(params, updates)
            return params, state

        params = initial_params
        state = opt.init(params)
        for _ in range(1000):
            params, state = step(params, state)

        chex.assert_tree_all_close(params, final_params, rtol=1e-2, atol=1e-2)

    @parameterized.named_parameters(
        ('sgd', alias.sgd(2e-3, 0.2)),
        ('adam', alias.adam(1e-1)),
        ('adamw', alias.adamw(1e-1)),
        ('lamb', alias.lamb(1e-3)),
        ('rmsprop', alias.rmsprop(5e-3)),
        ('fromage', transform.scale_by_fromage(-5e-3)),
        ('adabelief', alias.adabelief(1e-1)),
    )
    def test_rosenbrock(self, opt):
        a = 1.0
        b = 100.0
        initial_params = jnp.array([0.0, 0.0])
        final_params = jnp.array([a, a**2])

        @jax.grad
        def get_updates(params):
            return (a - params[0])**2 + b * (params[1] - params[0]**2)**2

        @jax.jit
        def step(params, state):
            updates, state = opt.update(get_updates(params), state, params)
            params = update.apply_updates(params, updates)
            return params, state

        params = initial_params
        state = opt.init(params)
        for _ in range(10000):
            params, state = step(params, state)

        chex.assert_tree_all_close(params, final_params, rtol=3e-2, atol=3e-2)
Example 9
class FlaxOptimizersEquivalenceTest(chex.TestCase):
    def setUp(self):
        super().setUp()
        self.init_params = (jnp.array([1., 0.1, 1., 2.]), jnp.array([3., 4.]))
        self.per_step_updates = (jnp.array([0., 0.3, 500., 5.]),
                                 jnp.array([300., 3.]))

    @parameterized.named_parameters(
        ('sgd', alias.sgd(LR), optim.GradientDescent(LR)),
        ('momentum', alias.sgd(LR, momentum=0.9), optim.Momentum(
            LR, beta=0.9)),  # Different names.
        ('nesterov_momentum', alias.sgd(LR, momentum=0.9, nesterov=True),
         optim.Momentum(LR, beta=0.9, nesterov=True)),
        ('rmsprop', alias.rmsprop(LR), optim.RMSProp(LR)),
        ('centered_rmsprop', alias.rmsprop(
            LR, centered=True), optim.RMSProp(LR, centered=True)),
        ('adam', alias.adam(LR), optim.Adam(LR)),
        ('adam_w', alias.adamw(LR, weight_decay=1e-4),
         optim.Adam(LR, weight_decay=1e-4)),  # Different name.
        ('adagrad',
         alias.adagrad(LR, initial_accumulator_value=0.),  # Different default!
         optim.Adagrad(LR)),
        ('lamb', alias.lamb(LR), optim.LAMB(LR)),
        ('lars',
         alias.lars(LR,
                    weight_decay=.5,
                    trust_coefficient=0.003,
                    momentum=0.9,
                    eps=1e-3),
         optim.LARS(
             LR, weight_decay=.5, trust_coefficient=0.003, beta=0.9,
             eps=1e-3)),
        ('adafactor',
         alias.adafactor(learning_rate=LR / 10.,
                         factored=True,
                         multiply_by_parameter_scale=True,
                         clipping_threshold=1.0,
                         decay_rate=0.8,
                         min_dim_size_to_factor=2),
         optim.Adafactor(learning_rate=LR / 10.,
                         factored=True,
                         multiply_by_parameter_scale=True,
                         clipping_threshold=1.0,
                         decay_rate=0.8,
                         min_dim_size_to_factor=2)),
    )
    def test_flax_optim_equivalence(self, optax_optimizer, flax_optimizer):

        # flax/optim
        flax_params = self.init_params
        flax_optimizer = flax_optimizer.create(flax_params)
        for _ in range(STEPS):
            flax_optimizer = flax_optimizer.apply_gradient(
                self.per_step_updates)
            flax_params = flax_optimizer.target

        # optax
        optax_params = self.init_params
        state = optax_optimizer.init(optax_params)
        for _ in range(STEPS):
            updates, state = optax_optimizer.update(self.per_step_updates,
                                                    state, optax_params)
            optax_params = update.apply_updates(optax_params, updates)

        # Check equivalence.
        chex.assert_tree_all_close(flax_params, optax_params, rtol=2e-4)
Example 10
class AliasTest(chex.TestCase):
    @parameterized.product(
        (
            dict(opt_name='sgd', opt=lambda: alias.sgd(1e-3, 0.9)),
            dict(opt_name='adafactor', opt=lambda: alias.adafactor(5e-3)),
            dict(opt_name='adagrad', opt=lambda: alias.adagrad(1.0)),
            dict(opt_name='adam', opt=lambda: alias.adam(1e-1)),
            dict(opt_name='adamw', opt=lambda: alias.adamw(1e-1)),
            dict(opt_name='lars', opt=lambda: alias.lars(1.0)),
            dict(opt_name='lamb', opt=lambda: alias.lamb(1e-3)),
            dict(opt_name='noisy_sgd',
                 opt=lambda: alias.noisy_sgd(1e-3, eta=1e-4)),
            dict(opt_name='rmsprop', opt=lambda: alias.rmsprop(5e-3)),
            dict(opt_name='rmsprop_momentum',
                 opt=lambda: alias.rmsprop(5e-3, momentum=0.9)),
            dict(opt_name='fromage', opt=lambda: alias.fromage(5e-3)),
            dict(opt_name='adabelief', opt=lambda: alias.adabelief(1e-2)),
            dict(opt_name='radam', opt=lambda: alias.radam(5e-3)),
            dict(opt_name='sm3', opt=lambda: alias.sm3(1.0)),
            dict(opt_name='yogi', opt=lambda: alias.yogi(1e-1)),
            dict(opt_name='dpsgd',
                 opt=lambda: alias.dpsgd(1e-3, 10.0, 0.001, 0, 0.2)),
        ),
        target=(_setup_parabola, _setup_rosenbrock),
        dtype=(jnp.float32, jnp.complex64),
    )
    def test_optimization(self, opt_name, opt, target, dtype):
        if (opt_name in ('fromage', 'noisy_sgd', 'sm3')
                and jnp.iscomplexobj(dtype)):
            raise absltest.SkipTest(
                f'{opt_name} does not support complex parameters.')

        opt = opt()
        initial_params, final_params, get_updates = target(dtype)

        @jax.jit
        def step(params, state):
            updates = get_updates(params)
            if opt_name == 'dpsgd':
                updates = updates[None]
            # Complex gradients need to be conjugated before being added to parameters
            # https://gist.github.com/wdphy16/118aef6fb5f82c49790d7678cf87da29
            updates = jax.tree_map(lambda x: x.conj(), updates)
            updates, state = opt.update(updates, state, params)
            params = update.apply_updates(params, updates)
            return params, state

        params = initial_params
        state = opt.init(params)
        for _ in range(10000):
            params, state = step(params, state)

        chex.assert_tree_all_close(params, final_params, rtol=3e-2, atol=3e-2)

    @parameterized.named_parameters([
        ('float32', 'float32'),
        ('bfloat16', 'bfloat16'),
        ('complex64', 'complex64'),
        ('None', None),
    ])
    def test_explicit_dtype(self, dtype):
        expected_dtype = jax.dtypes.canonicalize_dtype(
            dtype)  # None -> float32
        tx = alias.sgd(0.1, momentum=0.9, accumulator_dtype=dtype)
        trace_state, _ = tx.init(jnp.array([0.0, 0.0]))
        self.assertEqual(expected_dtype, trace_state.trace.dtype)
        tx = alias.adam(0.1, mu_dtype=dtype)
        adam_state, _ = tx.init(jnp.array([0.0, 0.0]))
        self.assertEqual(expected_dtype, adam_state.mu.dtype)
        tx = alias.adamw(0.1, mu_dtype=dtype)
        adam_state, _, _ = tx.init(jnp.array([0.0, 0.0]))
        self.assertEqual(expected_dtype, adam_state.mu.dtype)
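Example 10's target parameter refers to _setup_parabola and _setup_rosenbrock helpers that are not part of the excerpt. A plausible sketch of such helpers, based on the inline targets in Example 7 (the abs(.)**2 form keeps the loss real-valued for the complex-dtype cases; the original definitions may differ):

def _setup_parabola(dtype):
    """Quadratic target: returns (initial params, optimum, gradient fn)."""
    initial_params = jnp.array([-1.0, 10.0, 1.0], dtype=dtype)
    final_params = jnp.array([1.0, -1.0, 1.0], dtype=dtype)

    @jax.grad
    def get_updates(params):
        return jnp.sum(jnp.abs(params - final_params) ** 2)

    return initial_params, final_params, get_updates


def _setup_rosenbrock(dtype):
    """Rosenbrock target: the optimum is at (a, a**2)."""
    a = 1.0
    b = 100.0
    initial_params = jnp.array([0.0, 0.0], dtype=dtype)
    final_params = jnp.array([a, a ** 2], dtype=dtype)

    @jax.grad
    def get_updates(params):
        return (jnp.abs(a - params[0]) ** 2 +
                b * jnp.abs(params[1] - params[0] ** 2) ** 2)

    return initial_params, final_params, get_updates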