def test_call(self, step, res):
    """Check the learning rate produced by GammaBetaDecreasingStep.

    Builds a scheduler with beta=2 and gamma=1, evaluates it at the
    given step, and compares the resulting learning rate to `res`.

    Args:
      step: step number to feed the 'GammaBetaDecreasingStep' scheduler.
      res: expected learning rate returned by the scheduler at `step`.
    """
    beta_t = _ops.convert_to_tensor_v2(2, dtype=tf.float32)
    gamma_t = _ops.convert_to_tensor_v2(1, dtype=tf.float32)
    sched = opt.GammaBetaDecreasingStep()
    sched.initialize(beta_t, gamma_t)
    step_t = _ops.convert_to_tensor_v2(step, dtype=tf.float32)
    learning_rate = sched(step_t)
    self.assertAllClose(learning_rate.numpy(), res)
def radius(self):
    """Radius, R, of the convex hypothesis space W.

    Returns:
      `self.radius_constant` converted to a float32 tensor.
    """
    r_value = self.radius_constant
    return _ops.convert_to_tensor_v2(r_value, dtype=tf.float32)
def lipchitz_constant(self, class_weight):  # pylint: disable=unused-argument
    """Lipchitz constant, L, of this loss — fixed at 1 for this test stub.

    Args:
      class_weight: class weights used (ignored by this stub).

    Returns:
      The constant L as a float32 tensor.
    """
    return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
def beta(self, class_weight):  # pylint: disable=unused-argument
    """Smoothness constant, beta — fixed at 1 for this test stub.

    Args:
      class_weight: class weights as a scalar or 1d tensor whose length
        equals the number of outputs (ignored by this stub).

    Returns:
      Beta as a float32 tensor.
    """
    return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
def test_run():
    """Enter/exit the BoltOn context manager and return its epsilon.

    NOTE(review): relies on `noise`, `epsilon`, and `class_weights` from
    the enclosing scope — confirm against the surrounding test case.

    Returns:
      `bolton.epsilon` as a float32 tensor.
    """
    test_loss = TestLoss(1, 1, 1)
    bolton = opt.BoltOn(TestOptimizer(), test_loss)
    model = TestModel(1, (1,), 1)
    model.compile(bolton, test_loss)
    kernel_shape = (model.layer_input_shape[0], model.n_outputs)
    model.layers[0].kernel = model.layers[0].kernel_initializer(kernel_shape)
    with bolton(noise, epsilon, model.layers, class_weights, 1, 1) as _:
        pass
    return _ops.convert_to_tensor_v2(bolton.epsilon, dtype=tf.float32)
def project_fn(r):
    """Project a freshly built model's weights onto the radius-r ball.

    NOTE(review): relies on `n_out`, `shape`, and `init_value` from the
    enclosing scope — confirm against the surrounding test case.

    Args:
      r: radius constant handed to the test loss.

    Returns:
      The first layer's kernel, after projection, as a float32 tensor.
    """
    test_loss = TestLoss(1, 1, r)
    bolton = opt.BoltOn(TestOptimizer(), test_loss)
    model = TestModel(n_out, shape, init_value)
    model.compile(bolton, test_loss)
    kernel_shape = (model.layer_input_shape[0], model.n_outputs)
    model.layers[0].kernel = model.layers[0].kernel_initializer(kernel_shape)
    # Hand-initialize the private optimizer state that the context
    # manager would normally set up.
    bolton._is_init = True  # pylint: disable=protected-access
    bolton.layers = model.layers
    bolton.epsilon = 2
    bolton.noise_distribution = 'laplace'
    bolton.n_outputs = 1
    bolton.n_samples = 1
    bolton.project_weights_to_r()
    return _ops.convert_to_tensor_v2(bolton.layers[0].kernel, tf.float32)
def test_run(fn, args):
    """Invoke a named attribute on a primed BoltOn optimizer.

    Looks up `fn` on the optimizer via getattr with a sentinel fallback,
    calls it with `args`, and reports whether the real attribute (rather
    than the fallback) was invoked.

    Args:
      fn: name of the BoltOn attribute/method to call.
      args: positional arguments forwarded to the attribute.

    Returns:
      A float32 tensor holding 1 if `fn` resolved on the optimizer,
      0 if the sentinel fallback fired instead.
    """
    test_loss = TestLoss(1, 1, 1)
    bolton = opt.BoltOn(TestOptimizer(), test_loss)
    model = TestModel(1, (1, ), 1)
    model.compile(bolton, test_loss)
    kernel_shape = (model.layer_input_shape[0], model.n_outputs)
    model.layers[0].kernel = model.layers[0].kernel_initializer(kernel_shape)
    # Hand-initialize the private optimizer state that the context
    # manager would normally set up.
    bolton._is_init = True  # pylint: disable=protected-access
    bolton.noise_distribution = 'laplace'
    bolton.epsilon = 1
    bolton.layers = model.layers
    bolton.class_weights = 1
    bolton.n_samples = 1
    bolton.batch_size = 1
    bolton.n_outputs = 1
    outcome = getattr(bolton, fn, lambda: 'test')(*args)
    flag = 1 if outcome != 'test' else 0
    return _ops.convert_to_tensor_v2(flag, dtype=tf.float32)
def gamma(self):
    """Strongly convex parameter, gamma — fixed at 1 for this test stub."""
    return _ops.convert_to_tensor_v2(1, dtype=tf.float32)