def _make_loss_functions(self, mode=None):
    """Return pair ``f_loss, f_d_loss`` of compiled functions.

    - ``f_loss`` returns the current loss,
    - ``f_d_loss`` returns the gradient of that loss wrt the parameters.

    :param mode: Compilation mode forwarded to ``self.function``
        (presumably a Theano compilation mode — TODO confirm).
    """
    # Symbolic gradient of the loss wrt the parameters.
    d_loss = self._d_loss()
    # Optionally clip the gradient into an L2 ball of radius
    # ``self.gradient_clip`` to bound the update magnitude.
    if self.gradient_clip:
        d_loss = project_into_l2_ball(d_loss, self.gradient_clip)
    args = list(self.data_arguments)
    f_loss = self.function(args, 'loss', explicit_pars=True, mode=mode)
    f_d_loss = self.function(args, d_loss, explicit_pars=True, mode=mode)
    return f_loss, f_d_loss
def test_project_into_l2_ball_batch(): x = T.matrix() x_projected = project_into_l2_ball(x, 1) f = theano.function([x], x_projected) x = np.array([[.1, .1, .1], [0, 1, 0]]).astype(theano.config.floatX) y = f(x) desired = np.array([ [.1, .1, .1], [0., 1, 0], ]) print y assert np.allclose(desired, y)
def test_project_into_l2_ball_batch(): x = T.matrix() x_projected = project_into_l2_ball(x, 1) f = theano.function([x], x_projected) x = np.array([[.1, .1, .1], [0, 1, 0]]) y = f(x) desired = np.array([ [.1, .1, .1], [0., 1, 0], ]) print y assert np.allclose(desired, y)
def test_project_into_l2_ball_single():
    """Vectors inside the unit ball pass through; longer ones are rescaled.

    Fix: cast each input to ``theano.config.floatX`` — without the cast,
    a float64 array fed to a function compiled under ``floatX=float32``
    raises a dtype mismatch error.
    """
    x = T.vector()
    x_projected = project_into_l2_ball(x, 1)
    f = theano.function([x], x_projected)
    # Norm ~0.173 < 1: projection is the identity.
    x = np.array([.1, .1, .1]).astype(theano.config.floatX)
    y = f(x)
    assert np.allclose(np.array([.1, .1, .1]), y)
    # Norm sqrt(6) > 1: vector is rescaled onto the unit sphere.
    x = np.array([2, 1, 1]).astype(theano.config.floatX)
    y = f(x)
    assert np.allclose(np.array([0.81649658, 0.40824829, 0.40824829]), y)
    # Norm exactly 1: boundary case, unchanged.
    x = np.array([0, 1, 0]).astype(theano.config.floatX)
    y = f(x)
    assert np.allclose(np.array([0., 1, 0]), y)
def test_project_into_l2_ball_single():
    """Vectors inside the unit ball pass through; longer ones are rescaled."""
    inpt = T.vector()
    project = theano.function([inpt], project_into_l2_ball(inpt, 1))

    # (input, expected projection) pairs: norms ~0.173, sqrt(6) and 1.
    cases = [
        ([.1, .1, .1], [.1, .1, .1]),
        ([2, 1, 1], [0.81649658, 0.40824829, 0.40824829]),
        ([0, 1, 0], [0., 1, 0]),
    ]
    for raw, expected in cases:
        arr = np.array(raw).astype(theano.config.floatX)
        assert np.allclose(np.array(expected), project(arr))