Example #1
    def test_simple_loss_multiple_args(self):
        # test that args and kwargs are both handled by optimizers
        x0 = np.zeros(2), np.zeros(3)
        ans = np.arange(2.), np.arange(3.)

        # positional initial values: x0 is a tuple of arrays, one per argument
        opt = fmin_l_bfgs_b(simple_loss_multiple_args, x0)
        # keyword initial values
        opt = fmin_l_bfgs_b(simple_loss_multiple_args,
                            init_kwargs=dict(p=x0[0], q=x0[1]))
        # mixed positional and keyword initial values
        opt = fmin_l_bfgs_b(simple_loss_multiple_args,
                            init_args=x0[0],
                            init_kwargs=dict(q=x0[1]))
        self.assertTrue(np.allclose(opt[0], ans[0]))
        self.assertTrue(np.allclose(opt[1], ans[1]))

        opt = fmin_cg(simple_loss_multiple_args, x0)
        opt = fmin_cg(simple_loss_multiple_args,
                      init_kwargs=dict(p=x0[0], q=x0[1]))
        opt = fmin_cg(simple_loss_multiple_args,
                      init_args=x0[0],
                      init_kwargs=dict(q=x0[1]))
        self.assertTrue(np.allclose(opt[0], ans[0]))
        self.assertTrue(np.allclose(opt[1], ans[1]))

        opt = fmin_ncg(simple_loss_multiple_args, x0)
        opt = fmin_ncg(simple_loss_multiple_args,
                       init_kwargs=dict(p=x0[0], q=x0[1]))
        opt = fmin_ncg(simple_loss_multiple_args,
                       init_args=x0[0],
                       init_kwargs=dict(q=x0[1]))
        self.assertTrue(np.allclose(opt[0], ans[0]))
        self.assertTrue(np.allclose(opt[1], ans[1]))
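
These snippets omit their imports, and the `simple_loss_multiple_args` helper is not shown. A minimal sketch of what the test above assumes — the import path and the helper body are assumptions, inferred from the pyautodiff-style `init_args`/`init_kwargs` API and from the `ans` values the test asserts:

import numpy as np
# assumed import path; scipy.optimize's fmin_l_bfgs_b does not accept
# init_args/init_kwargs, so these are autodiff-style wrappers
from autodiff.optimize import fmin_l_bfgs_b, fmin_cg, fmin_ncg

def simple_loss_multiple_args(p, q):
    # quadratic bowl whose minimum sits at p = arange(2.), q = arange(3.),
    # matching the `ans` tuple asserted in the test
    return ((p - np.arange(2.0)) ** 2).sum() + ((q - np.arange(3.0)) ** 2).sum()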
Example #2
    def fit(self, X, y=None):
        n_samples, n_dim = X.shape
        # random initialization of the projection weights and biases
        W = np.random.randn(n_dim, self.n_features)
        b = np.random.randn(self.n_features)
        obj_fn = self.get_objective_fn(X)
        # minimize over the (W, b) tuple; the wrapper returns it unpacked
        self.W_, self.b_ = optimize.fmin_l_bfgs_b(obj_fn, (W, b), iprint=1,
                                                  maxfun=self.n_iterations)
        return self
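
The `get_objective_fn` method is not part of the snippet. A hypothetical sketch of the shape it must have — a closure over `X` taking the same `(W, b)` pair that is passed as the initial value (the reconstruction-style objective here is illustrative, not the original):

    def get_objective_fn(self, X):
        def obj_fn(W, b):
            # illustrative reconstruction objective over (W, b)
            H = np.tanh(np.dot(X, W) + b)
            X_rec = np.dot(H, W.T)
            return ((X - X_rec) ** 2).mean()
        return obj_fn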
Example #3
def test_svm():
    rng = np.random.RandomState(1)

    # -- create some fake data
    x = rng.rand(10, 5)
    y = 2 * (rng.rand(10) > 0.5) - 1
    l2_regularization = 1e-4

    # -- loss function
    def loss_fn(weights, bias):
        margin = y * (np.dot(x, weights) + bias)
        loss = np.maximum(0, 1 - margin) ** 2
        l2_cost = 0.5 * l2_regularization * np.dot(weights, weights)
        loss = np.mean(loss) + l2_cost
        print('ran loss_fn(), returning', loss)
        return loss

    # -- call optimizer
    w_0, b_0 = np.zeros(5), np.zeros(())
    w, b = fmin_l_bfgs_b(loss_fn, init_args=(w_0, b_0))

    final_loss = loss_fn(w, b)

    assert np.allclose(final_loss, 0.7229)

    print('optimization successful!')
Example #5
    def test_subtensor(self):
        x0 = np.zeros(2)
        ans = [-3, 4]

        opt = fmin_l_bfgs_b(subtensor_loss, x0)
        self.assertTrue(np.allclose(opt, ans))

        opt = fmin_cg(subtensor_loss, x0)
        self.assertTrue(np.allclose(opt, ans))

        opt = fmin_ncg(subtensor_loss, x0)
        self.assertTrue(np.allclose(opt, ans))
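
`subtensor_loss` is likewise not shown. Any loss that reads through indexing (a "subtensor") with its minimum at [-3, 4] satisfies the assertion; a minimal sketch under that assumption:

def subtensor_loss(x):
    # indexes into x (the "subtensor"); minimized at x = [-3, 4]
    return (x[0] + 3.0) ** 2 + (x[1] - 4.0) ** 2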
Example #7
    def test_L2(self):
        x0 = np.zeros((5, 3))
        ans = np.array([[+3.0, -1.0, -5.0],
                        [+1.5, -0.5, -2.5],
                        [+0.0,  0.0,  0.0],
                        [-1.5,  0.5,  2.5],
                        [-3.0,  1.0,  5.0]]) / 10.0

        opt = fmin_l_bfgs_b(l2_loss, x0)
        self.assertTrue(np.allclose(opt, ans))

        opt = fmin_cg(l2_loss, x0)
        self.assertTrue(np.allclose(opt, ans))

        opt = fmin_ncg(l2_loss, x0)
        self.assertTrue(np.allclose(opt, ans))
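
The `l2_loss` helper is also undefined in these snippets. The expected minimizer factors as an outer product, so a stand-in consistent with the assertion (hypothetical, not the original loss) is:

TARGET = np.outer([2.0, 1.0, 0.0, -1.0, -2.0], [0.15, -0.05, -0.25])

def l2_loss(x):
    # hypothetical stand-in: squared L2 distance to the expected minimizer,
    # which equals the `ans` matrix asserted above
    return ((x - TARGET) ** 2).sum()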
Example #8
def train_svm(x, y):
    l2_regularization = 1e-4
    print(y)
    def loss_fn(weights, bias):
        margin = y * (np.dot(x, weights) + bias)
        loss = np.maximum(0, 1 - margin) ** 2
        l2_cost = 0.5 * l2_regularization * np.dot(weights, weights)
        loss = np.mean(loss) + l2_cost
        return loss

    n, m = x.shape
    w0 = np.zeros(m)
    b0 = np.zeros(())
    w, b = fmin_l_bfgs_b(loss_fn, init_args=(w0, b0))

    return w, b
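
A short usage example, assuming the same fake-data setup as the SVM tests above:

rng = np.random.RandomState(1)
x = rng.rand(10, 5)
y = 2 * (rng.rand(10) > 0.5) - 1
w, b = train_svm(x, y)
print('weights:', w, 'bias:', b)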
Example #9
    def test_svm(self):
        rng = np.random.RandomState(1)

        # -- create some fake data
        x = rng.rand(10, 5)
        y = 2 * (rng.rand(10) > 0.5) - 1
        l2_regularization = 1e-4

        def loss_fn(weights, bias):
            margin = y * (np.dot(x, weights) + bias)
            loss = np.maximum(0, 1 - margin) ** 2
            l2_cost = 0.5 * l2_regularization * np.dot(weights, weights)
            loss = np.mean(loss) + l2_cost
            print('ran loss_fn(), returning', loss)
            return loss

        w, b = fmin_l_bfgs_b(loss_fn, (np.zeros(5), np.zeros(())))
        final_loss = loss_fn(w, b)
        assert np.allclose(final_loss, 0.7229)