Example #1
def group_lasso_example():

    def selector(p, slice):
        return np.identity(p)[slice]

    penalties = [l2norm(selector(500, slice(i * 100, (i + 1) * 100)), l=.1)
                 for i in range(5)]
    penalties[0].l = 250.
    penalties[1].l = 225.
    penalties[2].l = 150.
    penalties[3].l = 100.
    group_lasso = seminorm(*penalties)

    X = np.random.standard_normal((1000, 500))
    Y = np.random.standard_normal((1000,))
    regloss = squaredloss(X, Y)
    p = regloss.add_seminorm(group_lasso)
    solver = FISTA(p)
    solver.debug = True
    vals = solver.fit(max_its=2000, min_its=20, tol=1e-10)
    soln = solver.problem.coefs

    # solution

    pylab.figure(num=1)
    pylab.clf()
    pylab.plot(soln, c='g')

    # objective values

    pylab.figure(num=2)
    pylab.clf()
    pylab.plot(vals)
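These snippets assume import numpy as np, import pylab, and that l2norm, seminorm, squaredloss and FISTA appear to come from the regreg package; the page does not show the imports. The numpy-only sketch below (the selector helper is reproduced here purely for illustration) shows what the selector matrix does: it is a 0/1 row-selection matrix, so each l2norm atom penalizes the Euclidean norm of one block of 100 of the 500 coefficients, and seminorm(*penalties) combines the five blocks into a group-lasso penalty.

import numpy as np

def selector(p, sl):
    return np.identity(p)[sl]

S = selector(6, slice(2, 4))        # rows 2 and 3 of the 6x6 identity
beta = np.arange(6, dtype=float)    # [0., 1., 2., 3., 4., 5.]
print(S.shape)                      # (2, 6)
print(S.dot(beta))                  # [2. 3.] -- the block this atom penalizes
print(np.linalg.norm(S.dot(beta)))  # that block's l2 norm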
Example #2
def group_lasso_example():
    def selector(p, slice):
        return np.identity(p)[slice]

    penalties = [
        l2norm(selector(500, slice(i * 100, (i + 1) * 100)), l=.1)
        for i in range(5)
    ]
    penalties[0].l = 250.
    penalties[1].l = 225.
    penalties[2].l = 150.
    penalties[3].l = 100.
    group_lasso = seminorm(*penalties)

    X = np.random.standard_normal((1000, 500))
    Y = np.random.standard_normal((1000, ))
    regloss = squaredloss(X, Y)
    p = regloss.add_seminorm(group_lasso)
    solver = FISTA(p)
    solver.debug = True
    vals = solver.fit(max_its=2000, min_its=20, tol=1e-10)
    soln = solver.problem.coefs

    # solution

    pylab.figure(num=1)
    pylab.clf()
    pylab.plot(soln, c='g')

    # objective values

    pylab.figure(num=2)
    pylab.clf()
    pylab.plot(vals)
Example #3
class conjugate(composite):

    def __init__(self, atom, quadratic=None, tol=1e-8):

        # we copy the atom because we will modify its quadratic part
        self.atom = copy(atom)

        if self.atom.quadratic is None:
            self.atom.set_quadratic(0, None, None, 0)
        
        if quadratic is not None:
            totalq = self.atom.quadratic + quadratic
            self.atom.set_quadratic(totalq.coef,
                                    totalq.offset,
                                    totalq.linear_term,
                                    totalq.constant_term)

        if self.atom.quadratic in [0, None]:
            raise ValueError('quadratic coefficient must be non-zero')

        self.solver = FISTA(self.atom)
        self.tol = tol
        #XXX we need a better way to pass around the Lipschitz constant
        # should go in the container class
        if hasattr(self.atom, "lipschitz"):
            self._backtrack = False
            # self._smooth_function_linear.lipschitz = atom.lipschitz + self.atom.quadratic.coef
        else:
            self._backtrack = True
        self._have_solved_once = False

    def smooth_objective(self, x, mode='both', check_feasibility=False):
        """
        Evaluate the conjugate function and/or its gradient

        if mode == 'both', return both function value and gradient
        if mode == 'grad', return only the gradient
        if mode == 'func', return only the function value
        """

        self.solver.debug = False
        # temporarily shift the linear term of the quadratic by -x
        self.atom.quadratic.linear_term -= x.T
        self.solver.fit(max_its=5000, tol=self.tol, backtrack=self._backtrack)
        minimizer = self.atom.coefs

        # retain a reference
        self.argmin = minimizer
        if mode in ('both', 'func'):
            v = self.atom.objective(minimizer)

        # undo the shift so the atom is left unchanged
        self.atom.quadratic.linear_term += x.T

        if mode == 'both':
            return -v, minimizer
        elif mode == 'func':
            return -v
        elif mode == 'grad':
            return minimizer
        else:
            raise ValueError("mode incorrectly specified")
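The smooth_objective method computes the convex conjugate of the atom's objective: subtracting x from the linear term and minimizing gives min_u [f(u) - <x, u>], whose negative is f*(x), and the minimizer is the gradient of f* at x. Below is a small numpy-only check of that identity for a function whose conjugate is known in closed form; the plain gradient-descent loop stands in for the FISTA solve and is only illustrative.

import numpy as np

rng = np.random.RandomState(0)
a = rng.standard_normal(5)
x = rng.standard_normal(5)

# f(u) = 0.5*||u - a||^2, whose conjugate is f*(x) = <a, x> + 0.5*||x||^2
u = np.zeros(5)
for _ in range(200):                  # gradient descent on f(u) - <x, u>
    u -= 0.5 * ((u - a) - x)          # gradient of 0.5*||u - a||^2 - <x, u>
min_val = 0.5 * np.sum((u - a) ** 2) - x.dot(u)

print(np.allclose(-min_val, a.dot(x) + 0.5 * x.dot(x)))  # True: equals f*(x)
print(np.allclose(u, a + x))          # the minimizer is grad f*(x) = a + x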
Example #4
    def __init__(self, atom, quadratic=None, tol=1e-8):

        # we copy the atom because we will modify its quadratic part
        self.atom = copy(atom)

        if self.atom.quadratic is None:
            self.atom.set_quadratic(0, None, None, 0)
        
        if quadratic is not None:
            totalq = self.atom.quadratic + quadratic
            self.atom.set_quadratic(totalq.coef,
                                    totalq.offset,
                                    totalq.linear_term,
                                    totalq.constant_term)

        if self.atom.quadratic in [0, None]:
            raise ValueError('quadratic coefficient must be non-zero')

        self.solver = FISTA(self.atom)
        self.tol = tol
        #XXX we need a better way to pass around the Lipschitz constant
        # should go in the container class
        if hasattr(self.atom, "lipschitz"):
            self._backtrack = False
            # self._smooth_function_linear.lipschitz = atom.lipschitz + self.atom.quadratic.coef
        else:
            self._backtrack = True
        self._have_solved_once = False
Example #5
    def __init__(self, smooth_f, epsilon=0.01):
        self._smooth_function = smooth_f
        self._linear = linear(np.zeros(smooth_f.primal_shape))
        self._quadratic = l2normsq(smooth_f.primal_shape, l=epsilon / 2.)
        self._smooth_function_linear = smooth_function(smooth_f, self._linear,
                                                       self._quadratic)
        self._solver = FISTA(self._smooth_function_linear)
        #XXX we need a better way to pass around the Lipschitz constant
        # should go in the container class
        if hasattr(smooth_f, "L"):
            self._backtrack = False
            self._smooth_function_linear.L = smooth_f.L + epsilon
        else:
            self._backtrack = True
        self._have_solved_once = False
        self.epsilon = epsilon
Example #6
class conjugate(object):
    def __init__(self, smooth_f, epsilon=0.01):
        self._smooth_function = smooth_f
        self._linear = linear(np.zeros(smooth_f.primal_shape))
        self._quadratic = l2normsq(smooth_f.primal_shape, l=epsilon / 2.)
        self._smooth_function_linear = smooth_function(smooth_f, self._linear,
                                                       self._quadratic)
        self._solver = FISTA(self._smooth_function_linear)
        #XXX we need a better way to pass around the Lipschitz constant
        # should go in the container class
        if hasattr(smooth_f, "L"):
            self._backtrack = False
            self._smooth_function_linear.L = smooth_f.L + epsilon
        else:
            self._backtrack = True
        self._have_solved_once = False
        self.epsilon = epsilon

    def smooth_eval(self, x, mode='both'):
        """
        Evaluate the conjugate function and/or its gradient

        if mode == 'both', return both function value and gradient
        if mode == 'grad', return only the gradient
        if mode == 'func', return only the function value
        """

        self._solver.debug = False

        self._linear.vector[:] = -x
        self._solver.fit(max_its=1000, tol=1.0e-08, backtrack=self._backtrack)
        minimizer = self._smooth_function_linear.coefs

        if mode == 'both':
            v = self._smooth_function_linear.smooth_eval(minimizer,
                                                         mode='func')
            return -v, minimizer
        elif mode == 'func':
            v = self._smooth_function_linear.smooth_eval(minimizer,
                                                         mode='func')
            return -v
        elif mode == 'grad':
            return minimizer
        else:
            raise ValueError("mode incorrectly specified")
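In this version the conjugate of a smooth function is approximated rather than computed exactly: the l2normsq term adds (epsilon/2)*||u||^2 to the inner objective, which makes it strongly convex and bumps the Lipschitz constant by epsilon, so the FISTA solve has a unique, stable minimizer. The scalar sketch below, numpy-only and independent of the regreg API (the smoothed_conjugate helper is illustrative), works the inner minimization out in closed form for f(u) = u^2/2 and shows the smoothed value approaching the true conjugate as epsilon shrinks.

import numpy as np

def smoothed_conjugate(x, epsilon):
    # minimize 0.5*u**2 - x*u + 0.5*epsilon*u**2 in closed form
    u_star = x / (1.0 + epsilon)
    return -(0.5 * u_star ** 2 - x * u_star + 0.5 * epsilon * u_star ** 2)

x = 3.0
exact = 0.5 * x ** 2                    # true conjugate of f(u) = 0.5*u**2
for eps in (0.1, 0.01, 0.001):
    print(eps, smoothed_conjugate(x, eps), exact)   # approaches 4.5 as eps -> 0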
Example #7
class conjugate(object):

    def __init__(self, smooth_f, epsilon=0.01):
        self._smooth_function = smooth_f
        self._linear = linear(np.zeros(smooth_f.primal_shape))
        self._quadratic = l2normsq(smooth_f.primal_shape, l=epsilon/2.)
        self._smooth_function_linear = smooth_function(smooth_f, self._linear, self._quadratic)
        self._solver = FISTA(self._smooth_function_linear)
        #XXX we need a better way to pass around the Lipschitz constant
        # should go in the container class
        if hasattr(smooth_f, "L"):
            self._backtrack = False
            self._smooth_function_linear.L = smooth_f.L + epsilon
        else:
            self._backtrack = True
        self._have_solved_once = False
        self.epsilon = epsilon

    def smooth_eval(self, x, mode='both'):
        """
        Evaluate the conjugate function and/or its gradient

        if mode == 'both', return both function value and gradient
        if mode == 'grad', return only the gradient
        if mode == 'func', return only the function value
        """

        self._solver.debug = False

        self._linear.vector[:] = -x
        self._solver.fit(max_its=1000, tol=1.0e-08, backtrack=self._backtrack)
        minimizer = self._smooth_function_linear.coefs
            
        if mode == 'both':
            v = self._smooth_function_linear.smooth_eval(minimizer, mode='func')
            return -v, minimizer
        elif mode == 'func':
            v = self._smooth_function_linear.smooth_eval(minimizer, mode='func')
            return -v
        elif mode == 'grad':
            return minimizer
        else:
            raise ValueError("mode incorrectly specified")
Example #8
def test_group_lasso_sparse(n=100):
    def selector(p, slice):
        return np.identity(p)[slice]

    def selector_sparse(p, slice):
        return sparse.csr_matrix(np.identity(p)[slice])

    X = np.random.standard_normal((1000, 500))
    Y = np.random.standard_normal((1000, ))

    penalties = [
        l2norm(selector(500, slice(i * 100, (i + 1) * 100)), l=.1)
        for i in range(5)
    ]
    penalties[0].l = 250.
    penalties[1].l = 225.
    penalties[2].l = 150.
    penalties[3].l = 100.
    group_lasso = seminorm(*penalties)
    regloss = squaredloss(X, Y)
    p = regloss.add_seminorm(group_lasso)
    solver = FISTA(p)
    solver.debug = True
    t1 = time.time()
    vals = solver.fit(max_its=2000, min_its=20, tol=1e-8)
    soln1 = solver.problem.coefs
    t2 = time.time()
    dt1 = t2 - t1

    penalties = [
        l2norm(selector_sparse(500, slice(i * 100, (i + 1) * 100)), l=.1)
        for i in range(5)
    ]
    penalties[0].l = 250.
    penalties[1].l = 225.
    penalties[2].l = 150.
    penalties[3].l = 100.
    group_lasso = seminorm(*penalties)
    regloss = squaredloss(X, Y)
    p = regloss.add_seminorm(group_lasso)
    solver = FISTA(p)
    solver.debug = True
    t1 = time.time()
    vals = solver.fit(max_its=2000, min_its=20, tol=1e-8)
    soln2 = solver.problem.coefs
    t2 = time.time()
    dt2 = t2 - t1

    print "Times", dt1, dt2
    print soln1[range(10)]
    print soln2[range(10)]
    np.testing.assert_almost_equal(soln1, soln2)
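The test's point is that the dense and sparse selectors define the same linear map, so the two solves should return nearly identical coefficients; only the storage of the selector differs. A quick numpy/scipy sketch of that equivalence:

import numpy as np
from scipy import sparse

p, sl = 500, slice(100, 200)
dense = np.identity(p)[sl]
sp = sparse.csr_matrix(dense)

beta = np.random.standard_normal(p)
print(np.allclose(dense.dot(beta), sp.dot(beta)))  # True: identical action on beta
print(dense.size, sp.nnz)                          # 50000 stored entries vs 100 nonzeros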
Example #9
def lasso_example(compare=False):

    l1 = 20.
    sparsity = l1norm(500, l=l1 / 2.)
    X = np.random.standard_normal((1000, 500))
    Y = np.random.standard_normal((1000, ))
    regloss = squaredloss(X, Y)
    sparsity2 = l1norm(500, l=l1 / 2.)
    #p=regloss.add_seminorm(sparsity)
    p = regloss.add_seminorm(seminorm(sparsity, sparsity2))
    solver = FISTA(p)
    solver.debug = True
    vals = solver.fit(max_its=2000, min_its=100)
    soln = solver.problem.coefs

    if not compare:
        # solution
        pylab.figure(num=1)
        pylab.clf()
        pylab.plot(soln, c='g')

        # objective values
        pylab.figure(num=2)
        pylab.clf()
        pylab.plot(vals)
    else:
        p2 = lasso.gengrad((X, Y))
        p2.assign_penalty(l1=l1)
        opt = FISTA(p2)
        opt.debug = True
        opt.fit(tol=1e-10, max_its=5000)
        beta = opt.problem.coefs
        print "Terminal error with seminorm:", np.min(
            vals), "\tTerminal error with lasso", p.obj(
                beta), "\nTerminal relative error:", (
                    np.min(vals) - p.obj(beta)) / p.obj(beta)

        pylab.figure(num=1)
        pylab.clf()
        #pylab.plot(soln, c='g')
        pylab.scatter(soln, beta)

        pylab.figure(num=2)
        pylab.clf()
        pylab.plot(vals)
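The comparison is between two formulations of the same problem: the two l1norm atoms at l1/2 each add up to a single l1 penalty at l1, which is what lasso.gengrad is given, so the terminal objective values should agree up to solver tolerance. A trivial numpy check of the penalty identity:

import numpy as np

beta = np.random.standard_normal(500)
l1 = 20.
print(np.isclose(l1 / 2. * np.abs(beta).sum() + l1 / 2. * np.abs(beta).sum(),
                 l1 * np.abs(beta).sum()))   # True: the split penalty is the same penalty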
Example #10
def test_1d_fused_lasso(n=100):

    l1 = 1.

    sparsity1 = l1norm(n, l=l1)
    D = (np.identity(n) - np.diag(np.ones(n - 1), -1))[1:]
    extra = np.zeros(n)
    extra[0] = 1.
    D = np.vstack([D, extra])
    D = sparse.csr_matrix(D)

    fused = seminorm(l1norm(D, l=l1))

    X = np.random.standard_normal((2 * n, n))
    Y = np.random.standard_normal((2 * n, ))
    regloss = squaredloss(X, Y)
    p = regloss.add_seminorm(fused)
    solver = FISTA(p)
    solver.debug = True
    vals1 = solver.fit(max_its=25000, tol=1e-12)
    soln1 = solver.problem.coefs

    B = np.array(sparse.tril(np.ones((n, n))).todense())
    X2 = np.dot(X, B)

    time.sleep(3)

    D2 = np.diag(np.ones(n))
    p2 = lasso.gengrad((X2, Y))
    p2.assign_penalty(l1=l1)
    opt = FISTA(p2)
    opt.debug = True
    opt.fit(tol=1e-12, max_its=25000)
    beta = opt.problem.coefs
    soln2 = np.dot(B, beta)

    print soln1[range(10)]
    print soln2[range(10)]
    print p.obj(soln1), p.obj(soln2)
    #np.testing.assert_almost_equal(soln1,soln2)

    return vals1
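The change of variables behind this test: D stacks the first differences beta[i] - beta[i-1] together with a row that picks out beta[0], and with beta = B.dot(gamma) (B lower-triangular ones, i.e. cumulative sums) the product D.dot(B) is a permutation matrix, so the fused-lasso penalty on beta is an ordinary l1 penalty on gamma. That is why the second half can solve a plain lasso on X2 = np.dot(X, B) and map back with soln2 = np.dot(B, beta). A numpy/scipy sketch for n = 4:

import numpy as np
from scipy import sparse

n = 4
D = (np.identity(n) - np.diag(np.ones(n - 1), -1))[1:]
extra = np.zeros(n)
extra[0] = 1.
D = np.vstack([D, extra])
B = np.array(sparse.tril(np.ones((n, n))).todense())

print(D)                 # rows give beta[1]-beta[0], beta[2]-beta[1], beta[3]-beta[2], beta[0]
print(D.dot(B))          # a permutation of the identity
gamma = np.random.standard_normal(n)
print(np.isclose(np.abs(D.dot(B).dot(gamma)).sum(), np.abs(gamma).sum()))  # True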
Example #11
    def __init__(self, smooth_f, epsilon=0.01):
        self._smooth_function = smooth_f
        self._linear = linear(np.zeros(smooth_f.primal_shape))
        self._quadratic = l2normsq(smooth_f.primal_shape, l=epsilon / 2.)
        self._smooth_function_linear = smooth_function(smooth_f, self._linear,
                                                       self._quadratic)
        self._solver = FISTA(self._smooth_function_linear)
        #XXX we need a better way to pass around the Lipschitz constant
        # should go in the container class
        if hasattr(smooth_f, "L"):
            self._backtrack = False
            self._smooth_function_linear.L = smooth_f.L + epsilon
        else:
            self._backtrack = True
        self._have_solved_once = False
        self.epsilon = epsilon
Example #12
def test_lasso(n=100):

    l1 = 1.
    sparsity1 = l1norm(n, l=l1 * 0.75)
    sparsity2 = l1norm(n, l=l1 * 0.25)
    sparsity = l1norm(n, l=l1)

    X = np.random.standard_normal((5000, n))
    Y = np.random.standard_normal((5000, ))
    regloss = squaredloss(X, Y)

    #p=regloss.add_seminorm(sparsity)
    #p=regloss.add_seminorm(seminorm(sparsity1,sparsity2),initial=np.zeros(n))
    p = regloss.add_seminorm(seminorm(sparsity), initial=np.zeros(n))
    solver = FISTA(p)
    solver.debug = True
    t1 = time.time()
    vals1 = solver.fit(max_its=800, tol=1e-18, set_prox_tol=True)
    t2 = time.time()
    dt1 = t2 - t1
    soln = solver.problem.coefs

    time.sleep(5)

    p2 = lasso.gengrad((X, Y))  #,initial_coefs = np.random.normal(0,1,n))
    p2.assign_penalty(l1=l1)
    opt = FISTA(p2)
    opt.debug = True
    t1 = time.time()
    vals2 = opt.fit(tol=1e-18, max_its=800)
    t2 = time.time()
    dt2 = t2 - t1
    beta = opt.problem.coefs

    print soln[range(10)]
    print beta[range(10)]

    print p.obj(soln), p.obj(beta)
    print p2.obj(soln), p2.obj(beta)
    print "Times", dt1, dt2

    return [vals1, vals2]
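Both runs are FISTA on an l1-penalized squared-error loss, so each iteration couples a gradient step with the proximal map of the l1 norm; that prox is elementwise soft-thresholding. A minimal plain-numpy sketch (the soft_threshold helper is illustrative, not part of the library):

import numpy as np

def soft_threshold(z, threshold):
    # prox of threshold*||.||_1: argmin_b 0.5*||b - z||^2 + threshold*||b||_1
    return np.sign(z) * np.maximum(np.abs(z) - threshold, 0.)

z = np.array([-3., -0.5, 0., 0.4, 2.])
print(soft_threshold(z, 1.))   # [-2. -0.  0.  0.  1.]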
Example #13
def test_group_lasso_sparse(n=100):

    def selector(p, slice):
        return np.identity(p)[slice]

    def selector_sparse(p, slice):
        return sparse.csr_matrix(np.identity(p)[slice])

    X = np.random.standard_normal((1000, 500))
    Y = np.random.standard_normal((1000,))

    penalties = [l2norm(selector(500, slice(i * 100, (i + 1) * 100)), l=.1)
                 for i in range(5)]
    penalties[0].l = 250.
    penalties[1].l = 225.
    penalties[2].l = 150.
    penalties[3].l = 100.
    group_lasso = seminorm(*penalties)
    regloss = squaredloss(X, Y)
    p = regloss.add_seminorm(group_lasso)
    solver = FISTA(p)
    solver.debug = True
    t1 = time.time()
    vals = solver.fit(max_its=2000, min_its=20, tol=1e-8)
    soln1 = solver.problem.coefs
    t2 = time.time()
    dt1 = t2 - t1

    penalties = [l2norm(selector_sparse(500, slice(i * 100, (i + 1) * 100)), l=.1)
                 for i in range(5)]
    penalties[0].l = 250.
    penalties[1].l = 225.
    penalties[2].l = 150.
    penalties[3].l = 100.
    group_lasso = seminorm(*penalties)
    regloss = squaredloss(X, Y)
    p = regloss.add_seminorm(group_lasso)
    solver = FISTA(p)
    solver.debug = True
    t1 = time.time()
    vals = solver.fit(max_its=2000, min_its=20, tol=1e-8)
    soln2 = solver.problem.coefs
    t2 = time.time()
    dt2 = t2 - t1

    print "Times", dt1, dt2
    print soln1[range(10)]
    print soln2[range(10)]
    np.testing.assert_almost_equal(soln1, soln2)
Example #14
def test_1d_fused_lasso(n=100):

    l1 = 1.

    sparsity1 = l1norm(n, l=l1)
    D = (np.identity(n) - np.diag(np.ones(n - 1), -1))[1:]
    extra = np.zeros(n)
    extra[0] = 1.
    D = np.vstack([D, extra])
    D = sparse.csr_matrix(D)

    fused = seminorm(l1norm(D, l=l1))

    X = np.random.standard_normal((2 * n, n))
    Y = np.random.standard_normal((2 * n,))
    regloss = squaredloss(X, Y)
    p = regloss.add_seminorm(fused)
    solver = FISTA(p)
    solver.debug = True
    vals1 = solver.fit(max_its=25000, tol=1e-12)
    soln1 = solver.problem.coefs

    B = np.array(sparse.tril(np.ones((n, n))).todense())
    X2 = np.dot(X, B)

    time.sleep(3)

    D2 = np.diag(np.ones(n))
    p2 = lasso.gengrad((X2, Y))
    p2.assign_penalty(l1=l1)
    opt = FISTA(p2)
    opt.debug = True
    opt.fit(tol=1e-12, max_its=25000)
    beta = opt.problem.coefs
    soln2 = np.dot(B, beta)

    print soln1[range(10)]
    print soln2[range(10)]
    print p.obj(soln1), p.obj(soln2)
    #np.testing.assert_almost_equal(soln1,soln2)

    return vals1
Example #15
def lasso_example(compare=False):

    l1 = 20.
    sparsity = l1norm(500, l=l1 / 2.)
    X = np.random.standard_normal((1000, 500))
    Y = np.random.standard_normal((1000,))
    regloss = squaredloss(X, Y)
    sparsity2 = l1norm(500, l=l1 / 2.)
    #p=regloss.add_seminorm(sparsity)
    p = regloss.add_seminorm(seminorm(sparsity, sparsity2))
    solver = FISTA(p)
    solver.debug = True
    vals = solver.fit(max_its=2000, min_its=100)
    soln = solver.problem.coefs

    if not compare:
        # solution
        pylab.figure(num=1)
        pylab.clf()
        pylab.plot(soln, c='g')

        # objective values
        pylab.figure(num=2)
        pylab.clf()
        pylab.plot(vals)
    else:
        p2 = lasso.gengrad((X, Y))
        p2.assign_penalty(l1=l1)
        opt = FISTA(p2)
        opt.debug = True
        opt.fit(tol=1e-10, max_its=5000)
        beta = opt.problem.coefs
        print "Terminal error with seminorm:", np.min(vals), "\tTerminal error with lasso", p.obj(beta), "\nTerminal relative error:", (np.min(vals) - p.obj(beta)) / p.obj(beta)

        pylab.figure(num=1)
        pylab.clf()
        #pylab.plot(soln, c='g')
        pylab.scatter(soln, beta)
        
        pylab.figure(num=2)
        pylab.clf()
        pylab.plot(vals)
Example #16
def test_lasso(n=100):

    l1 = 1.
    sparsity1 = l1norm(n, l=l1 * 0.75)
    sparsity2 = l1norm(n, l=l1 * 0.25)
    sparsity = l1norm(n, l=l1)

    X = np.random.standard_normal((5000, n))
    Y = np.random.standard_normal((5000,))
    regloss = squaredloss(X, Y)

    #p=regloss.add_seminorm(sparsity)
    #p=regloss.add_seminorm(seminorm(sparsity1,sparsity2),initial=np.zeros(n))
    p = regloss.add_seminorm(seminorm(sparsity), initial=np.zeros(n))
    solver = FISTA(p)
    solver.debug = True
    t1 = time.time()
    vals1 = solver.fit(max_its=800, tol=1e-18, set_prox_tol=True)
    t2 = time.time()
    dt1 = t2 - t1
    soln = solver.problem.coefs

    time.sleep(5)

    p2 = lasso.gengrad((X, Y))  #,initial_coefs = np.random.normal(0,1,n))
    p2.assign_penalty(l1=l1)
    opt = FISTA(p2)
    opt.debug = True
    t1 = time.time()
    vals2 = opt.fit(tol=1e-18, max_its=800)
    t2 = time.time()
    dt2 = t2 - t1
    beta = opt.problem.coefs

    print soln[range(10)]
    print beta[range(10)]

    print p.obj(soln), p.obj(beta)
    print p2.obj(soln), p2.obj(beta)
    print "Times", dt1, dt2
    
    return [vals1, vals2]