Example #1
from toolboxTom.logger import Logger


def test_level():
    log = Logger(levl=40)  # only error-level messages and above should pass
    log.debug('Do not appear!!')
    log.info('Do not appear!!')
    log.warning('Do not appear!!')
    log.error('Should appear!!')
    log.critical('Should appear!!')
    log.process_queue()
    log.end()
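
The thresholds exercised here suggest that levl follows the numeric levels of Python's standard logging module (10 debug, 20 info, 30 warning, 40 error, 50 critical); that mapping is an assumption, not confirmed by the excerpt. A minimal sketch under it, using only the calls shown above:

from toolboxTom.logger import Logger

log = Logger(levl=30)            # assumption: 30 behaves like logging.WARNING
log.info('filtered out')
log.warning('should appear')
log.process_queue()              # flush queued records, as in the test above
log.end()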
Example #2
from toolboxTom.logger import Logger


def test_graphical_logging():
    log = Logger(levl=10)
    i_max = 1000

    for i in range(i_max):
        log.graphical_cost(cost=1/(i+1), n_iteration=i)
        if i == 50000:  # note: unreachable guard, since i stays below i_max = 1000
            raise KeyboardInterrupt()
    log.process_queue()
    log.end()
Example #3
from toolboxTom.logger import Logger


def test_progress():
    log = Logger(levl=10)
    i_max = 10
    j_max = 100
    for i in range(i_max):
        log.progress(name='progress1', iteration=i, i_max=i_max)
        for j in range(j_max):
            log.progress(name='progress2', iteration=j, i_max=j_max)

    log.process_queue()
    log.end()
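
The two test excerpts above exercise progress bars and cost curves separately. The sketch below combines them in one training-style loop; it uses only the Logger calls these tests demonstrate, with placeholder loop sizes:

from toolboxTom.logger import Logger

log = Logger(levl=10)
n_epoch, n_batch = 5, 20
for e in range(n_epoch):
    log.progress(name='epoch', iteration=e, i_max=n_epoch)
    for b in range(n_batch):
        log.progress(name='batch', iteration=b, i_max=n_batch)
        # report a decreasing dummy cost, indexed by the global iteration
        log.graphical_cost(cost=1 / (e * n_batch + b + 1),
                           n_iteration=e * n_batch + b)
log.process_queue()
log.end()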
Example #4
import numpy as np
from toolboxTom.logger import Logger
log = Logger('MomentGD')

from . import _GradientDescent


class MomentGradientDescent(_GradientDescent):
    """Gradient Descent with moment update"""
    def __init__(self,
                 problem,
                 decreasing_rate='',
                 alpha_moment=0.9,
                 restart=True,
                 debug=0,
                 **kwargs):
        if debug:
            debug -= 1
            log.set_level(10)
        super(MomentGradientDescent, self).__init__(problem,
                                                    decreasing_rate,
                                                    debug=debug,
                                                    **kwargs)
        self.p_grad = np.zeros(self.pb.pt.shape)
        self.alpha_moment = alpha_moment
        self.restart = restart

    def __repr__(self):
        return 'MomentDescent - ' + str(self.restart)

    def p_update(self):
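
The body of p_update is cut off in this excerpt. For orientation, a textbook momentum update built from the attributes initialized above (p_grad, alpha_moment) would look like the following sketch; it is not the Toolbox implementation, and lr and grad are placeholder names:

import numpy as np

def momentum_update(pt, grad, p_grad, alpha_moment=0.9, lr=0.1):
    # accumulate an exponentially weighted gradient, then step along it
    p_grad = alpha_moment * p_grad + grad(pt)
    return pt - lr * p_grad, p_grad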
Example #5
import numpy as np
from math import sqrt
from time import time

from toolboxTom.logger import Logger
log = Logger('_GD', 10)

from . import get_log_rate, _GradientDescent


class _AlternateDescent(object):
    """Class to hold gradient descent properties"""

    id_gd = 0

    def __init__(self, problem, decreasing_rate='sqrt',
                 stop='', tol=1e-10, graphical_cost=None,
                 name=None, debug=0, logging=False,
                 log_rate='log', i_max=1000, t_max=40,
                 methods=[]):  # note: a mutable default list is shared across calls
        '''Gradient Descent handler

        Parameters
        ----------
        problem: _CoupledProblem to solve
        param: list of the Parameters
        alpha: learning rate controller
        grad: function computing the gradient
            given the current parameters
        decreasing_rate: {'sqrt', 'linear'} decreasing rate
            for the learning rate
Example #6
File: map.py Project: tomMoral/Toolbox
import numpy as np
import sys
from toolboxTom.logger import Logger
import multiprocessing
from multiprocessing import Process, Queue
log = Logger(name='map')
log.set_level(0)


class WorkerGroups(Process):
    def __init__(self, qin, qout, fun, id_w=0, **kwargs):
        super(WorkerGroups, self).__init__(name='Worker n°{}'.format(id_w))
        self.qin = qin
        self.qout = qout
        self.id = id_w
        self.fun = fun
        self.args = kwargs

    def run(self):
        idp, p = self.qin.get()
        while idp is not None:
            log.debug('Worker {} - |qin| = {}'
                      ''.format(self.id, self.qin.qsize()))
            params = dict(p=p, **self.args)
            try:
                self.qout.put((idp, self.fun(**params)))
            except Exception:  # catch worker errors without masking SystemExit/KeyboardInterrupt
                import traceback
                msg = traceback.format_exc()
                print(msg)
            idp, p = self.qin.get()
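
A run() loop like this one exits when it pulls an idp of None, so a driver has to enqueue one sentinel per worker. A minimal driver sketch, with a placeholder work function (the real call sites are not part of this excerpt):

from multiprocessing import Queue

def square(p):                       # placeholder work function; called as fun(p=...)
    return p * p

if __name__ == '__main__':
    qin, qout = Queue(), Queue()
    workers = [WorkerGroups(qin, qout, fun=square, id_w=w) for w in range(2)]
    for w in workers:
        w.start()
    for idp, p in enumerate([1, 2, 3, 4]):
        qin.put((idp, p))
    for _ in workers:
        qin.put((None, None))        # one (None, ...) sentinel stops each worker
    results = dict(qout.get() for _ in range(4))
    for w in workers:
        w.join()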
Example #7
from numpy import ndarray
from multiprocessing import Queue
from .worker_solver import WorkerSolver
from .problem import _Problem

from toolboxTom.logger import Logger

log = Logger('//Solver', 20)


class ParalelSolver(object):
    """Paralell sparse coding"""
    def __init__(self, n_jobs=4, debug=0, **kwargs):
        super(ParalelSolver, self).__init__()
        self.n_jobs = n_jobs
        self.param = kwargs
        if debug:
            log.set_level(10)
            debug -= 1
        self.debug = debug

    def solve(self, problems, **kwargs):
        if type(problems) not in [list, ndarray]:
            problems = [problems]
        assert issubclass(
            type(problems[0]),
            _Problem), ('ParalelSolver argument is not a _Problem subclass')
        qin = Queue()
        qout = Queue()
        for i, pb in enumerate(problems):
            qin.put((i, pb))
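
solve() is truncated after the input queue is filled. A plausible continuation, mirroring the sentinel pattern of WorkerGroups in map.py, would start the workers, enqueue one stop sentinel per worker, and collect one result per problem. WorkerSolver's actual signature is not shown here, so the constructor call below is an assumption:

        workers = [WorkerSolver(qin, qout, id_w=w, **self.param)
                   for w in range(self.n_jobs)]
        for w in workers:
            w.start()
        for _ in workers:
            qin.put((None, None))          # stop sentinels, one per worker
        solutions = [qout.get() for _ in problems]
        for w in workers:
            w.join()
        return solutions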
Example #8
import numpy as np

from optim.problem import _Problem
from toolboxTom.logger import Logger

log = Logger()


class Test(_Problem):
    """Test class for problem"""
    def __init__(self, lr=.1, **kwargs):
        super(Test, self).__init__(sizes=(1000, ))
        A = np.random.random(size=(100, 1000))
        A = A < 0.07
        A = A * np.random.normal(scale=2, size=A.shape)
        self.A = A + A[::-1]
        self.b = np.random.normal(size=(100, ))
        self.lr = lr
        self.L = np.sum(A * A)

    def cost(self, pt):
        res = self.A.dot(pt[0]) - self.b
        res = np.sum(res * res)
        return 0.5 * res + self.lr * np.sum(abs(pt[0]))

    def grad(self, pt):
        return [self.A.T.dot(self.A.dot(pt[0]) - self.b)]

    def prox(self, pt):
        return np.sign(pt) * np.maximum(abs(pt) - self.lr, 0)
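
This Test problem is a standard lasso: cost is 0.5 * ||A x - b||^2 + lr * ||x||_1, grad returns the gradient of the smooth part, and prox is soft-thresholding. The methods compose into a plain proximal-gradient (ISTA) loop; the sketch below uses only the interface defined above, with the textbook step size 1/L, and assumes this module's imports resolve:

import numpy as np

pb = Test(lr=.1)
pt = [np.zeros(1000)]                     # list-of-arrays convention, as in cost/grad
for _ in range(100):
    g = pb.grad(pt)                       # grad returns a list matching pt
    pt = [pb.prox(pt[0] - g[0] / pb.L)]   # gradient step, then soft-thresholding
print(pb.cost(pt))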
Example #9
from time import time

from toolboxTom.logger import Logger
from . import _GradientDescent
log = Logger(name='Solver', levl=20)


class Solver(object):
    """Encode a signal in the convolutional dictionary"""
    def __init__(self,
                 optim=_GradientDescent,
                 max_time=None,
                 i_max=1e6,
                 debug=0,
                 **kwargs):
        self.optim = optim
        self.param = kwargs
        self.i_max = i_max
        self.max_time = max_time
        if debug:
            log.set_level(10)

    def solve(self, pb, **kwargs):
        self.pb = pb
        self.param.update(**kwargs)
        solver = self.optim(self.pb, **self.param)
        finished = False
        self.start_time = time()
        self.iter = 0
        while not finished and not self._stop():
            finished = solver.update()
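
The _stop criterion referenced in the loop is not part of this excerpt. Given the i_max, max_time, start_time, and iter attributes set in solve(), a plausible (hypothetical) method would be:

    def _stop(self):
        # Hypothetical sketch: stop on iteration or wall-clock budget.
        self.iter += 1
        if self.iter >= self.i_max:
            return True
        if self.max_time is not None and time() - self.start_time > self.max_time:
            return True
        return False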
Example #10
import numpy as np

from . import _GradientDescent
from toolboxTom.logger import Logger

log = Logger(name='ProximalDescent')


class ProximalDescent(_GradientDescent):
    """Gradient Descent with the nesterov momentum"""
    def __init__(self,
                 problem,
                 decreasing_rate='',
                 f_theta='boyd',
                 restart=False,
                 debug=0,
                 **kwargs):
        self.restart = restart
        if debug > 0:
            debug -= 1
            log.set_level(10)
        super(ProximalDescent, self).__init__(problem,
                                              decreasing_rate=decreasing_rate,
                                              debug=debug,
                                              **kwargs)
        self.theta = [1, 1]
        if type(f_theta) is float:
            self.theta = [1 - f_theta] * 2
        self.p_grad = np.zeros(self.pb.pt.shape)
        self.f_theta = f_theta
        self.alpha = 1 / self.pb.L
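
The update step itself is not shown. For reference, the classic FISTA/Nesterov scheme that a 'boyd' theta sequence usually denotes combines a theta recursion with a proximal step at an extrapolated point. A textbook sketch (not ProximalDescent's actual code), with grad and prox as placeholder callables:

from math import sqrt

def fista_step(pt, p_pt, grad, prox, alpha, t):
    t_next = (1 + sqrt(1 + 4 * t ** 2)) / 2       # classic FISTA theta recursion
    y = pt + ((t - 1) / t_next) * (pt - p_pt)     # extrapolate using the past iterate
    return prox(y - alpha * grad(y)), pt, t_next  # proximal gradient step at y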