Example #1
    def test_lwr_linear(self):
        """Test LWLR on random linear models of dimensions from 1 to 20.
         It should return exact results, give of take floating point imprecisions."""

        for _ in range(20):
            n = random.randint(1, 20)
            m = random.randint(1, 5)
            f = random_linear(n, m)
            cfg = {'m_channels'  : [learners.Channel('x_{}'.format(i), (0.0, 1.0))
                                    for i in range(n)],
                   's_channels'  : [learners.Channel('y_{}'.format(i), (0.0, 1.0))
                                    for i in range(m)],
                   'm_uniformize': True,
                   'options.maxiter': 500}

            learner = learners.OptimizeLearner(cfg)

            for _ in range(4*n):
                x = np.random.rand(n)
                y = f(x)
                learner.update(tools.to_signal(x, cfg['m_channels']),
                               tools.to_signal(y, cfg['s_channels']))

            for _ in range(10):
                x = np.random.rand(n).ravel()
                y = f(x)
                xp = learner.infer(tools.to_signal(y, cfg['s_channels']))
                xp = np.array(tools.to_vector(xp, cfg['m_channels']))
                self.assertTrue(np.allclose(f(xp), y, rtol = 1e-1, atol = 1e-1))
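This test (and the LWLR tests below) relies on a random_linear(n, m) helper that is not shown in these snippets. As a minimal sketch, assuming it simply returns a random linear map from [0, 1]^n into [0, 1]^m (the real helper in the test suite may be constructed differently):

    import numpy as np

    def random_linear(n, m):
        """Hypothetical helper: return a random linear map f: R^n -> R^m.

        Coefficients are scaled by 1/n so that inputs drawn from [0, 1]^n
        produce outputs that stay roughly inside the (0.0, 1.0) channel
        bounds used by the tests above.
        """
        M = np.random.rand(m, n) / n
        return lambda x: np.dot(M, np.asarray(x))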
Example #2
    def test_lwr1D_linear(self):
        """Simplest test possible (well, not quite, but close)."""
        f = lambda x: 2.0 * x

        cfg = {
            'm_channels': [learners.Channel('x', (0.0, 1.0))],
            's_channels': [learners.Channel('y', (0.0, 1.0))],
            'm_uniformize': True,
            'sigma': 0.1
        }

        for learner in [
                learners.LWLRLearner(cfg),
                learners.ESLWLRLearner(cfg)
        ]:

            for _ in range(10):
                x = np.random.rand(1)
                y = f(x)
                learner.update(tools.to_signal(x, cfg['m_channels']),
                               tools.to_signal(y, cfg['s_channels']))

            for _ in range(10):
                x = np.random.rand(1).ravel()
                y = f(x)
                yp = learner.predict(tools.to_signal(x, cfg['m_channels']))
                yp = tools.to_vector(yp, cfg['s_channels'])
                self.assertTrue(np.allclose(y, yp, rtol=1e-5, atol=1e-5))
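Both tests convert back and forth between plain vectors and channel-keyed signals with tools.to_signal and tools.to_vector. Judging from Example #10 below, where a signal is indexed as m_signal[c.name], a signal appears to be a dict mapping channel names to values. A minimal, self-contained sketch of that convention (an assumption about the interface; the real tools module may behave differently):

    def to_signal(vector, channels):
        # Hypothetical re-implementation: pair each value with its channel's name.
        return {c.name: v for c, v in zip(channels, vector)}

    def to_vector(signal, channels):
        # Hypothetical re-implementation: read the values back in channel order.
        return tuple(signal[c.name] for c in channels)

    # Round-trip with the 1D channels above: to_signal([0.4], cfg['m_channels'])
    # would give {'x': 0.4}, and to_vector({'x': 0.4}, cfg['m_channels']) gives (0.4,).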
Example #3
    def test_lwr_linear(self):
        """Test LWLR on random linear models of dimensions from 1 to 20.
        It should return exact results, give or take floating-point imprecision."""

        for cpp in [True, False]:
            if cpp:
                learners.enable_fastlearners(silent_fail=False)
            else:
                learners.disable_fastlearners()

            for _ in range(20):
                n = random.randint(1, 20)
                m = random.randint(1, 5)
                f = random_linear(n, m)
                cfg = {'m_channels'  : [learners.Channel('x_{}'.format(i), (0.0, 1.0))
                                        for i in range(n)],
                       's_channels'  : [learners.Channel('y_{}'.format(i), (0.0, 1.0))
                                        for i in range(m)],
                       'm_uniformize': True,
                       'sigma'       : 1.0}

                for learner in [learners.LWLRLearner(cfg), learners.ESLWLRLearner(cfg)]:

                    for _ in range(2*n):
                        x = np.random.rand(n)
                        y = f(x)
                        learner.update(tools.to_signal(x, cfg['m_channels']),
                                       tools.to_signal(y, cfg['s_channels']))

                    for _ in range(10):
                        x = np.random.rand(n).ravel()
                        y = f(x)
                        yp = learner.predict(tools.to_signal(x, cfg['m_channels']))
                        yp = tools.to_vector(yp, cfg['s_channels'])
                        self.assertTrue(np.allclose(y, yp, rtol = 1e-5, atol = 1e-5))
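The test above runs the same assertions twice, once with the compiled backend enabled through learners.enable_fastlearners(silent_fail=False) and once with it disabled, so both code paths are exercised. Outside the test suite, where the compiled extension might be missing, a guarded variant could look like the sketch below (this assumes that silent_fail=False makes the call raise when the backend cannot be loaded, which is an inference from the parameter name):

    import learners  # same module as in the snippets above

    # Hypothetical guard: try the fast backend, fall back to pure Python if it is unavailable.
    try:
        learners.enable_fastlearners(silent_fail=False)
        backends = [True, False]   # exercise both the compiled and the Python paths
    except Exception:
        learners.disable_fastlearners()
        backends = [False]         # only the pure-Python path is available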
Example #4
    def test_lwr_linear(self):
        """Test LWLR on random linear models of dimensions from 1 to 20.
        It should return exact results, give or take floating-point imprecision."""

        for cpp in [True, False]:
            if cpp:
                learners.enable_fastlearners(silent_fail=False)
            else:
                learners.disable_fastlearners()

            for _ in range(20):
                n = random.randint(1, 20)
                m = random.randint(1, 5)
                f = random_linear(n, m)
                cfg = {
                    'm_channels': [
                        learners.Channel('x_{}'.format(i), (0.0, 1.0))
                        for i in range(n)
                    ],
                    's_channels': [
                        learners.Channel('y_{}'.format(i), (0.0, 1.0))
                        for i in range(m)
                    ],
                    'm_uniformize': True,
                    'sigma': 1.0
                }

                for learner in [
                        learners.LWLRLearner(cfg),
                        learners.ESLWLRLearner(cfg)
                ]:

                    for _ in range(2 * n):
                        x = np.random.rand(n)
                        y = f(x)
                        learner.update(tools.to_signal(x, cfg['m_channels']),
                                       tools.to_signal(y, cfg['s_channels']))

                    for _ in range(10):
                        x = np.random.rand(n).ravel()
                        y = f(x)
                        yp = learner.predict(
                            tools.to_signal(x, cfg['m_channels']))
                        yp = tools.to_vector(yp, cfg['s_channels'])
                        self.assertTrue(
                            np.allclose(y, yp, rtol=1e-5, atol=1e-5))
Example #5
    def test_lwr1D_linear(self):
        """Simplest test possible (well, not quite, but close)."""
        f = lambda x : 2.0*x

        cfg = {'m_channels'  : [learners.Channel('x', (0.0, 1.0))],
               's_channels'  : [learners.Channel('y', (0.0, 1.0))],
               'm_uniformize': True}

        learner = learners.OptimizeLearner(cfg)

        for _ in range(10):
            x = np.random.rand(1)
            y = f(x)
            learner.update(tools.to_signal(x, cfg['m_channels']),
                           tools.to_signal(y, cfg['s_channels']))

        for _ in range(10):
            y = np.random.rand(1).ravel()
            xp = learner.infer(tools.to_signal(y, cfg['s_channels']))
            xp = np.array(tools.to_vector(xp, cfg['m_channels']))
            self.assertTrue(np.allclose(f(xp), y, rtol = 1e-5, atol = 1e-5))
Example #6
    def test_lwr1D_linear(self):
        """Simplest test possible (well, not quite, but close)."""
        f = lambda x: 2.0 * x

        cfg = {
            'm_channels': [learners.Channel('x', (0.0, 1.0))],
            's_channels': [learners.Channel('y', (0.0, 1.0))],
            'm_uniformize': True
        }

        learner = learners.OptimizeLearner(cfg)

        for _ in range(10):
            x = np.random.rand(1)
            y = f(x)
            learner.update(tools.to_signal(x, cfg['m_channels']),
                           tools.to_signal(y, cfg['s_channels']))

        for _ in range(10):
            y = np.random.rand(1).ravel()
            xp = learner.infer(tools.to_signal(y, cfg['s_channels']))
            xp = np.array(tools.to_vector(xp, cfg['m_channels']))
            self.assertTrue(np.allclose(f(xp), y, rtol=1e-5, atol=1e-5))
Example #7
    def test_lwr1D_linear(self):
        """Simplest test possible (well, not quite, but close)."""
        f = lambda x : 2.0*x

        cfg = {'m_channels'  : [learners.Channel('x', (0.0, 1.0))],
               's_channels'  : [learners.Channel('y', (0.0, 1.0))],
               'm_uniformize': True,
               'sigma'       : 0.1}

        for learner in [learners.LWLRLearner(cfg), learners.ESLWLRLearner(cfg)]:

            for _ in range(10):
                x = np.random.rand(1)
                y = f(x)
                learner.update(tools.to_signal(x, cfg['m_channels']),
                               tools.to_signal(y, cfg['s_channels']))

            for _ in range(10):
                x = np.random.rand(1).ravel()
                y = f(x)
                yp = learner.predict(tools.to_signal(x, cfg['m_channels']))
                yp = tools.to_vector(yp, cfg['s_channels'])
                self.assertTrue(np.allclose(y, yp, rtol = 1e-5, atol = 1e-5))
Example #8
    def test_lwr_linear(self):
        """Test LWLR on random linear models of dimensions from 1 to 20.
         It should return exact results, give of take floating point imprecisions."""

        for _ in range(20):
            n = random.randint(1, 20)
            m = random.randint(1, 5)
            f = random_linear(n, m)
            cfg = {
                'm_channels': [
                    learners.Channel('x_{}'.format(i), (0.0, 1.0))
                    for i in range(n)
                ],
                's_channels': [
                    learners.Channel('y_{}'.format(i), (0.0, 1.0))
                    for i in range(m)
                ],
                'm_uniformize': True,
                'options.maxiter': 500
            }

            learner = learners.OptimizeLearner(cfg)

            for _ in range(4 * n):
                x = np.random.rand(n)
                y = f(x)
                learner.update(tools.to_signal(x, cfg['m_channels']),
                               tools.to_signal(y, cfg['s_channels']))

            for _ in range(10):
                x = np.random.rand(n).ravel()
                y = f(x)
                xp = learner.infer(tools.to_signal(y, cfg['s_channels']))
                xp = np.array(tools.to_vector(xp, cfg['m_channels']))
                self.assertTrue(np.allclose(f(xp), y, rtol=1e-1, atol=1e-1))
Example #9
 def _execute(self, m_signal, meta=None):
     m_vector = tools.to_vector(m_signal, self.m_channels)
     s_vector = (m_vector[0] + m_vector[1], m_vector[0]*m_vector[1])
     return tools.to_signal(s_vector, self.s_channels)
Example #10
 def _execute(self, m_signal, meta=None):
     m_vector = np.array([[m_signal[c.name] for c in self.m_channels]])
     s_vector = (np.dot(self.m, m_vector.T).T)[0]
     return tools.to_signal(s_vector, self.s_channels)
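Examples #9 and #10 are _execute methods taken from the toy environments the learners are exercised against: the first computes (x0 + x1, x0 * x1), the second applies a fixed matrix self.m to the motor vector. A self-contained sketch of how such a linear toy environment could be put together (the class name and constructor are illustrative, not the test suite's actual environment base class; channels are only expected to expose a .name attribute, as learners.Channel does above):

    import numpy as np

    class LinearToyEnvironment(object):
        """Hypothetical stand-in for the linear test environment of Example #10."""

        def __init__(self, m_channels, s_channels):
            self.m_channels = m_channels
            self.s_channels = s_channels
            # Random linear map from motor space to sensory space, scaled to
            # keep outputs of [0, 1] inputs roughly inside [0, 1].
            self.m = np.random.rand(len(s_channels), len(m_channels)) / len(m_channels)

        def _execute(self, m_signal, meta=None):
            # Same pattern as Example #10: dict signal -> row vector -> matrix
            # product -> dict signal (a plain dict stands in for tools.to_signal here).
            m_vector = np.array([[m_signal[c.name] for c in self.m_channels]])
            s_vector = (np.dot(self.m, m_vector.T).T)[0]
            return {c.name: v for c, v in zip(self.s_channels, s_vector)}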