    def test_training(self):
        a = 0

        # Create a new learner, but use the same data for each run
        lr = LogisticRegression(self.X_train, self.Y_train, alpha=a)

        training_reconstruction = [lr.predict(x) for x in self.X_train]
        residual = sum(
            [x - y for x, y in zip(training_reconstruction, self.Y_train)])

        self.assertTrue(residual < 1e-5)
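
The training tests above reference self.X_train and self.Y_train, which the surrounding test case is assumed to prepare elsewhere. A minimal sketch of such a fixture, assuming a small two-class, linearly separable dataset (the data used by the original suite is not shown in these excerpts):

import unittest

import numpy as np

class LogisticRegressionTestCase(unittest.TestCase):
    """Hypothetical fixture: the original setUp is not included in these excerpts."""

    def setUp(self):
        rng = np.random.RandomState(0)
        # Two well-separated 2-D clusters, labelled 0 and 1, so a logistic
        # regression can reconstruct the training labels almost exactly.
        negatives = rng.normal(loc=-2.0, size=(20, 2))
        positives = rng.normal(loc=2.0, size=(20, 2))
        self.X_train = np.vstack([negatives, positives])
        self.Y_train = np.array([0] * 20 + [1] * 20)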
Example No. 3
    def test_multi_surrogate_models(self):
        metamodel = MetaModel()
        metamodel.name = 'meta'
        metamodel.surrogate = {
            'd': KrigingSurrogate(),
            'c': LogisticRegression()
        }
        metamodel.model = Simple()
        metamodel.recorder = DumbRecorder()
        simple = Simple()

        metamodel.a = simple.a = 1.
        metamodel.b = simple.b = 2.
        metamodel.train_next = True
        simple.run()
        metamodel.run()

        metamodel.a = simple.a = 3.
        metamodel.b = simple.b = 4.
        metamodel.train_next = True
        simple.run()
        metamodel.run()

        self.assertTrue(isinstance(metamodel.d, NormalDistribution))
        self.assertTrue(isinstance(metamodel.c, float))
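
The MetaModel tests wrap a small component named Simple with inputs a, b and outputs c, d; its definition is not included in these excerpts. A rough sketch of such a component under the legacy OpenMDAO (0.x) component API, offered only as an assumption, might look like:

from openmdao.main.api import Component
from openmdao.lib.datatypes.api import Float

class Simple(Component):
    # Hypothetical stand-in for the Simple component exercised above:
    # two float inputs and two float outputs derived from them.
    a = Float(1.0, iotype='in')
    b = Float(1.0, iotype='in')
    c = Float(0.0, iotype='out')
    d = Float(0.0, iotype='out')

    def execute(self):
        self.c = self.a + self.b
        self.d = self.a - self.b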
Example No. 4
    def test_default_surrogate_change(self):
        metamodel = MetaModel()
        mmins = set(metamodel.list_inputs())
        mmouts = set(metamodel.list_outputs())
        metamodel.default_surrogate = KrigingSurrogate()
        metamodel.model = Simple()
        metamodel.default_surrogate = LogisticRegression()
        attrs = metamodel.get_attributes(io_only=False)
        for s in attrs['Slots']:
            self.assertNotEqual(s['name'], 'c')
            self.assertNotEqual(s['name'], 'd')
        inputs = set(metamodel.list_inputs())
        outputs = set(metamodel.list_outputs())
        self.assertEqual(inputs - mmins, set(['a', 'b']))
        self.assertEqual(outputs - mmouts, set(['c', 'd']))
        for i in range(3):
            metamodel.train_next = True
            metamodel.run()

        self.assertTrue(len(metamodel._training_data['c']) == 3)
        self.assertTrue(len(metamodel._training_data['d']) == 3)
        self.assertTrue(len(metamodel._training_input_history) == 3)

        metamodel.includes = ['a', 'b', 'c', 'd']

        self.assertTrue(len(metamodel._training_data['c']) == 3)
        self.assertTrue(len(metamodel._training_data['d']) == 3)
        self.assertTrue(len(metamodel._training_input_history) == 3)

        # removing an output should not clobber the rest of the training data
        metamodel.includes = ['a', 'b', 'c']

        self.assertTrue(len(metamodel._training_data['c']) == 3)
        self.assertTrue('d' not in metamodel._training_data)
        self.assertTrue(len(metamodel._training_input_history) == 3)

        # now put a different model in with the same inputs/outputs
        metamodel.model = SimpleMatch()
        metamodel.includes = ['a', 'b', 'c', 'd']
        inputs = set(metamodel.list_inputs())
        outputs = set(metamodel.list_outputs())
        self.assertEqual(inputs - mmins, set(['a', 'b']))
        self.assertEqual(outputs - mmouts, set(['c', 'd']))

        self.assertTrue(len(metamodel._training_data['c']) == 0)
        self.assertTrue(len(metamodel._training_data['d']) == 0)
        self.assertTrue(len(metamodel._training_input_history) == 0)

        # now put a different model in
        metamodel.model = Simple2()
        metamodel.includes = ['w', 'x', 'y', 'z']
        inputs = set(metamodel.list_inputs())
        outputs = set(metamodel.list_outputs())
        self.assertEqual(inputs - mmins, set(['w', 'x']))
        self.assertEqual(outputs - mmouts, set(['y', 'z']))
    def test_uncertain_value(self):
        lr = LogisticRegression(self.X_train, self.Y_train)

        self.assertEqual(lr.get_uncertain_value(1.0), 1.0)
    def test_uncertain_value(self):
        lr = LogisticRegression()

        self.assertEqual(lr.get_uncertain_value(1.0), 1.0)
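
Both versions of test_uncertain_value expect get_uncertain_value to hand back its argument unchanged, which is consistent with a surrogate that does not model output uncertainty (unlike KrigingSurrogate, whose outputs are checked against NormalDistribution above). A minimal, purely illustrative sketch of that contract:

class PassThroughUncertainty(object):
    # Hypothetical illustration only: a surrogate with no uncertainty model
    # returns plain floats from get_uncertain_value.
    def get_uncertain_value(self, value):
        return value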