Example 1
    def test_fit_predict_regularized(self):
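        # Regularized fit on (misspelled, correct) string pairs: with l2_regularization=10.0
        # the learned parameters stay small and the class probabilities stay close to 0.5.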
        incorrect = [
            'helloooo', 'freshh', 'ffb', 'h0me', 'wonderin', 'relaionship',
            'hubby', 'krazii', 'mite', 'tropic'
        ]
        correct = [
            'hello', 'fresh', 'facebook', 'home', 'wondering', 'relationship',
            'husband', 'crazy', 'might', 'topic'
        ]
        training = zip(incorrect, correct)

        fe = StringPairFeatureExtractor(match=True, numeric=True)
        xf = fe.fit_transform(training)

        model = Hacrf(l2_regularization=10.0)
        model.fit(xf, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
        print(model.parameters)

        expected_parameters = np.array([[-0.0569188, 0.07413339, 0.],
                                        [0.00187709, -0.06377866, 0.],
                                        [-0.01908823, 0.00586189, 0.],
                                        [0.01721114, -0.00636556, 0.],
                                        [0.01578279, 0.0078614, 0.],
                                        [-0.0139057, -0.00862948, 0.],
                                        [-0.00623241, 0.02937325, 0.],
                                        [0.00810951, -0.01774676, 0.]])
        assert_array_almost_equal(model.parameters,
                                  expected_parameters,
                                  decimal=TEST_PRECISION)

        expected_probas = np.array([[0.5227226, 0.4772774],
                                    [0.52568993, 0.47431007],
                                    [0.4547091, 0.5452909],
                                    [0.51179222, 0.48820778],
                                    [0.46347576, 0.53652424],
                                    [0.45710098, 0.54289902],
                                    [0.46159657, 0.53840343],
                                    [0.42997978, 0.57002022],
                                    [0.47419724, 0.52580276],
                                    [0.50797852, 0.49202148]])
        actual_predict_probas = model.predict_proba(xf)
        print(actual_predict_probas)
        assert_array_almost_equal(actual_predict_probas,
                                  expected_probas,
                                  decimal=TEST_PRECISION)

        expected_predictions = np.array([0, 0, 1, 0, 1, 1, 1, 1, 1, 0])
        actual_predictions = model.predict(xf)
        assert_array_almost_equal(actual_predictions,
                                  expected_predictions,
                                  decimal=TEST_PRECISION)
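The snippets on this page are test methods lifted out of a larger test module. To run them stand-alone they need roughly the following scaffolding. This is a sketch, not the original module: the pyhacrf import path is the library's public API, but the TEST_PRECISION value and the TestHacrf class name are assumptions.

import unittest

import numpy as np
from numpy.testing import assert_array_almost_equal

from pyhacrf import Hacrf, StringPairFeatureExtractor

TEST_PRECISION = 3  # assumed number of decimals checked by assert_array_almost_equal


class TestHacrf(unittest.TestCase):
    ...  # the test methods shown on this page go here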
Example 2
    def test_fit_predict(self):
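        # Unregularized fit on the same pairs: the parameters grow large and the
        # predicted class probabilities saturate towards 0 and 1.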
        incorrect = [
            'helloooo', 'freshh', 'ffb', 'h0me', 'wonderin', 'relaionship',
            'hubby', 'krazii', 'mite', 'tropic'
        ]
        correct = [
            'hello', 'fresh', 'facebook', 'home', 'wondering', 'relationship',
            'husband', 'crazy', 'might', 'topic'
        ]
        training = zip(incorrect, correct)

        fe = StringPairFeatureExtractor(match=True, numeric=True)
        xf = fe.fit_transform(training)

        model = Hacrf()
        model.fit(xf, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

        expected_parameters = np.array([[-10.76945326, 144.03414923, 0.],
                                        [31.84369748, -106.41885651, 0.],
                                        [-52.08919467, 4.56943665, 0.],
                                        [31.01495044, -13.0593297, 0.],
                                        [49.77302218, -6.42566204, 0.],
                                        [-28.69877796, 24.47127009, 0.],
                                        [-85.34524911, 21.87370646, 0.],
                                        [106.41949333, 6.18587125, 0.]])
        print(model.parameters)
        assert_array_almost_equal(model.parameters,
                                  expected_parameters,
                                  decimal=TEST_PRECISION)

        expected_probas = np.array([[1.00000000e+000, 3.51235685e-039],
                                    [1.00000000e+000, 4.79716208e-039],
                                    [1.00000000e+000, 2.82744641e-139],
                                    [1.00000000e+000, 6.49580729e-012],
                                    [9.99933798e-001, 6.62022561e-005],
                                    [8.78935957e-005, 9.99912106e-001],
                                    [4.84538335e-009, 9.99999995e-001],
                                    [1.25170233e-250, 1.00000000e+000],
                                    [2.46673086e-010, 1.00000000e+000],
                                    [1.03521293e-033, 1.00000000e+000]])
        actual_predict_probas = model.predict_proba(xf)
        print(actual_predict_probas)
        assert_array_almost_equal(actual_predict_probas,
                                  expected_probas,
                                  decimal=TEST_PRECISION)

        expected_predictions = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
        actual_predictions = model.predict(xf)
        assert_array_almost_equal(actual_predictions,
                                  expected_predictions,
                                  decimal=TEST_PRECISION)
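Once fitted, the same extractor (fe) and model from the snippet above can score pairs that were not in the training data. A minimal sketch, not part of the original test, assuming the match/numeric features carry no fitted state so calling fit_transform on new pairs is harmless; the pair ('thx', 'thanks') is made up for illustration:

# Hypothetical follow-up: score an unseen (noisy, clean) pair with the
# extractor and model fitted above.
new_pairs = [('thx', 'thanks')]        # made-up example pair
x_new = fe.fit_transform(new_pairs)    # assumed: no fitted state to reuse
print(model.predict_proba(x_new))      # [[P(class 0), P(class 1)]]
print(model.predict(x_new))            # hard label per pair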
Example 3
    def test_fit_predict_regularized_viterbi(self):
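        # Same regularized fit as test_fit_predict_regularized, but with Viterbi
        # decoding for prediction: the learned parameters and hard predictions are
        # identical, only the predicted probabilities change.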
        incorrect = [
            'helloooo', 'freshh', 'ffb', 'h0me', 'wonderin', 'relaionship',
            'hubby', 'krazii', 'mite', 'tropic'
        ]
        correct = [
            'hello', 'fresh', 'facebook', 'home', 'wondering', 'relationship',
            'husband', 'crazy', 'might', 'topic'
        ]
        training = zip(incorrect, correct)

        fe = StringPairFeatureExtractor(match=True, numeric=True)
        xf = fe.fit_transform(training)

        model = Hacrf(l2_regularization=10.0, viterbi=True)
        model.fit(xf, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
        print(model.parameters)

        expected_parameters = np.array([[-0.0569188, 0.07413339, 0.],
                                        [0.00187709, -0.06377866, 0.],
                                        [-0.01908823, 0.00586189, 0.],
                                        [0.01721114, -0.00636556, 0.],
                                        [0.01578279, 0.0078614, 0.],
                                        [-0.0139057, -0.00862948, 0.],
                                        [-0.00623241, 0.02937325, 0.],
                                        [0.00810951, -0.01774676, 0.]])
        assert_array_almost_equal(model.parameters,
                                  expected_parameters,
                                  decimal=TEST_PRECISION)

        expected_probas = np.array([[0.56394611, 0.43605389],
                                    [0.52977205, 0.47022795],
                                    [0.4751729, 0.5248271],
                                    [0.51183761, 0.48816239],
                                    [0.48608081, 0.51391919],
                                    [0.4986367, 0.5013633],
                                    [0.46947222, 0.53052778],
                                    [0.43233544, 0.56766456],
                                    [0.47463002, 0.52536998],
                                    [0.51265109, 0.48734891]])
        actual_predict_probas = model.predict_proba(xf)
        print(actual_predict_probas)
        assert_array_almost_equal(actual_predict_probas,
                                  expected_probas,
                                  decimal=TEST_PRECISION)

        expected_predictions = np.array([0, 0, 1, 0, 1, 1, 1, 1, 1, 0])
        actual_predictions = model.predict(xf)
        assert_array_almost_equal(actual_predictions,
                                  expected_predictions,
                                  decimal=TEST_PRECISION)
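Examples 1 and 3 fit the same features with the same regularization and arrive at the same parameters and the same hard predictions; only predict_proba differs when viterbi=True, presumably because the probability then comes from the single best alignment path rather than a sum over all paths. A minimal side-by-side sketch, reusing xf and the labels from the snippets above:

# Compare the two inference modes on the same features and labels.
labels = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
for use_viterbi in (False, True):
    m = Hacrf(l2_regularization=10.0, viterbi=use_viterbi)
    m.fit(xf, labels)
    print(use_viterbi, m.predict_proba(xf)[0])  # first pair's class probabilities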