Esempio n. 1
0
    def test_mixture_of_experts_problem_with_incompatible_networks(self):
        """MoE construction rejects experts whose shapes don't agree."""
        # Extra expert with a 10-dimensional input conflicts with the rest.
        with self.assertRaisesRegexp(ValueError, "different input shapes"):
            incompatible_input = [layers.Input(10)]
            architectures.mixture_of_experts(
                networks=self.networks + incompatible_input)

        # Extra expert emits 10 features instead of matching the others.
        with self.assertRaisesRegexp(ValueError, "different output shapes"):
            incompatible_output = [layers.Input(1) > layers.Relu(10)]
            architectures.mixture_of_experts(
                networks=self.networks + incompatible_output)
Esempio n. 2
0
    def test_mixture_of_experts_init_gating_network_exceptions(self):
        """Invalid gating layers are rejected during MoE construction."""
        # A whole network is not a valid gating layer.
        with self.assertRaisesRegexp(ValueError, "Invalid type"):
            bad_gate = layers.Input(1) > layers.Softmax(2)
            architectures.mixture_of_experts(
                networks=self.networks, gating_layer=bad_gate)

        # Gating output size has to match the number of expert networks.
        with self.assertRaisesRegexp(ValueError, "invalid number of outputs"):
            architectures.mixture_of_experts(
                networks=self.networks, gating_layer=layers.Softmax(10))
Esempio n. 3
0
 def test_mixture_of_experts_non_network_inputs(self):
     """A plain list of layers is not an acceptable expert network."""
     expected_error = (
         "Invalid input, Mixture of experts expects networks/layers"
     )
     with self.assertRaisesRegexp(TypeError, expected_error):
         # Second element is a raw list rather than a joined network.
         architectures.mixture_of_experts([
             layers.Input(5) >> layers.Relu(10),
             [layers.Input(5), layers.Relu(10)],
         ])
Esempio n. 4
0
 def test_mixture_of_experts_undefined_features(self):
     """Experts with an unknown input feature count cannot be combined."""
     expected_error = (
         "Cannot create mixture of experts model, because "
         "number of input features is unknown"
     )
     with self.assertRaisesRegexp(ValueError, expected_error):
         # Input(None) leaves the number of input features undefined.
         architectures.mixture_of_experts([
             layers.Input(None) >> layers.Relu(10),
             layers.Input(None) >> layers.Relu(10),
         ])
Esempio n. 5
0
    def test_mixture_of_experts_problem_with_incompatible_networks(self):
        """Experts with mismatched input or output shapes are rejected."""
        # Input(10) disagrees with the input shape of the other experts.
        with self.assertRaisesRegexp(
                ValueError, "Networks have incompatible input shapes."):
            architectures.mixture_of_experts(
                networks=self.networks + [layers.Input(10)])

        # Relu(10) output disagrees with the other experts' outputs.
        with self.assertRaisesRegexp(
                ValueError, "Networks have incompatible output shapes."):
            extra_expert = layers.Input(1) >> layers.Relu(10)
            architectures.mixture_of_experts(
                networks=self.networks + [extra_expert])
Esempio n. 6
0
    def test_mixture_of_experts_init_gating_network_exceptions(self):
        """Gating-layer validation failures during MoE construction."""
        # A full network cannot be used where a single layer is expected.
        with self.assertRaisesRegexp(ValueError, "Invalid type"):
            bad_gate = layers.Input(1) >> layers.Softmax(2)
            architectures.mixture_of_experts(
                networks=self.networks, gating_layer=bad_gate)

        # Softmax(10) can only gate 10 experts; only 2 were supplied.
        expected_error = (
            "Gating layer can work only for combining only "
            "10 networks, got 2 networks instead."
        )
        with self.assertRaisesRegexp(LayerConnectionError, expected_error):
            architectures.mixture_of_experts(
                networks=self.networks, gating_layer=layers.Softmax(10))
Esempio n. 7
0
    def test_mixture_of_experts_problem_with_specific_network(self):
        """Validation errors raised for malformed expert networks.

        Covers: experts passed as positional arguments instead of a list,
        experts with more than one input or output layer, and experts that
        expect non-vector (more than 2-dimensional) inputs.
        """
        # Networks must be passed as a single list, not unpacked.
        with self.assertRaisesRegexp(ValueError, "specified as a list"):
            architectures.mixture_of_experts(*self.networks)

        with self.assertRaisesRegexp(ValueError, "has more than one input"):
            last_network = layers.join(
                layers.parallel(
                    layers.Input(1),
                    layers.Input(2),
                ),
                layers.Concatenate(),
            )
            architectures.mixture_of_experts(
                networks=self.networks + [last_network])

        with self.assertRaisesRegexp(ValueError, "has more than one output"):
            last_network = layers.join(
                layers.Input(1),
                layers.parallel(
                    layers.Softmax(1),
                    layers.Softmax(1),
                ),
            )
            architectures.mixture_of_experts(
                networks=self.networks + [last_network])

        # Raw strings: the pattern uses regex escapes (\(, \?, \)) that are
        # invalid escape sequences in a plain string literal and trigger a
        # DeprecationWarning/SyntaxWarning on modern Python versions.
        error_message = (
            r"Each network from the mixture of experts has to "
            r"process only 2-dimensional inputs. Network #2.+"
            r"Input layer's shape: \(\?, 1, 1, 1\)"
        )
        with self.assertRaisesRegexp(ValueError, error_message):
            last_network = layers.Input((1, 1, 1))
            architectures.mixture_of_experts(
                networks=self.networks + [last_network])
Esempio n. 8
0
    def test_mixture_of_experts_architecture(self):
        """MoE over three experts keeps the shared input/output shapes."""
        experts = [
            layers.join(
                layers.Input(10),
                layers.Relu(5),
            ),
            layers.join(
                layers.Input(10),
                layers.Relu(20),
                layers.Relu(5),
            ),
            layers.join(
                layers.Input(10),
                layers.Relu(30),
                layers.Relu(40),
                layers.Relu(5),
            ),
        ]
        network = architectures.mixture_of_experts(experts)

        # 12 layers in total: the experts plus the gating machinery.
        self.assertEqual(len(network), 12)
        self.assertEqual(network.input_shape, (10, ))
        self.assertEqual(network.output_shape, (5, ))

        # Prediction keeps batch size and yields 5 features per sample.
        predict = network.compile()
        batch = asfloat(np.random.random((3, 10)))
        output = predict(batch)
        self.assertEqual(output.shape, (3, 5))
Esempio n. 9
0
    def test_mixture_of_experts_multi_class_classification(self):
        """An MoE ensemble should beat a single identically-trained net."""
        import copy

        insize, outsize = (10, 3)
        n_epochs = 10
        default_configs = dict(
            step=0.1,
            batch_size=10,
            error='categorical_crossentropy',
            verbose=False,
        )

        architecture = layers.join(
            layers.Input(insize),
            layers.Relu(20),
            layers.Softmax(outsize),
        )

        data, target = datasets.make_classification(
            n_samples=200,
            n_features=insize,
            n_classes=outsize,
            n_clusters_per_class=2,
            n_informative=5,
        )

        # Scale inputs into [-1, 1] and one-hot encode the class labels.
        input_scaler = preprocessing.MinMaxScaler((-1, 1))
        one_hot = preprocessing.OneHotEncoder()

        target = target.reshape((-1, 1))
        encoded_target = one_hot.fit_transform(target)
        x_train, x_test, y_train, y_test = model_selection.train_test_split(
            input_scaler.fit_transform(data),
            np.asarray(encoded_target.todense()),
            test_size=0.2)

        # Baseline: a single network trained with Momentum.
        bpnet = algorithms.Momentum(
            copy.deepcopy(architecture), **default_configs)
        bpnet.train(x_train, y_train, epochs=n_epochs)
        network_output = bpnet.predict(x_test)
        network_error = categorical_crossentropy(y_test, network_output)

        # Ensemble: mixture of three copies of the same architecture.
        moe = algorithms.Momentum(
            architectures.mixture_of_experts([
                copy.deepcopy(architecture) for _ in range(3)
            ]),
            **default_configs)
        moe.train(x_train, y_train, epochs=n_epochs)
        ensemble_output = moe.predict(x_test)
        ensemble_error = categorical_crossentropy(y_test, ensemble_output)

        # The ensemble is expected to reach a lower cross-entropy error.
        self.assertGreater(network_error, ensemble_error)
Esempio n. 10
0
    def test_mixture_of_experts_problem_with_specific_network(self):
        """Malformed expert networks must trigger validation errors."""
        # Experts must arrive as one list, not as positional arguments.
        with self.assertRaisesRegexp(ValueError, "specified as a list"):
            architectures.mixture_of_experts(*self.networks)

        # An expert with two input layers is rejected.
        with self.assertRaisesRegexp(ValueError, "has more than one input"):
            bad_network = ([layers.Input(1), layers.Input(1)]
                           > layers.Softmax(1))
            architectures.mixture_of_experts(
                networks=self.networks + [bad_network])

        # An expert with two output layers is rejected.
        with self.assertRaisesRegexp(ValueError, "has more than one output"):
            bad_network = (layers.Input(1)
                           > [layers.Softmax(1), layers.Softmax(1)])
            architectures.mixture_of_experts(
                networks=self.networks + [bad_network])

        # Experts have to accept flat (vector) inputs.
        with self.assertRaisesRegexp(ValueError, "should receive vector"):
            architectures.mixture_of_experts(
                networks=self.networks + [layers.Input((1, 1, 1))])
Esempio n. 11
0
aresult = adanet.predict(x_test)

aerror = estimators.rmse(aresult, y_test)
'''

# Build a mixture-of-experts model from three expert networks.  Every
# expert consumes the same 58-feature input and produces a single output
# value; the experts differ in depth and activation functions.
network = architectures.mixture_of_experts([
    # Expert 1: shallow two-layer softmax stack.
    layers.join(
        layers.Input(58),
        layers.Softmax(22),
        layers.Softmax(1),
    ),
    # Expert 2: deeper network with relu hidden layers before the softmax.
    layers.join(
        layers.Input(58),
        layers.Relu(60),
        layers.Relu(40),
        layers.Softmax(22),
        layers.Softmax(1),
    ),
    # Expert 3: three-layer tanh stack.
    layers.join(
        layers.Input(58),
        layers.Tanh(12),
        layers.Tanh(25),
        layers.Tanh(1),
    ),
])

# Bare expression: a no-op in a plain script (presumably a notebook cell
# echo left over from interactive use).
network
gdnet = algorithms.Adam(network, verbose=True)
print("Start moe training")

# NOTE(review): x_train/y_train are assumed to be defined earlier in the
# file — confirm against the preceding (not shown) data-preparation code.
gdnet.train(x_train, y_train, epochs=500)
Esempio n. 12
0
# Keep only rows where ' timedelta' exceeds 60.
# NOTE(review): the column name has a leading space (' timedelta'), which
# matches the raw dataset header — do not "fix" it.
df = df[df[' timedelta'] > 60]

# Select feature columns (3rd through 60th) and the target column.
# NOTE(review): the original comment said "Conduct PCA", but no PCA is
# performed below — the features are only standardized.

data = df[df.columns[2:60]]

target = df[' shares'].ravel()

# Standardize features to zero mean and unit variance.
data_norm = StandardScaler().fit_transform(data)

# Hold out 30% of the data for testing.
x_train, x_test, y_train, y_test = train_test_split(data_norm,
                                                    target,
                                                    test_size=0.3)

# Mixture of experts combining a GRNN and a PNN as the two experts.
network = architectures.mixture_of_experts([
    algorithms.GRNN(std=0.5, verbose=True),
    algorithms.PNN(std=0.1, verbose=True),
])

network.train(x_train, y_train, epochs=500)

result = network.predict(x_test)

error = estimators.rmse(result, y_test)

print("GRNN RMSE = {}\n".format(error))

# NOTE(review): sklearn's r2_score signature is (y_true, y_pred); the
# arguments here look swapped — confirm whether this is intentional.
r2_score = metrics.r2_score(result, y_test)

print("GRNN R_SCORE = {}\n".format(r2_score))