Beispiel #1
0
    def provide_regression_model(self, features_and_labels):
        """Build a regression model with deterministic weight initialization."""
        # fixed seed so the test sees reproducible initial weights
        t.manual_seed(12)

        def make_optimizer(params):
            return SGD(params, lr=0.01, momentum=0.0)

        return PytorchModel(features_and_labels, RegressionModule,
                            nn.MSELoss, make_optimizer)
    def test_callbacks(self):
        """Fit with an early-stopping callback and restore_best_weights=True."""
        frame = pd.DataFrame({
            "a": [1, 0, 1, 0, 1, 0, 1, 0],
            "b": [0, 1, 0, 1, 1, 0, 1, 0],
        })

        model = PytorchModel(
            FeaturesAndLabels(["a", "b"], ["b"]),
            ClassificationModule,
            nn.MSELoss,
            lambda params: SGD(params, lr=0.1, momentum=0.9),
        )

        # tolerance=-100 presumably makes every epoch count as "no
        # improvement" so training stops after the patience window — confirm
        fit = frame.model.fit(
            model,
            on_epoch=[Callbacks.early_stopping(patience=3, tolerance=-100)],
            restore_best_weights=True)

        self.assertEqual(4, len(fit.model.history["loss"]))
    def provide_classification_model(self, features_and_labels):
        """Build a classification model with deterministic initialization."""
        # fixed seed keeps the weight init reproducible across runs
        t.manual_seed(42)

        return PytorchModel(features_and_labels, ClassificationModule,
                            nn.MSELoss, lambda params: SGD(params, lr=0.03))
    def provide_auto_encoder_model(self, features_and_labels):
        """Build an auto-encoder model exposing one latent column."""
        # fixed seed for reproducible initialization
        t.manual_seed(12)

        inner = PytorchModel(features_and_labels, AutoEncoderModule,
                             nn.MSELoss,
                             lambda params: SGD(params, lr=0.1, momentum=0.9))

        # expose the module's encode/decode functions as the encoder and
        # decoder of the wrapping AutoEncoderModel
        return AutoEncoderModel(
            inner,
            ["condensed"],
            lambda m: m.module.encode,
            lambda m: m.module.decode,
        )
Beispiel #5
0
    def test_callbacks(self):
        """Fit with an early-stopping callback and restore_best_weights=True."""
        frame = pd.DataFrame({
            "a": [1, 0, 1, 0, 1, 0, 1, 0],
            "b": [0, 1, 0, 1, 1, 0, 1, 0],
        })

        def module_provider():
            # small 2 -> 5 -> 1 classifier defined inline for this test
            class ClassificationModule(nn.Module):
                def __init__(self):
                    super().__init__()
                    self.classifier = nn.Sequential(
                        nn.Linear(2, 5),
                        nn.ReLU(),
                        nn.Linear(5, 1),
                        nn.Sigmoid(),
                    )

                def forward(self, x):
                    return self.classifier(x)

            return ClassificationModule()

        model = PytorchModel(
            FeaturesAndLabels(["a", "b"], ["b"]),
            module_provider,
            nn.MSELoss,
            lambda params: SGD(params, lr=0.1, momentum=0.9),
        )

        # tolerance=-100 presumably makes every epoch count as "no
        # improvement" so training stops after the patience window — confirm
        callbacks = [
            PytorchModel.Callbacks.early_stopping(patience=3, tolerance=-100)
        ]
        fit = frame.model.fit(model,
                              on_epoch=callbacks,
                              restore_best_weights=True)

        print(fit.model._history)
        self.assertEqual(4, len(fit.model._history[0][0]))
Beispiel #6
0
    def provide_auto_encoder_model(self, features_and_labels):
        """Build a 2 -> 1 -> 2 auto-encoder model with one latent column."""
        # fixed seed for reproducible initialization
        t.manual_seed(12)

        def module_provider():
            class AutoEncoderModule(nn.Module):
                def __init__(self):
                    super().__init__()
                    # compress 2 features into a single latent unit
                    self.encoder = nn.Sequential(
                        nn.Linear(2, 2), nn.Tanh(),
                        nn.Linear(2, 1), nn.Tanh(),
                    )
                    # mirror of the encoder: expand back to 2 features
                    self.decoder = nn.Sequential(
                        nn.Linear(1, 2), nn.Tanh(),
                        nn.Linear(2, 2), nn.Tanh(),
                    )

                def forward(self, x):
                    return self.decoder(self.encoder(x))

                def encode(self, x):
                    # numpy in / numpy out, no gradient tracking
                    with t.no_grad():
                        return self.encoder(t.from_numpy(x).float()).numpy()

                def decode(self, x):
                    # numpy in / numpy out, no gradient tracking
                    with t.no_grad():
                        return self.decoder(t.from_numpy(x).float()).numpy()

            return AutoEncoderModule()

        inner = PytorchModel(features_and_labels, module_provider,
                             nn.MSELoss,
                             lambda params: SGD(params, lr=0.1, momentum=0.9))

        return AutoEncoderModel(
            inner,
            ["condensed"],
            lambda m: m.module.encode,
            lambda m: m.module.decode,
        )
Beispiel #7
0
    def provide_classification_model(self, features_and_labels):
        """Build a classification model with deterministic initialization."""
        # fixed seed keeps the weight init reproducible across runs
        t.manual_seed(42)

        def module_provider():
            # small 2 -> 5 -> 1 classifier defined inline for this test
            class ClassificationModule(nn.Module):
                def __init__(self):
                    super().__init__()
                    self.classifier = nn.Sequential(
                        nn.Linear(2, 5),
                        nn.ReLU(),
                        nn.Linear(5, 1),
                        nn.Sigmoid(),
                    )

                def forward(self, x):
                    return self.classifier(x)

            return ClassificationModule()

        return PytorchModel(features_and_labels, module_provider,
                            nn.MSELoss, lambda params: SGD(params, lr=0.03))
Beispiel #8
0
    def test_soft_dtw_loss(self):
        """Train an RNN auto-encoder on closing prices using the SoftDTW loss.

        NOTE(review): this is effectively a smoke test — it only prints the
        fit summary and the encoded frame; there are no assertions.
        """
        # last 21 close prices; .copy() so downstream feature engineering
        # does not mutate the shared DF_TEST frame
        df = DF_TEST[["Close"]][-21:].copy()

        class LstmAutoEncoder(nn.Module):
            def __init__(self):
                super().__init__()
                # 10-step univariate sequences compressed to 2 hidden units
                self.input_size = 1
                self.seq_size = 10
                self.hidden_size = 2
                self.num_layers = 1
                self.num_directions = 1

                # encoder RNN: 1 input feature -> 2 hidden units per step
                self._encoder =\
                    nn.RNN(input_size=self.input_size, hidden_size=self.hidden_size, num_layers=self.num_layers,
                           batch_first=True)

                # decoder RNN: 2 latent features -> 1 output feature per step
                self._decoder =\
                    nn.RNN(input_size=self.hidden_size, hidden_size=self.input_size, num_layers=self.num_layers,
                           batch_first=True)

            def forward(self, x):
                # make sure to treat single elements as batches
                x = x.view(-1, self.seq_size, self.input_size)
                batch_size = len(x)

                # NOTE(review): fresh zero hidden states are wrapped in
                # nn.Parameter on every forward call; created here they are
                # NOT registered on the module, so the effective difference
                # vs plain t.zeros(...) is requires_grad — confirm intended
                hidden_encoder = nn.Parameter(
                    t.zeros(self.num_layers * self.num_directions, batch_size,
                            self.hidden_size))
                hidden_decoder = nn.Parameter(
                    t.zeros(self.num_layers * self.num_directions, batch_size,
                            self.input_size))

                x, _ = self._encoder(x, hidden_encoder)
                # repeat the encoder output of the second-to-last time step
                # ([:, -2:-1] keeps the time axis) across the full sequence
                # length as the decoder's input
                x = t.repeat_interleave(x[:, -2:-1], x.shape[1], dim=1)
                x, hidden = self._decoder(x, hidden_decoder)
                return x

            def encoder(self, x):
                # numpy sequences in -> latent numpy out (used via
                # AutoEncoderModel's encoder accessor below)
                x = x.reshape(-1, self.seq_size, self.input_size)
                batch_size = len(x)

                with t.no_grad():
                    hidden = nn.Parameter(
                        t.zeros(self.num_layers * self.num_directions,
                                batch_size, self.hidden_size))

                    # return last element of sequence
                    return self._encoder(t.from_numpy(x).float(),
                                         hidden)[0].numpy()[:, -1]

            def decoder(self, x):
                # latent numpy in -> reconstructed sequence numpy out;
                # assumes x already holds a full latent sequence
                # (seq_size x hidden_size per sample) — TODO confirm against
                # how AutoEncoderModel feeds the decoder
                x = x.reshape(-1, self.seq_size, self.hidden_size)
                batch_size = len(x)

                with t.no_grad():
                    hidden = nn.Parameter(
                        t.zeros(self.num_layers * self.num_directions,
                                batch_size, self.input_size))
                    return self._decoder(t.from_numpy(x).float(),
                                         hidden)[0].numpy()

        # features and labels are both the close column post-processed into
        # rolling windows of 10 (df.ta.rnn(10)); loss is SoftDTW, optimizer
        # Adam; the auto-encoder exposes two latent columns
        model = AutoEncoderModel(
            PytorchModel(
                PostProcessedFeaturesAndLabels(df.columns.to_list(),
                                               [lambda df: df.ta.rnn(10)],
                                               df.columns.to_list(),
                                               [lambda df: df.ta.rnn(10)]),
                LstmAutoEncoder, SoftDTW, Adam),
            ["condensed-a", "condensed-b"],
            lambda m: m.module.encoder,
            lambda m: m.module.decoder,
        )

        fit = df.model.fit(model, epochs=100)
        print(fit.test_summary.df)

        # run the fitted model in encoder-only mode and print the latent frame
        encoded = df.model.predict(fit.model.as_encoder())
        print(encoded)