    def test_default_config(self):
        random_csv = create_random_csv()
        target = 'D'

        config = Config.make_default_config(
            csv_path=random_csv,
            target_name=target,
            target_type=TargetType.BINARY_CLASSIFICATION,
            train_ratio=0.9)
        self.assertIsNotNone(config)
        remove_random_csv()

    def test_activation_for_regression_is_sigmoid(self):
        config = self.make_config_for_type(TargetType.REGRESSION)
        previous_layer = self.make_sample_layer()

        layer = config.model_assembler.make_final_layer(previous_layer)
        self.check_layer_properties(layer=layer,
                                    activation="Sigmoid",
                                    outputs=1,
                                    operation='dense')

        remove_random_csv()

    def test_activation_for_multiclass_is_softmax(self):
        config = self.make_config_for_type(
            TargetType.MULTICLASS_CLASSIFICATION)
        previous_layer = self.make_sample_layer()

        layer = config.model_assembler.make_final_layer(previous_layer)
        self.check_layer_properties(layer=layer,
                                    activation="Softmax",
                                    outputs=config.unique_classes,
                                    operation='dense')
        remove_random_csv()

    def test_model_params_for_regression(self):
        config = self.make_config_for_type(TargetType.REGRESSION)
        previous_layer = self.make_sample_layer()

        layer = config.model_assembler.make_final_layer(previous_layer)
        model = Model(inputs=previous_layer, outputs=layer)

        model = config.model_assembler.compile_model(model)
        self.check_model_parameters(model=model,
                                    optimizer=keras.optimizers.Adam,
                                    loss="mean_absolute_error",
                                    metrics=[])

        remove_random_csv()

    def test_model_params_for_binary_classification(self):
        config = self.make_config_for_type(TargetType.BINARY_CLASSIFICATION)
        previous_layer = self.make_sample_layer()

        layer = config.model_assembler.make_final_layer(previous_layer)
        model = Model(inputs=previous_layer, outputs=layer)

        model = config.model_assembler.compile_model(model)
        self.check_model_parameters(model=model,
                                    optimizer=keras.optimizers.Adam,
                                    loss="binary_crossentropy",
                                    metrics=["accuracy"])

        remove_random_csv()

    def test_custom_config(self):
        random_csv = create_random_csv()
        target = 'D'

        processor = CustomProcessor()
        assembler = CustomAssembler()

        config = Config.make_custom_config(csv_path=random_csv,
                                           target_name=target,
                                           train_ratio=0.9,
                                           target_processor=processor,
                                           model_assembler=assembler)

        self.assertIsNotNone(config)
        remove_random_csv()
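
The tests above call two helpers that are not part of this snippet, make_config_for_type and make_sample_layer. A minimal sketch of what they might look like, assuming make_config_for_type wraps Config.make_default_config around a freshly generated random CSV (target column 'D', as in test_default_config) and make_sample_layer simply returns a small Keras input tensor; the names, input shape and defaults here are assumptions, not the project's actual code:

    def make_config_for_type(self, target_type):
        # Assumed helper: build a default Config on a fresh random CSV,
        # mirroring the arguments used in test_default_config above.
        return Config.make_default_config(
            csv_path=create_random_csv(),
            target_name='D',
            target_type=target_type,
            train_ratio=0.9)

    def make_sample_layer(self):
        # Assumed helper: a small input tensor to feed into make_final_layer.
        return keras.Input(shape=(8,))
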
Example #7
    def test_create_and_remove_random_csv(self):
        rows = 5
        cols = 5
        columns = 'ABCDE'

        csv_path = create_random_csv(rows, cols, columns)

        exists_csv = os.path.exists(csv_path)
        self.assertEqual(exists_csv, True)

        df = pd.read_csv(csv_path)
        self.__check_dataframe_data(df, rows, cols, columns)

        remove_random_csv()

        exists_csv = os.path.exists(csv_path)
        self.assertEqual(exists_csv, False)
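
For reference, a minimal sketch of a create_random_csv / remove_random_csv pair that would satisfy the contract exercised above; the real helpers may differ, and the module-level _CSV_PATH constant is an assumption:

import os

import numpy as np
import pandas as pd

_CSV_PATH = 'random.csv'


def create_random_csv(rows=5, cols=5, columns='ABCDE'):
    # Write a rows x cols frame of random values under single-character
    # column names and return the path the file was saved to.
    df = pd.DataFrame(np.random.rand(rows, cols), columns=list(columns))
    df.to_csv(_CSV_PATH, index=False)
    return _CSV_PATH


def remove_random_csv():
    # Delete the file if it is still on disk.
    if os.path.exists(_CSV_PATH):
        os.remove(_CSV_PATH)
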
Example #8
    def test_model_embedding_size(self):

        random_csv = create_random_csv()
        target = 'D'

        config = Config.make_default_config(
            csv_path=random_csv,
            target_name=target,
            target_type=TargetType.BINARY_CLASSIFICATION,
            train_ratio=0.9)

        network = EmbeddingNetwork(config)

        # Each Embedding layer is named after the categorical column it encodes;
        # its width should match the size suggested for that column's cardinality.
        for layer in network.model.layers:
            if isinstance(layer, Embedding):
                embedding_size = int(layer.embeddings.initial_value.shape[1])
                self.assertEqual(
                    get_embedding_size(config.df[layer.name].nunique()),
                    embedding_size)

        remove_random_csv()

    def tearDown(self):
        remove_random_csv()
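
get_embedding_size is also not shown in these snippets. Purely as an illustration of the quantity the embedding test compares against, here is one common rule-of-thumb implementation; the project's actual formula may well differ:

def get_embedding_size(unique_values):
    # Illustrative heuristic only: roughly half the number of distinct
    # categories, capped at 50.
    return min(50, (unique_values + 1) // 2)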