def test_predict(self, *mocks):
        # model
        model = CharLoadTFModel(self.model_path, self.label_mapping)
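        # input batch: 2 samples, each a sequence of 2 encoded characters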
        data_gen = [np.array([[1, 3], [1, 2]])]
        result = model.predict(data_gen)
        self.assertIn("pred", result)
        self.assertEqual((2, 2), np.array(result["pred"]).shape)

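        # requesting confidences should add a "conf" entry with per-label scores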
        result = model.predict(data_gen, show_confidences=True)
        self.assertIn("pred", result)
        self.assertIn("conf", result)
        self.assertEqual((2, 2, model.num_labels),
                         np.array(result["conf"]).shape)

    def test_fit_and_predict(self, *mocks):
        # model
        model = CharLoadTFModel(self.model_path, self.label_mapping)

        # data for model
        data_gen = [[
            np.array([[1, 3], [1, 2]]),  # x_data
            np.zeros((2, 2, model.num_labels)),  # y_data
        ]]
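        # reuse the same batch as the cross-validation data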
        cv_gen = data_gen

        # Basic Fit with Validation Data
        with self.assertLogs("DataProfiler.labelers.char_load_tf_model",
                             level="INFO") as logs:
            history, f1, f1_report = model.fit(data_gen,
                                               cv_gen,
                                               reset_weights=True)

        # Ensure info was logged during fit
        self.assertGreater(len(logs.output), 0)

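        # predict should still work on encoded input after fitting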
        data_gen = [np.array([[1, 3], [1, 2]])]
        model.predict(data_gen)

        # fit with new labels
        new_label_mapping = {
            "PAD": 0,
            "TEST": 1,
            "NEW": 2,
            "MAPPING": 3,
            model._parameters["default_label"]: 4,
        }
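        # y_data width must match the size of the new label mapping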
        data_gen = [[
            np.array([[1, 3], [1, 2]]),  # x_data
            np.zeros((2, 2, len(new_label_mapping))),  # y_data
        ]]
        history, f1, f1_report = model.fit(data_gen,
                                           cv_gen,
                                           label_mapping=new_label_mapping)

        # predict after fitting, passing only the x_data (encoded text)
        model.predict(data_gen[0][0])