Example 1
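These snippets assume the following imports. The aliases (`pcl`, `ll`) are inferred from usage and the module paths are an assumption based on the tensorflow_lattice package layout, not copied from the original test file's header:

import tempfile

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow_lattice.python import lattice_layer as ll
from tensorflow_lattice.python import parallel_combination_layer as pcl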
    def testParallelCombinationMultipleInputs(self):
        if self.disable_all:
            return
        input_layers = [keras.layers.Input(shape=[1]) for _ in range(3)]
        all_calibrators = pcl.ParallelCombination(single_output=False)
        for i in range(3):
            # It's not typical to use a 1-d Lattice layer for calibration, but let's
            # do it to avoid a redundant dependency on the PWLCalibration layer.
            calibrator = ll.Lattice(lattice_sizes=[2],
                                    output_min=0.0,
                                    output_max=i + 1.0)
            all_calibrators.append(calibrator)

        # Given the output range specified below, the linear initializer makes the
        # lattice simply sum up its inputs.
        simple_sum = ll.Lattice(lattice_sizes=[5] * 3,
                                kernel_initializer="linear_initializer",
                                output_min=0.0,
                                output_max=12.0,
                                name="SummingLattice",
                                trainable=False)

        output = simple_sum(all_calibrators(input_layers))
        model = keras.models.Model(inputs=input_layers, outputs=output)

        test_inputs = [
            np.asarray([[0.0], [0.1], [1.0]]),
            np.asarray([[0.0], [0.2], [1.0]]),
            np.asarray([[0.0], [0.3], [1.0]]),
        ]
        predictions = model.predict(test_inputs)
        print("predictions")
        print(predictions)
        self.assertTrue(
            np.allclose(predictions, np.asarray([[0.0], [1.4], [6.0]])))
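Why [[0.0], [1.4], [6.0]] is expected (a sanity-check sketch, assuming the Lattice layer's default kernel initializer is the linear one): calibrator i maps x in [0, 1] linearly onto [0, i + 1], and the frozen summing lattice adds its three inputs:

# Each calibrator i scales its input by (i + 1); the lattice then sums the
# three calibrated values.
expected_middle = 0.1 * 1.0 + 0.2 * 2.0 + 0.3 * 3.0  # = 1.4
expected_last = 1.0 * 1.0 + 1.0 * 2.0 + 1.0 * 3.0    # = 6.0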
Example 2
    def testParallelCombinationSingleInput(self):
        if self.disable_all:
            return
        all_calibrators = pcl.ParallelCombination()
        for i in range(3):
            # It's not typical to use a 1-d Lattice layer for calibration, but let's
            # do it to avoid a redundant dependency on the PWLCalibration layer.
            calibrator = ll.Lattice(lattice_sizes=[2],
                                    output_min=0.0,
                                    output_max=i + 1.0)
            all_calibrators.append(calibrator)

        # Given the output range specified below, the linear initializer makes the
        # lattice simply sum up its inputs.
        simple_sum = ll.Lattice(lattice_sizes=[5] * 3,
                                kernel_initializer="linear_initializer",
                                output_min=0.0,
                                output_max=12.0,
                                name="SummingLattice")
        model = keras.models.Sequential()
        model.add(all_calibrators)
        model.add(simple_sum)

        test_inputs = np.asarray([
            [0.0, 0.0, 0.0],
            [0.1, 0.2, 0.3],
            [1.0, 1.0, 1.0],
        ])
        predictions = model.predict(test_inputs)
        print("predictions")
        print(predictions)
        self.assertTrue(
            np.allclose(predictions, np.asarray([[0.0], [1.4], [6.0]])))
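With the default single_output=True, ParallelCombination takes one rank-2 input, splits it column-wise across its calibrators, and concatenates the per-calibrator outputs back into a single tensor, which is what lets it sit inside a Sequential model here. A minimal sketch of that contract (illustrative values, assuming eager execution):

# Column i of a (batch, 3) input goes to calibrator i; the outputs are
# concatenated back into shape (batch, 3) before reaching the lattice.
calibrated = all_calibrators(np.asarray([[0.5, 0.5, 0.5]]))
# roughly [[0.5, 1.0, 1.5]] given the linear calibrators above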
Example 3
    def testParallelCombinationClone(self):
        if self.disable_all:
            return
        input_layers = [keras.layers.Input(shape=[1]) for _ in range(3)]
        all_calibrators = pcl.ParallelCombination(single_output=False)
        for i in range(3):
            # It's not typical to use a 1-d Lattice layer for calibration, but let's
            # do it to avoid a redundant dependency on the PWLCalibration layer.
            calibrator = ll.Lattice(lattice_sizes=[2],
                                    output_min=0.0,
                                    output_max=i + 1.0)
            all_calibrators.append(calibrator)

        # Given the output range specified below, the linear initializer makes the
        # lattice simply sum up its inputs.
        simple_sum = ll.Lattice(lattice_sizes=[5] * 3,
                                kernel_initializer="linear_initializer",
                                output_min=0.0,
                                output_max=12.0,
                                name="SummingLattice",
                                trainable=False)

        output = simple_sum(all_calibrators(input_layers))
        model = keras.models.Model(inputs=input_layers, outputs=output)
        clone = keras.models.clone_model(model)

        test_inputs = [
            np.asarray([[0.0], [0.1], [1.0]]),
            np.asarray([[0.0], [0.2], [1.0]]),
            np.asarray([[0.0], [0.3], [1.0]]),
        ]
        predictions = clone.predict(test_inputs)
        print("predictions")
        print(predictions)
        self.assertTrue(
            np.allclose(predictions, np.asarray([[0.0], [1.4], [6.0]])))

        with tempfile.NamedTemporaryFile(suffix=".h5") as f:
            model.save(f.name)
            loaded_model = tf.keras.models.load_model(
                f.name,
                custom_objects={
                    "ParallelCombination": pcl.ParallelCombination,
                    "Lattice": ll.Lattice
                })
            predictions = loaded_model.predict(test_inputs)
            self.assertTrue(
                np.allclose(predictions, np.asarray([[0.0], [1.4], [6.0]])))
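Passing custom_objects on every tf.keras.models.load_model call can be avoided by registering the classes once in Keras's global custom-object registry. This is standard tf.keras machinery, sketched below as an alternative rather than part of the original test; the file path is hypothetical:

# Register once; load_model can then resolve the custom classes by name.
tf.keras.utils.get_custom_objects().update({
    "ParallelCombination": pcl.ParallelCombination,
    "Lattice": ll.Lattice,
})
loaded_model = tf.keras.models.load_model("model.h5")  # hypothetical path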
Example 4
    def _TrainModel(self, config, plot_path=None):
        """Trains model and returns loss.

    Args:
      config: Layer config internal for this test which specifies params of
        piecewise linear layer to train.
      plot_path: if specified - png file name to save visualization. See
        test_utils.run_training_loop() for more details.

    Returns:
      Training loss.
    """
        logging.info("Testing config:")
        logging.info(config)
        if plot_path is not None and config["units"] > 1:
            raise ValueError("Test config error. "
                             "Can not plot multi unit calibrators.")
        config = self._SetDefaults(config)

        self._ResetAllBackends()

        # The input to the model can either be single or multi dimensional.
        input_units = 1 if config["one_d_input"] else config["units"]

        training_inputs = config["x_generator"](
            units=input_units,
            num_points=config["num_training_records"],
            input_min=config["input_keypoints"][0],
            input_max=config["input_keypoints"][-1],
            missing_probability=config["missing_probability"],
            missing_input_value=config["missing_input_value"])
        training_labels = [config["y_function"](x) for x in training_inputs]

        # Either create multiple PWLCalibration layers and combine using a
        # ParallelCombination layer, or create a single PWLCalibration with multiple
        # output dimensions.
        if config["use_multi_calibration_layer"]:
            num_calibration_layers = config["units"]
            pwl_calibration_units = 1
        else:
            num_calibration_layers = 1
            pwl_calibration_units = config["units"]

        model = keras.models.Sequential()
        model.add(tf.keras.layers.Input(shape=[input_units], dtype=tf.float32))
        calibration_layers = []
        for _ in range(num_calibration_layers):
            calibration_layers.append(
                pwl_calibration.PWLCalibration(
                    units=pwl_calibration_units,
                    dtype=tf.float32,
                    input_keypoints=config["input_keypoints"],
                    output_min=config["output_min"],
                    output_max=config["output_max"],
                    clamp_min=config["clamp_min"],
                    clamp_max=config["clamp_max"],
                    monotonicity=config["monotonicity"],
                    convexity=config["convexity"],
                    is_cyclic=config["is_cyclic"],
                    kernel_initializer=config["initializer"],
                    kernel_regularizer=config["kernel_regularizer"],
                    impute_missing=config["impute_missing"],
                    missing_output_value=config["missing_output_value"],
                    missing_input_value=config["missing_input_value"],
                    num_projection_iterations=config[
                        "num_projection_iterations"]))
        if len(calibration_layers) == 1:
            if config["use_separate_missing"]:
                model.add(
                    CalibrateWithSeparateMissing(
                        calibration_layer=calibration_layers[0],
                        missing_input_value=config["missing_input_value"]))
            else:
                model.add(calibration_layers[0])
        else:
            model.add(
                parallel_combination.ParallelCombination(calibration_layers))

        if config["units"] > 1:
            model.add(
                keras.layers.Lambda(
                    lambda x: tf.reduce_mean(x, axis=1, keepdims=True)))

        model.compile(loss=keras.losses.mean_squared_error,
                      optimizer=config["optimizer"](
                          learning_rate=config["learning_rate"]))

        training_data = (training_inputs, training_labels, training_inputs)

        loss = test_utils.run_training_loop(config=config,
                                            training_data=training_data,
                                            keras_model=model,
                                            plot_path=plot_path)

        assertion_ops = []
        for calibration_layer in calibration_layers:
            assertion_ops.extend(
                calibration_layer.assert_constraints(
                    eps=config["constraint_assertion_eps"]))
        if not tf.executing_eagerly() and assertion_ops:
            tf.compat.v1.keras.backend.get_session().run(assertion_ops)

        return loss
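For context, a config exercising this helper might look like the sketch below. The keys are taken from the usages above; the values and the x_generator helper are illustrative assumptions, and _SetDefaults fills in any omitted keys:

config = {
    "units": 1,
    "one_d_input": False,
    "num_training_records": 128,
    "input_keypoints": np.linspace(0.0, 1.0, num=10),
    "x_generator": self._ScatterXUniformly,  # hypothetical helper
    "y_function": lambda x: np.sin(x * np.pi),
    "monotonicity": 1,
}
loss = self._TrainModel(config)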
Example 5
    def _TrainModel(self, config, plot_path=None):
        """Trains model and returns loss.

    Args:
      config: Layer config internal for this test which specifies params of
        piecewise linear layer to train.
      plot_path: if specified - png file name to save visualization. See
        test_utils.run_training_loop() for more details.

    Returns:
      Training loss.
    """
        logging.info("Testing config:")
        logging.info(config)
        config = self._SetDefaults(config)

        self._ResetAllBackends()

        if config["default_input_value"] is not None:
            # default_input_value is mapped to the last bucket, hence x_generator
            # needs to generate values in the [0, ..., num_buckets - 2] range.
            num_random_buckets = config["num_buckets"] - 1
        else:
            num_random_buckets = config["num_buckets"]

        # The input to the model can either be single or multi dimensional.
        input_units = 1 if config["one_d_input"] else config["units"]

        training_inputs = config["x_generator"](
            units=input_units,
            num_points=config["num_training_records"],
            num_buckets=num_random_buckets,
            missing_probability=config["missing_probability"],
            default_input_value=config["default_input_value"])
        training_labels = [config["y_function"](x) for x in training_inputs]

        # Either create multiple CategoricalCalibration layers and combine using a
        # ParallelCombination layer, or create a single CategoricalCalibration with
        # multiple output dimensions.
        if config["use_multi_calibration_layer"]:
            num_calibration_layers = config["units"]
            categorical_calibration_units = 1
        else:
            num_calibration_layers = 1
            categorical_calibration_units = config["units"]

        model = keras.models.Sequential()
        model.add(keras.layers.Input(shape=[input_units], dtype=tf.int32))
        calibration_layers = []
        for _ in range(num_calibration_layers):
            calibration_layers.append(
                categorical_calibration.CategoricalCalibration(
                    units=categorical_calibration_units,
                    kernel_initializer="constant",
                    num_buckets=config["num_buckets"],
                    output_min=config["output_min"],
                    output_max=config["output_max"],
                    monotonicities=config["monotonicities"],
                    kernel_regularizer=config["kernel_regularizer"],
                    default_input_value=config["default_input_value"]))
        if len(calibration_layers) == 1:
            model.add(calibration_layers[0])
        else:
            model.add(
                parallel_combination.ParallelCombination(calibration_layers))
        if config["units"] > 1:
            model.add(
                keras.layers.Lambda(
                    lambda x: tf.reduce_mean(x, axis=1, keepdims=True)))
        model.compile(loss=keras.losses.mean_squared_error,
                      optimizer=config["optimizer"](
                          learning_rate=config["learning_rate"]))

        training_data = (training_inputs, training_labels, training_inputs)

        loss = test_utils.run_training_loop(config=config,
                                            training_data=training_data,
                                            keras_model=model,
                                            plot_path=plot_path,
                                            input_dtype=np.int32)

        assertion_ops = []
        for calibration_layer in calibration_layers:
            assertion_ops.extend(
                calibration_layer.assert_constraints(
                    eps=config["constraint_assertion_eps"]))
        if not tf.executing_eagerly() and assertion_ops:
            tf.compat.v1.keras.backend.get_session().run(assertion_ops)

        return loss
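As with the PWL variant, a config for this helper might look like the following sketch; the keys come from the usages above, while the values and the bucket generator are illustrative assumptions:

config = {
    "units": 1,
    "one_d_input": False,
    "num_training_records": 128,
    "num_buckets": 4,
    "default_input_value": -1,  # routed to the last bucket (see comment above)
    "monotonicities": None,
    "x_generator": self._ScatterXIntoBuckets,  # hypothetical helper
    "y_function": lambda x: x % 2,
}
loss = self._TrainModel(config)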