Exemplo n.º 1
0
    def testRTLSaveLoad(self):
        """Round-trips a two-layer RTL model through Keras HDF5 save/load."""
        if self.disable_all:
            return

        # Four scalar features, each passed through its own PWL calibrator.
        feature_inputs = [
            tf.keras.layers.Input(shape=(1,)) for _ in range(4)
        ]

        keypoints = np.linspace(0.0, 1.0, 10)
        calibrated = []
        # Per-feature (units, monotonicity); None means layer default.
        for feature, num_units, direction in zip(
                feature_inputs, (2, 3, 4, 5),
                (None, None, "decreasing", "decreasing")):
            layer_kwargs = dict(
                units=num_units,
                input_keypoints=keypoints,
                output_min=0.0,
                output_max=1.0)
            if direction:
                layer_kwargs["monotonicity"] = direction
            calibrated.append(
                pwl_calibration_layer.PWLCalibration(**layer_kwargs)(feature))

        # First RTL emits separate per-lattice outputs so a second RTL can
        # consume them.
        first_rtl = rtl_layer.RTL(num_lattices=10,
                                  lattice_rank=3,
                                  output_min=0.0,
                                  output_max=1.0,
                                  separate_outputs=True)
        first_outputs = first_rtl({
            "unconstrained": calibrated[:2],
            "increasing": calibrated[2:]
        })
        second_rtl = rtl_layer.RTL(num_lattices=3, lattice_rank=4)
        second_outputs = second_rtl(first_outputs)
        final_output = linear_layer.Linear(
            num_input_dims=3, monotonicities=[1] * 3)(second_outputs)
        model = tf.keras.Model(inputs=feature_inputs, outputs=final_output)
        model.compile(loss="mse")

        # Save and reload; custom layers must be registered via custom_objects.
        with tempfile.NamedTemporaryFile(suffix=".h5") as f:
            model.save(f.name)
            _ = tf.keras.models.load_model(
                f.name,
                custom_objects={
                    "RTL": rtl_layer.RTL,
                    "PWLCalibration": pwl_calibration_layer.PWLCalibration,
                    "Linear": linear_layer.Linear,
                })
Exemplo n.º 2
0
 def _CreateKerasLayer(self, config):
   """Builds a PWLCalibration keras layer from a test `config` dict."""
   # When use_separate_missing is set, the is_missing tensor is constructed
   # externally from config["missing_input_value"], so the layer itself must
   # not receive a missing_input_value (it would otherwise impute internally).
   missing_value = (None if config["use_separate_missing"]
                    else config["missing_input_value"])
   return keras_layer.PWLCalibration(
       input_keypoints=config["input_keypoints"],
       units=config["units"],
       output_min=config["output_min"],
       output_max=config["output_max"],
       clamp_min=config["clamp_min"],
       clamp_max=config["clamp_max"],
       monotonicity=config["monotonicity"],
       convexity=config["convexity"],
       is_cyclic=config["is_cyclic"],
       kernel_initializer=config["initializer"],
       kernel_regularizer=config["kernel_regularizer"],
       impute_missing=config["impute_missing"],
       missing_output_value=config["missing_output_value"],
       missing_input_value=missing_value,
       num_projection_iterations=config["num_projection_iterations"],
       dtype=config["dtype"])
Exemplo n.º 3
0
    def testRTLInputShapes(self):
        """Exercises RTL with dense, calibrated, and two-layer input formats."""
        if self.disable_all:
            return
        data_size = 100

        # --- Dense input format: multi-dim features fed straight to RTL. ---
        a = np.random.random_sample(size=(data_size, 10))
        b = np.random.random_sample(size=(data_size, 20))
        target_ab = (np.max(a, axis=1, keepdims=True) +
                     np.min(b, axis=1, keepdims=True))

        input_a = tf.keras.layers.Input(shape=(10,))
        input_b = tf.keras.layers.Input(shape=(20,))

        dense_rtl = rtl_layer.RTL(num_lattices=6, lattice_rank=5)
        dense_out = dense_rtl({"unconstrained": input_a,
                               "increasing": input_b})
        dense_pred = tf.keras.layers.Dense(1)(dense_out)
        model = tf.keras.Model(inputs=[input_a, input_b], outputs=dense_pred)
        model.compile(loss="mse")
        model.fit([a, b], target_ab)
        model.predict([a, b])

        # --- Scalar inputs calibrated before entering the RTL layer. ---
        c, d, e, f = (np.random.random_sample(size=(data_size, 1))
                      for _ in range(4))
        target_cdef = np.sin(np.pi * c) * np.cos(np.pi * d) - e * f

        feature_inputs = [
            tf.keras.layers.Input(shape=(1,)) for _ in range(4)
        ]

        keypoints = np.linspace(0.0, 1.0, 10)
        calibrated = []
        # Per-feature (units, monotonicity); None means layer default.
        for feature, num_units, direction in zip(
                feature_inputs, (2, 3, 4, 5),
                (None, None, "decreasing", "decreasing")):
            layer_kwargs = dict(
                units=num_units,
                input_keypoints=keypoints,
                output_min=0.0,
                output_max=1.0)
            if direction:
                layer_kwargs["monotonicity"] = direction
            calibrated.append(
                pwl_calibration_layer.PWLCalibration(**layer_kwargs)(feature))

        single_rtl = rtl_layer.RTL(num_lattices=10, lattice_rank=3)
        single_out = single_rtl({
            "unconstrained": calibrated[:2],
            "increasing": calibrated[2:]
        })
        single_pred = linear_layer.Linear(
            num_input_dims=10, monotonicities=[1] * 10)(single_out)
        model = tf.keras.Model(inputs=feature_inputs, outputs=single_pred)
        model.compile(loss="mse")
        model.fit([c, d, e, f], target_cdef)
        model.predict([c, d, e, f])

        # --- Two stacked RTL layers; the first must emit separate outputs. ---
        stacked_rtl_0 = rtl_layer.RTL(num_lattices=10,
                                      lattice_rank=3,
                                      output_min=0.0,
                                      output_max=1.0,
                                      separate_outputs=True)
        stacked_mid = stacked_rtl_0({
            "unconstrained": calibrated[:2],
            "increasing": calibrated[2:]
        })
        stacked_rtl_1 = rtl_layer.RTL(num_lattices=3, lattice_rank=4)
        stacked_out = stacked_rtl_1(stacked_mid)
        stacked_pred = linear_layer.Linear(
            num_input_dims=3, monotonicities=[1] * 3)(stacked_out)
        model = tf.keras.Model(inputs=feature_inputs, outputs=stacked_pred)
        model.compile(loss="mse")
        model.fit([c, d, e, f], target_cdef)
        model.predict([c, d, e, f])
Exemplo n.º 4
0
    def _TrainModel(self, config, plot_path=None):
        """Trains a PWL calibration model and returns its training loss.

        Args:
          config: Layer config internal for this test which specifies params of
            the piecewise linear layer to train.
          plot_path: If specified, png file name to save visualization. See
            test_utils.run_training_loop() for more details.

        Returns:
          Training loss.

        Raises:
          ValueError: If plot_path is set for a multi-unit config; plotting is
            only supported for single-unit calibrators.
        """
        logging.info("Testing config:")
        logging.info(config)
        if plot_path is not None and config["units"] > 1:
            raise ValueError("Test config error. "
                             "Can not plot multi unit calibrators.")
        config = self._SetDefaults(config)

        self._ResetAllBackends()

        # The input to the model can either be single or multi dimensional.
        input_units = 1 if config["one_d_input"] else config["units"]

        # Synthesize training data over the keypoint range, optionally
        # injecting missing values per the config.
        training_inputs = config["x_generator"](
            units=input_units,
            num_points=config["num_training_records"],
            input_min=config["input_keypoints"][0],
            input_max=config["input_keypoints"][-1],
            missing_probability=config["missing_probability"],
            missing_input_value=config["missing_input_value"])
        training_labels = [config["y_function"](x) for x in training_inputs]

        # Either create multiple PWLCalibration layers and combine using a
        # ParallelCombination layer, or create a single PWLCalibration with multiple
        # output dimensions.
        if config["use_multi_calibration_layer"]:
            num_calibration_layers = config["units"]
            pwl_calibration_units = 1
        else:
            num_calibration_layers = 1
            pwl_calibration_units = config["units"]

        model = keras.models.Sequential()
        model.add(tf.keras.layers.Input(shape=[input_units], dtype=tf.float32))
        calibration_layers = []
        # NOTE(review): `pwl_calibraion` (sic) is presumably this file's import
        # alias for the PWL calibration layer module — confirm against the
        # file's imports before renaming.
        for _ in range(num_calibration_layers):
            calibration_layers.append(
                pwl_calibraion.PWLCalibration(
                    units=pwl_calibration_units,
                    dtype=tf.float32,
                    input_keypoints=config["input_keypoints"],
                    output_min=config["output_min"],
                    output_max=config["output_max"],
                    clamp_min=config["clamp_min"],
                    clamp_max=config["clamp_max"],
                    monotonicity=config["monotonicity"],
                    convexity=config["convexity"],
                    is_cyclic=config["is_cyclic"],
                    kernel_initializer=config["initializer"],
                    kernel_regularizer=config["kernel_regularizer"],
                    impute_missing=config["impute_missing"],
                    missing_output_value=config["missing_output_value"],
                    missing_input_value=config["missing_input_value"],
                    num_projection_iterations=config[
                        "num_projection_iterations"]))
        # With a single calibration layer, optionally wrap it so the
        # is_missing tensor is computed outside the layer; otherwise combine
        # the per-unit layers in parallel.
        if len(calibration_layers) == 1:
            if config["use_separate_missing"]:
                model.add(
                    CalibrateWithSeparateMissing(
                        calibration_layer=calibration_layers[0],
                        missing_input_value=config["missing_input_value"]))
            else:
                model.add(calibration_layers[0])
        else:
            model.add(
                parallel_combination.ParallelCombination(calibration_layers))

        # Collapse multi-unit outputs to a single value so the loss compares
        # against the scalar labels.
        if config["units"] > 1:
            model.add(
                keras.layers.Lambda(
                    lambda x: tf.reduce_mean(x, axis=1, keepdims=True)))

        model.compile(loss=keras.losses.mean_squared_error,
                      optimizer=config["optimizer"](
                          learning_rate=config["learning_rate"]))

        training_data = (training_inputs, training_labels, training_inputs)

        loss = test_utils.run_training_loop(config=config,
                                            training_data=training_data,
                                            keras_model=model,
                                            plot_path=plot_path)

        # Verify the trained weights satisfy the layer constraints (within
        # eps). In graph mode the assertion ops must be run explicitly.
        assetion_ops = []
        for calibration_layer in calibration_layers:
            assetion_ops.extend(
                calibration_layer.assert_constraints(
                    eps=config["constraint_assertion_eps"]))
        if not tf.executing_eagerly() and assetion_ops:
            tf.compat.v1.keras.backend.get_session().run(assetion_ops)

        return loss