Example #1
  def _TrainModel(self, config, plot_path=None):
    """Trains model and returns loss.

    Args:
      config: Layer config, internal to this test, which specifies the
        parameters of the linear layer to train.
      plot_path: If specified, the PNG file name under which to save the
        visualization. See test_utils.run_training_loop() for more details.

    Returns:
      Training loss.
    """
    logging.info("Testing config:")
    logging.info(config)
    config = self._SetDefaults(config)

    self._ResetAllBackends()

    training_inputs, training_labels, raw_training_inputs = (
        self._GetTrainingInputsAndLabels(config))

    linear_layer = linl.Linear(
        input_shape=[config["num_input_dims"]],
        num_input_dims=config["num_input_dims"],
        monotonicities=config["monotonicities"],
        monotonic_dominances=config["monotonic_dominances"],
        use_bias=config["use_bias"],
        normalization_order=config["normalization_order"],
        kernel_initializer=keras.initializers.Constant(
            config["kernel_init_constant"]),
        bias_initializer=keras.initializers.Constant(
            config["bias_init_constant"]),
        kernel_regularizer=config["kernel_regularizer"],
        bias_regularizer=config["bias_regularizer"],
        dtype=tf.float32)
    model = keras.models.Sequential()
    model.add(linear_layer)
    optimizer = config["optimizer"](learning_rate=config["learning_rate"])
    model.compile(loss=keras.losses.mean_squared_error, optimizer=optimizer)

    training_data = (training_inputs, training_labels, raw_training_inputs)

    loss = test_utils.run_training_loop(
        config=config,
        training_data=training_data,
        keras_model=model,
        plot_path=plot_path)

    assertion_ops = linear_layer.assert_constraints(
        eps=config["allowed_constraints_violation"])
    if not tf.executing_eagerly() and assertion_ops:
      tf.compat.v1.keras.backend.get_session().run(assertion_ops)
    return loss
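
A minimal usage sketch for the helper above, assuming it lives inside the test case that defines _SetDefaults and _GetTrainingInputsAndLabels. The keys mirror the config lookups in the code; the values are illustrative assumptions, not defaults from the library, and _SetDefaults is expected to fill anything run_training_loop needs beyond them.

    # Hypothetical config; every value here is an illustrative assumption.
    config = {
        "num_input_dims": 2,
        "monotonicities": [1, 0],  # first input constrained to be increasing
        "monotonic_dominances": [],
        "use_bias": True,
        "normalization_order": None,
        "kernel_init_constant": 0.0,
        "bias_init_constant": 0.0,
        "kernel_regularizer": None,
        "bias_regularizer": None,
        "optimizer": keras.optimizers.Adagrad,
        "learning_rate": 1.0,
        "allowed_constraints_violation": 1e-6,
    }
    loss = self._TrainModel(config)  # called from within the test case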
Example #2
    def _TrainModel(self, config, plot_path=None):
        """Trains model and returns loss.

        Args:
          config: Layer config, internal to this test, which specifies the
            parameters of the KroneckerFactoredLattice layer to train.
          plot_path: If specified, the PNG file name under which to save the
            visualization. See test_utils.run_training_loop() for more details.

        Returns:
          Training loss.
        """
        logging.info("Testing config:")
        logging.info(config)
        config = self._SetDefaults(config)
        self._ResetAllBackends()

        training_inputs, training_labels, raw_training_inputs = (
            self._GetTrainingInputsAndLabels(config))

        units = config["units"]
        input_dims = config["input_dims"]
        lattice_sizes = config["lattice_sizes"]
        if units > 1:
            # To test a multi-'units' lattice, replicate the inputs 'units' times
            # and later use just one of the 'units' outputs to ensure that a
            # multi-'units' lattice trains exactly like a single-'units' one.
            training_inputs = [
                np.tile(np.expand_dims(x, axis=0), reps=[units, 1])
                for x in training_inputs
            ]
            input_shape = (units, input_dims)
        else:
            input_shape = (input_dims, )

        keras_layer = kfll.KroneckerFactoredLattice(
            lattice_sizes=lattice_sizes,
            units=units,
            num_terms=config["num_terms"],
            monotonicities=config["monotonicities"],
            satisfy_constraints_at_every_step=config[
                "satisfy_constraints_at_every_step"],
            kernel_initializer=config["kernel_initializer"],
            input_shape=input_shape,
            dtype=tf.float32)
        model = keras.models.Sequential()
        model.add(keras_layer)

        # When we use multi-unit lattices, we only extract a single lattice for
        # testing.
        if units > 1:
            lattice_index = config["lattice_index"]
            model.add(
                keras.layers.Lambda(
                    lambda x: x[:, lattice_index:lattice_index + 1]))

        optimizer = config["optimizer"](learning_rate=config["learning_rate"])
        model.compile(loss=keras.losses.mean_squared_error,
                      optimizer=optimizer)

        training_data = (training_inputs, training_labels, raw_training_inputs)
        loss = test_utils.run_training_loop(config=config,
                                            training_data=training_data,
                                            keras_model=model,
                                            plot_path=plot_path)

        if tf.executing_eagerly():
            tf.print("final weights: ", keras_layer.kernel)
        assertion_ops = keras_layer.assert_constraints(
            eps=-config["target_monotonicity_diff"])
        if not tf.executing_eagerly() and assertion_ops:
            tf.compat.v1.keras.backend.get_session().run(assertion_ops)

        return loss
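
A sketch of a config that would exercise the KroneckerFactoredLattice variant above. The values are illustrative assumptions, and "random_monotonic_initializer" is used here on the assumption that the layer accepts that initializer by name.

    # Hypothetical config; values are illustrative assumptions.
    config = {
        "units": 2,
        "input_dims": 2,
        "lattice_sizes": 2,
        "num_terms": 2,
        "monotonicities": [1, 1],
        "satisfy_constraints_at_every_step": True,
        "kernel_initializer": "random_monotonic_initializer",
        "lattice_index": 0,  # which of the 'units' outputs the test inspects
        "optimizer": keras.optimizers.Adagrad,
        "learning_rate": 1.0,
        "target_monotonicity_diff": 0.0,
    }
    loss = self._TrainModel(config)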
Example #3
    def _TrainModel(self, config, plot_path=None):
        """Trains model and returns loss.

    Args:
      config: Layer config internal for this test which specifies params of
        piecewise linear layer to train.
      plot_path: if specified - png file name to save visualization. See
        test_utils.run_training_loop() for more details.

    Returns:
      Training loss.
    """
        logging.info("Testing config:")
        logging.info(config)
        config = self._SetDefaults(config)

        self._ResetAllBackends()

        if config["default_input_value"] is not None:
            # default_input_value is mapped to the last bucket, hence x_generator
            # needs to generate values in the [0, ..., num_buckets-2] range.
            num_random_buckets = config["num_buckets"] - 1
        else:
            num_random_buckets = config["num_buckets"]

        # The input to the model can be either single- or multi-dimensional.
        input_units = 1 if config["one_d_input"] else config["units"]

        training_inputs = config["x_generator"](
            units=input_units,
            num_points=config["num_training_records"],
            num_buckets=num_random_buckets,
            missing_probability=config["missing_probability"],
            default_input_value=config["default_input_value"])
        training_labels = [config["y_function"](x) for x in training_inputs]

        # Either create multiple CategoricalCalibration layers and combine using a
        # ParallelCombination layer, or create a single CategoricalCalibration with
        # multiple output dimensions.
        if config["use_multi_calibration_layer"]:
            num_calibration_layers = config["units"]
            categorical_calibration_units = 1
        else:
            num_calibration_layers = 1
            categorical_calibration_units = config["units"]

        model = keras.models.Sequential()
        model.add(keras.layers.Input(shape=[input_units], dtype=tf.int32))
        calibration_layers = []
        for _ in range(num_calibration_layers):
            calibration_layers.append(
                categorical_calibration.CategoricalCalibration(
                    units=categorical_calibration_units,
                    kernel_initializer="constant",
                    num_buckets=config["num_buckets"],
                    output_min=config["output_min"],
                    output_max=config["output_max"],
                    monotonicities=config["monotonicities"],
                    kernel_regularizer=config["kernel_regularizer"],
                    default_input_value=config["default_input_value"]))
        if len(calibration_layers) == 1:
            model.add(calibration_layers[0])
        else:
            model.add(
                parallel_combination.ParallelCombination(calibration_layers))
        if config["units"] > 1:
            model.add(
                keras.layers.Lambda(
                    lambda x: tf.reduce_mean(x, axis=1, keepdims=True)))
        model.compile(loss=keras.losses.mean_squared_error,
                      optimizer=config["optimizer"](
                          learning_rate=config["learning_rate"]))

        training_data = (training_inputs, training_labels, training_inputs)

        loss = test_utils.run_training_loop(config=config,
                                            training_data=training_data,
                                            keras_model=model,
                                            plot_path=plot_path,
                                            input_dtype=np.int32)

        assertion_ops = []
        for calibration_layer in calibration_layers:
            assertion_ops.extend(
                calibration_layer.assert_constraints(
                    eps=config["constraint_assertion_eps"]))
        if not tf.executing_eagerly() and assertion_ops:
            tf.compat.v1.keras.backend.get_session().run(assertion_ops)

        return loss
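
A sketch of a config for the CategoricalCalibration variant above. The x_generator and y_function entries point at test-case helpers; _ScatterXUniformly and _SmallWaves are hypothetical names standing in for whatever generators the test class actually defines.

    # Hypothetical config; helper names and values are assumptions.
    config = {
        "units": 1,
        "one_d_input": False,
        "num_buckets": 3,
        "default_input_value": None,
        "x_generator": self._ScatterXUniformly,  # hypothetical helper
        "y_function": self._SmallWaves,          # hypothetical helper
        "num_training_records": 100,
        "missing_probability": 0.0,
        "use_multi_calibration_layer": False,
        "output_min": None,
        "output_max": None,
        "monotonicities": None,
        "kernel_regularizer": None,
        "optimizer": keras.optimizers.Adagrad,
        "learning_rate": 1.0,
        "constraint_assertion_eps": 1e-6,
    }
    loss = self._TrainModel(config)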
Example #4
    def _TrainModel(self, config, plot_path=None):
        """Trains model and returns loss.

    Args:
      config: Layer config internal for this test which specifies params of
        piecewise linear layer to train.
      plot_path: if specified - png file name to save visualization. See
        test_utils.run_training_loop() for more details.

    Returns:
      Training loss.
    """
        logging.info("Testing config:")
        logging.info(config)
        if plot_path is not None and config["units"] > 1:
            raise ValueError("Test config error. "
                             "Cannot plot multi-unit calibrators.")
        config = self._SetDefaults(config)

        self._ResetAllBackends()

        # The input to the model can be either single- or multi-dimensional.
        input_units = 1 if config["one_d_input"] else config["units"]

        training_inputs = config["x_generator"](
            units=input_units,
            num_points=config["num_training_records"],
            input_min=config["input_keypoints"][0],
            input_max=config["input_keypoints"][-1],
            missing_probability=config["missing_probability"],
            missing_input_value=config["missing_input_value"])
        training_labels = [config["y_function"](x) for x in training_inputs]

        # Either create multiple PWLCalibration layers and combine using a
        # ParallelCombination layer, or create a single PWLCalibration with multiple
        # output dimensions.
        if config["use_multi_calibration_layer"]:
            num_calibration_layers = config["units"]
            pwl_calibration_units = 1
        else:
            num_calibration_layers = 1
            pwl_calibration_units = config["units"]

        model = keras.models.Sequential()
        model.add(tf.keras.layers.Input(shape=[input_units], dtype=tf.float32))
        calibration_layers = []
        for _ in range(num_calibration_layers):
            calibration_layers.append(
                pwl_calibration.PWLCalibration(
                    units=pwl_calibration_units,
                    dtype=tf.float32,
                    input_keypoints=config["input_keypoints"],
                    output_min=config["output_min"],
                    output_max=config["output_max"],
                    clamp_min=config["clamp_min"],
                    clamp_max=config["clamp_max"],
                    monotonicity=config["monotonicity"],
                    convexity=config["convexity"],
                    is_cyclic=config["is_cyclic"],
                    kernel_initializer=config["initializer"],
                    kernel_regularizer=config["kernel_regularizer"],
                    impute_missing=config["impute_missing"],
                    missing_output_value=config["missing_output_value"],
                    missing_input_value=config["missing_input_value"],
                    num_projection_iterations=config[
                        "num_projection_iterations"]))
        if len(calibration_layers) == 1:
            if config["use_separate_missing"]:
                model.add(
                    CalibrateWithSeparateMissing(
                        calibration_layer=calibration_layers[0],
                        missing_input_value=config["missing_input_value"]))
            else:
                model.add(calibration_layers[0])
        else:
            model.add(
                parallel_combination.ParallelCombination(calibration_layers))

        if config["units"] > 1:
            model.add(
                keras.layers.Lambda(
                    lambda x: tf.reduce_mean(x, axis=1, keepdims=True)))

        model.compile(loss=keras.losses.mean_squared_error,
                      optimizer=config["optimizer"](
                          learning_rate=config["learning_rate"]))

        training_data = (training_inputs, training_labels, training_inputs)

        loss = test_utils.run_training_loop(config=config,
                                            training_data=training_data,
                                            keras_model=model,
                                            plot_path=plot_path)

        assertion_ops = []
        for calibration_layer in calibration_layers:
            assertion_ops.extend(
                calibration_layer.assert_constraints(
                    eps=config["constraint_assertion_eps"]))
        if not tf.executing_eagerly() and assertion_ops:
            tf.compat.v1.keras.backend.get_session().run(assertion_ops)

        return loss
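
A sketch of a config for the PWLCalibration variant above. Only the most telling keys are spelled out; the rest (clamping, missing-value handling, regularizers) are assumed to be filled in by _SetDefaults, and the helper names are again hypothetical.

    # Hypothetical config; omitted keys are assumed to get defaults.
    config = {
        "units": 1,
        "one_d_input": False,
        "input_keypoints": np.linspace(0.0, 1.0, num=10),
        "output_min": 0.0,
        "output_max": 1.0,
        "monotonicity": 1,
        "x_generator": self._ScatterXUniformly,  # hypothetical helper
        "y_function": self._SmallWaves,          # hypothetical helper
        "num_training_records": 100,
        "missing_probability": 0.0,
        "optimizer": keras.optimizers.Adagrad,
        "learning_rate": 0.15,
    }
    loss = self._TrainModel(config)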
Example #5
  def _TrainModel(self, config, plot_path=None):
    """Trains model and returns loss.

    Args:
      config: Layer config, internal to this test, which specifies the
        parameters of the linear layer to train.
      plot_path: If specified, the PNG file name under which to save the
        visualization. See test_utils.run_training_loop() for more details.

    Returns:
      Training loss.
    """
    logging.info("Testing config:")
    logging.info(config)
    config = self._SetDefaults(config)

    self._ResetAllBackends()

    training_inputs, training_labels, raw_training_inputs = (
        self._GetTrainingInputsAndLabels(config))
    units = config["units"]
    num_input_dims = config["num_input_dims"]
    if units > 1:
      # To test a multi-'units' linear layer, replicate the inputs 'units' times
      # and later use just one of the 'units' outputs to ensure that a
      # multi-'units' linear layer trains exactly like a single-'units' one.
      training_inputs = [
          np.tile(np.expand_dims(x, axis=0), reps=[units, 1])
          for x in training_inputs
      ]
      input_shape = (units, num_input_dims)
    else:
      input_shape = (num_input_dims,)

    linear_layer = linl.Linear(
        input_shape=input_shape,
        num_input_dims=config["num_input_dims"],
        units=units,
        monotonicities=config["monotonicities"],
        monotonic_dominances=config["monotonic_dominances"],
        range_dominances=config["range_dominances"],
        input_min=config["clip_min"],
        input_max=config["clip_max"],
        use_bias=config["use_bias"],
        normalization_order=config["normalization_order"],
        kernel_initializer=keras.initializers.Constant(
            config["kernel_init_constant"]),
        bias_initializer=keras.initializers.Constant(
            config["bias_init_constant"]),
        kernel_regularizer=config["kernel_regularizer"],
        bias_regularizer=config["bias_regularizer"],
        dtype=tf.float32)
    model = keras.models.Sequential()
    model.add(linear_layer)
    # When we use a multi-unit linear layer, we only extract a single unit
    # for testing.
    if units > 1:
      unit_index = config["unit_index"]
      model.add(
          keras.layers.Lambda(lambda x: x[:, unit_index:unit_index + 1]))
    optimizer = config["optimizer"](learning_rate=config["learning_rate"])
    model.compile(loss=keras.losses.mean_squared_error, optimizer=optimizer)

    training_data = (training_inputs, training_labels, raw_training_inputs)

    loss = test_utils.run_training_loop(
        config=config,
        training_data=training_data,
        keras_model=model,
        plot_path=plot_path)

    assertion_ops = linear_layer.assert_constraints(
        eps=config["allowed_constraints_violation"])
    if not tf.executing_eagerly() and assertion_ops:
      tf.compat.v1.keras.backend.get_session().run(assertion_ops)
    return loss
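
Relative to Example #1, the multi-unit variant above reads a handful of extra keys; a sketch with illustrative values, everything else as in the Example #1 config:

    # Hypothetical additions on top of the Example #1 config.
    config.update({
        "units": 3,
        "unit_index": 0,   # which unit's output the test keeps
        "range_dominances": [],
        "clip_min": None,  # forwarded as the layer's input_min
        "clip_max": None,  # forwarded as the layer's input_max
    })
    loss = self._TrainModel(config)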