Example 1
def feature_model_fn(features, params):
    band_settings = band_settings_params.BandSettings.from_params(params)
    per_band_data = band_settings.per_band_sub_model_fn(per_band_model_fn,
                                                        features,
                                                        params=params)
    result = tf.stack(per_band_data, axis=1)
    graph_typecheck.assert_shape(result,
                                 [params["batch_size"], band_settings.nbands])
    return {"is_max_soft": result}
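All of these examples route intermediate tensors through `graph_typecheck.assert_shape`, a project-internal helper whose source is not shown. Judging from usage (it is both called for effect and returned), it validates a static shape and passes the tensor through. A minimal sketch under that assumption; this is inferred, not the module's actual implementation:

import tensorflow as tf

def assert_shape(tensor: tf.Tensor, expected_shape) -> tf.Tensor:
    # Compare the static shape against the expectation; returning the
    # tensor unchanged lets callers use this as an inline annotation.
    actual = tensor.shape.as_list()
    if actual != list(expected_shape):
        raise ValueError(f"Expected shape {list(expected_shape)}, got {actual}")
    return tensor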
Example 2
    def score(self):
        left = self._normalize(self.left)
        right = self._normalize(self.right)
        dotprod = tf.reduce_sum(left * right, axis=1)
        graph_typecheck.assert_shape(dotprod, [self.batch_size])
        # Normalize output to between 0 and 1.
        result = 0.5 * (dotprod + 1.0)
        graph_typecheck.assert_shape(result, [self.batch_size])
        return result
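`self._normalize` is not shown. Since the row-wise dot product is subsequently rescaled from [-1, 1] to [0, 1], it presumably L2-normalizes each row (perhaps gated on the `norm` flag seen in Example 4). A standalone sketch under that assumption:

import tensorflow as tf

def _normalize(tensor: tf.Tensor) -> tf.Tensor:
    # L2-normalize each row; the dot product of unit-norm rows is a
    # cosine similarity in [-1, 1], which 0.5 * (x + 1.0) maps to [0, 1].
    return tf.math.l2_normalize(tensor, axis=1)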
Example 3
    def loss(self, labels: tf.Tensor, score: typing.Optional[tf.Tensor] = None):
        """Computes the log loss of the scores.

        :param labels: Labels tensor
        :param score: Optional precomputed result of `self.score()`, to avoid recomputing it.
        :return: Tensor with the loss.
        """
        graph_typecheck.assert_shape(labels, [self.batch_size])
        score = self.score() if score is None else score
        return tf.losses.log_loss(labels=labels,
                                  predictions=score,
                                  epsilon=1e-5)
Example 4
    def __init__(self,
                 left: tf.Tensor,
                 right: tf.Tensor,
                 *,
                 batch_size: int,
                 hidden_size: int,
                 norm: bool = True):
        graph_typecheck.assert_shape(left, [batch_size, hidden_size])
        graph_typecheck.assert_shape(right, [batch_size, hidden_size])
        self.left = left
        self.right = right
        self.batch_size = batch_size
        self.hidden_size = hidden_size
        self.norm = norm
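Examples 2 through 4 appear to come from the same scoring class. Assuming a hypothetical class name `DotProductScorer`, the pieces might be wired together like this (the class name, batch size, and hidden size are illustrative only):

import tensorflow as tf

left = tf.random.normal([32, 128])
right = tf.random.normal([32, 128])
scorer = DotProductScorer(left, right, batch_size=32, hidden_size=128)
score = scorer.score()                                  # <float>[32], values in [0, 1]
loss = scorer.loss(labels=tf.ones([32]), score=score)   # reuses the computed score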
Example 5
def initial_layer_binned(
    initial_layer_features: tf.Tensor,
    cutoff_data: CutoffData,
    band: str,
    soft_onehot: Nonlinearity = Nonlinearity.SIGMOID
):
    batch_size, twice_window_size, channels = map(int, initial_layer_features.shape)
    nonlinearity = nonlinearity_fcn(soft_onehot)
    if channels == 3:
        scales = cutoff_data.dflux_dt_dflux_dtime_scales(band)
        cutoffs = cutoff_data.dflux_dt_dflux_dtime_cutoffs(band)

        cutoffs_batch_window = tf.expand_dims(tf.expand_dims(cutoffs, 0), 0)
        scales_batch_window = tf.expand_dims(
            tf.expand_dims(tf.expand_dims(scales, 0), 0), -1
        )
        init_layer_per_cutoff = tf.expand_dims(initial_layer_features, -1)
        graph_typecheck.assert_shape(
            cutoffs_batch_window, [1, 1, channels, cutoff_data.embedding_size]
        )
        graph_typecheck.assert_shape(scales_batch_window, [1, 1, channels, 1])
        graph_typecheck.assert_shape(
            init_layer_per_cutoff, [batch_size, twice_window_size, channels, 1]
        )
        result = nonlinearity(
            (init_layer_per_cutoff - cutoffs_batch_window) / scales_batch_window
        )
        return graph_typecheck.assert_shape(
            result, [batch_size, twice_window_size, channels, cutoff_data.embedding_size]
        )
    else:
        raise NotImplementedError(f"{channels}-channel data not implemented.")
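The heart of this function is the broadcasted subtraction: a <float>[batch, window, channels, 1] tensor minus a <float>[1, 1, channels, embedding] tensor broadcasts to [batch, window, channels, embedding], comparing every input value against every cutoff in one op. A tiny self-contained illustration of the same shape algebra (sizes are arbitrary):

import tensorflow as tf

values = tf.zeros([4, 10, 3, 1])        # [batch, 2*window, channels, 1]
cutoffs = tf.zeros([1, 1, 3, 16])       # [1, 1, channels, embedding]
scales = tf.ones([1, 1, 3, 1])          # [1, 1, channels, 1]
binned = tf.sigmoid((values - cutoffs) / scales)
print(binned.shape)                     # (4, 10, 3, 16)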
Example 6
    def masked(
        self, expanded_tensor: tf.Tensor, value_if_masked: float,
        expected_extra_dims: typing.List[int]
    ):
        """Masks a tensor which was calculated from dflux_dt.

        :param expanded_tensor: <float>[batch_size, window_size, ...] Tensor with first
            dimensions being batch_size and window_size.
        :param value_if_masked: Value to fill for masked positions.
        :param expected_extra_dims: Expected extra dimensions.
        :returns: Tensor of the same shape as `expanded_tensor`, but with
            `value_if_masked` filled in at masked positions.
        """
        mask_shape = list(map(int, self.mask.shape))
        graph_typecheck.assert_shape(expanded_tensor, mask_shape + expected_extra_dims)

        value_if_masked = expanded_tensor.dtype.as_numpy_dtype(value_if_masked)
        if_masked_tensor = tf.fill(expanded_tensor.shape, value_if_masked)
        mask = self.mask
        for i in range(2, 2 + len(expected_extra_dims)):
            mask = tf.expand_dims(mask, axis=i)
        mask = tf.tile(mask, [1, 1] + expected_extra_dims)
        return tf.where(mask, expanded_tensor, if_masked_tensor)
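Assuming `self.mask` is a <bool>[batch_size, window_size] validity mask, a caller whose tensor gained one extra trailing dimension would invoke it like this (the instance name `window_features` and the size 16 are illustrative):

# expanded: <float>[batch_size, window_size, 16], derived from dflux_dt.
filled = window_features.masked(expanded, value_if_masked=0.0,
                                expected_extra_dims=[16])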
Example 7
    def per_side_model(side_features, params):
        inputs = dense_extracted_features.feature_model_fn(side_features, params=params)
        # Channels is dflux/dt, etc.
        graph_typecheck.assert_shape(
            inputs, [batch_size, twice_window_size, channels, embedding_size, nbands]
        )
        first_layer_type = params["first_layer"]
        if first_layer_type == "conv":
            inputs_shuffled = tf.transpose(inputs, perm=[0, 1, 4, 2, 3])
            graph_typecheck.assert_shape(
                inputs_shuffled,
                [batch_size, twice_window_size, nbands, channels, embedding_size]
            )
            conv_result = input_conv(
                tf.reshape(inputs_shuffled, [batch_size, twice_window_size, nbands, -1])
            )
            curr_layer = tf.reshape(conv_result, [batch_size, -1])
        elif first_layer_type == "none":
            curr_layer = tf.reshape(inputs, [batch_size, -1])
        else:
            raise ValueError(f"No known first_layer type {first_layer_type}")

        return per_side_model_keras(curr_layer)
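`input_conv` and `per_side_model_keras` are defined outside this excerpt. Given that `input_conv` receives a [batch_size, twice_window_size, nbands, -1] tensor, one shape-compatible reading is a Keras Conv2D over the window/band grid feeding a small dense head; every layer choice and size below is illustrative, not taken from the source:

import tensorflow as tf

input_conv = tf.keras.layers.Conv2D(
    filters=32, kernel_size=(3, 1), padding="same", activation="relu")
per_side_model_keras = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(32),
])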
Example 8
def feature_model_fn(features, params):
    band_settings = band_settings_params.BandSettings.from_params(params)
    per_band_data = band_settings.per_band_sub_model_fn_with_band_name(
        per_band_model_fn,
        features,
        params=params,
        value_if_masked=params.get("value_if_masked", 0.0),
        soft_onehot=Nonlinearity[params.get("input_soft_onehot", "sigmoid").upper()]
    )
    return graph_typecheck.assert_shape(
        tf.stack(per_band_data, axis=4), [
            params["batch_size"],
            2 * params["window_size"],
            3,
            cutoff_data_for_window_size(params["window_size"]).embedding_size,
            band_settings.nbands,
        ]
    )
Example 9
def batch_2win_shaped(t):
    return graph_typecheck.assert_shape(t, [batch_size, 2 * window_size])
Example 10
def batch_shaped(t):
    return graph_typecheck.assert_shape(t, [batch_size])
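Because `assert_shape` returns its argument, small closures like these (Examples 9 and 10) double as inline shape annotations at each use site, e.g. (the feature keys here are illustrative):

flux = batch_2win_shaped(band_features["flux"])        # <float>[batch, 2*window]
closest = batch_shaped(band_features["closest_flux"])  # <float>[batch]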
Example 11
def per_band_model_fn(band_features, params, debug_print=False):
    # NOTE(gatoatigrado): dense_extracted_features.WindowFeatures provides a convenient
    # API for many calculations here, but right now the unit test data for max_model_kernel_test
    # does not provide time tensors, making it inconvenient to modernize this code.
    batch_size = params["batch_size"]
    window_size = params["window_size"]
    inv_eps = 1.0 / params["flux_scale_epsilon"]
    graph_typecheck.assert_shape(band_features["before_padding"], [batch_size])
    graph_typecheck.assert_shape(band_features["after_padding"], [batch_size])
    closest_flux = graph_typecheck.assert_shape(
        band_features["closest_flux_in_band"], [batch_size])

    before_flux = graph_typecheck.assert_shape(band_features["before_flux"],
                                               [batch_size, window_size])
    after_flux = graph_typecheck.assert_shape(band_features["after_flux"],
                                              [batch_size, window_size])

    # Soft greater-than operator: per-neighbor sigmoid scores are multiplied
    # together, so the result is high only if closest_flux exceeds all points.
    is_greater_than_before = masked_sigmoid(
        inv_eps * (tf.expand_dims(closest_flux, axis=1) - before_flux),
        _left_mask(band_features["before_padding"], window_size))
    is_greater_than_after = masked_sigmoid(
        inv_eps * (tf.expand_dims(closest_flux, axis=1) - after_flux),
        _right_mask(band_features["after_padding"], window_size))
    graph_typecheck.assert_shape(is_greater_than_before,
                                 [batch_size, window_size])
    graph_typecheck.assert_shape(is_greater_than_after,
                                 [batch_size, window_size])
    if debug_print:
        is_greater_than_before = graph_typecheck.print_single(
            is_greater_than_before, "is_greater_than_before:")
        is_greater_than_after = graph_typecheck.print_single(
            is_greater_than_after, "is_greater_than_after:")
    return tf.reduce_prod(is_greater_than_before, axis=1) * tf.reduce_prod(
        is_greater_than_after, axis=1)
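`masked_sigmoid`, `_left_mask`, and `_right_mask` are not shown. From the call sites, the masks plausibly mark non-padding positions, and `masked_sigmoid` must leave masked positions neutral under the `reduce_prod` above. A sketch consistent with that reading; the padding layout is an assumption, not confirmed by the source:

import tensorflow as tf

def _left_mask(before_padding: tf.Tensor, window_size: int) -> tf.Tensor:
    # Assumes padding occupies the first `before_padding` positions of the
    # "before" window, so the valid region is right-aligned.
    valid = tf.sequence_mask(window_size - before_padding, maxlen=window_size)
    return tf.reverse(valid, axis=[1])

def _right_mask(after_padding: tf.Tensor, window_size: int) -> tf.Tensor:
    # Assumes padding occupies the last `after_padding` positions of the
    # "after" window, so the valid region is left-aligned.
    return tf.sequence_mask(window_size - after_padding, maxlen=window_size)

def masked_sigmoid(logits: tf.Tensor, mask: tf.Tensor) -> tf.Tensor:
    # Sigmoid at valid positions; 1.0 at masked positions so they are
    # neutral under the subsequent reduce_prod.
    return tf.where(mask, tf.sigmoid(logits), tf.ones_like(logits))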