Example #1
    def __init__(
        self,
        attribute: Text,
        feature_type: Text,
        feature_type_signature: List[FeatureSignature],
        config: Dict[Text, Any],
    ) -> None:
        """Creates a new `ConcatenateSparseDenseFeatures` object."""
        if not feature_type_signature:
            raise TFLayerConfigException(
                "The feature type signature must contain some feature signatures."
            )

        super().__init__(
            name=f"concatenate_sparse_dense_features_{attribute}_{feature_type}"
        )

        self._check_sparse_input_units(feature_type_signature)

        self.output_units = self._calculate_output_units(
            attribute, feature_type_signature, config)

        # Prepare dropout and sparse-to-dense layers if any sparse tensors are expected
        self._tf_layers: Dict[Text, tf.keras.layers.Layer] = {}
        if any(signature.is_sparse for signature in feature_type_signature):
            self._prepare_layers_for_sparse_tensors(attribute, feature_type, config)
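
The guard at the top fails fast, before any Keras state is created. A minimal, self-contained sketch of the same pattern (the `FeatureSignature` fields `is_sparse`/`units` mirror how the snippet uses them; all names here are illustrative stand-ins, not Rasa's actual API):

from typing import List, NamedTuple


class FeatureSignature(NamedTuple):
    is_sparse: bool
    units: int


class TFLayerConfigException(ValueError):
    """Raised when a layer receives an invalid configuration."""


def check_not_empty(feature_type_signature: List[FeatureSignature]) -> None:
    # Raise before any expensive layer setup happens.
    if not feature_type_signature:
        raise TFLayerConfigException(
            "The feature type signature must contain some feature signatures."
        )


check_not_empty([FeatureSignature(is_sparse=True, units=32)])  # passes
# check_not_empty([])  # would raise TFLayerConfigException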
Example #2
    def __init__(
        self,
        attribute: Text,
        attribute_signature: Dict[Text, List[FeatureSignature]],
        config: Dict[Text, Any],
    ) -> None:
        """Creates a new `RasaFeatureCombiningLayer` object."""
        if not attribute_signature or not (
            attribute_signature.get(SENTENCE, [])
            or attribute_signature.get(SEQUENCE, [])
        ):
            raise TFLayerConfigException(
                "The attribute signature must contain some feature signatures."
            )

        super().__init__(name=f"rasa_feature_combining_layer_{attribute}")

        self._tf_layers: Dict[Text, tf.keras.layers.Layer] = {}

        # Prepare sparse-dense combining layers for each present feature type
        self._feature_types_present = self._get_present_feature_types(
            attribute_signature)
        self._prepare_sparse_dense_concat_layers(attribute,
                                                 attribute_signature, config)

        # Prepare components for combining sequence- and sentence-level features
        self._prepare_sequence_sentence_concat(attribute, config)

        self.output_units = self._calculate_output_units(attribute, config)
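
For context, the `attribute_signature` consumed above maps a feature level (sequence or sentence) to a list of signatures, and the layer requires at least one of the two lists to be non-empty. A hypothetical illustration (key strings and fields are assumptions based on how the snippet reads them):

from typing import Dict, List, NamedTuple


class FeatureSignature(NamedTuple):
    is_sparse: bool
    units: int


SEQUENCE, SENTENCE = "sequence", "sentence"  # assumed values of the constants

attribute_signature: Dict[str, List[FeatureSignature]] = {
    SEQUENCE: [FeatureSignature(is_sparse=True, units=64)],
    SENTENCE: [],
}

# Mirrors the idea behind `_get_present_feature_types`: keep only the
# feature levels that actually carry signatures.
present = {level for level, sigs in attribute_signature.items() if sigs}
print(present)  # {'sequence'}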
Example #3
    def __init__(self, density: float = 0.2, **kwargs: Any) -> None:
        """Declares instance variables with default values.

        Args:
            density: Float between 0 and 1. Approximate fraction of trainable weights.
            units: Positive integer, dimensionality of the output space.
            activation: Activation function to use.
                If you don't specify anything, no activation is applied
                (ie. "linear" activation: `a(x) = x`).
            use_bias: Boolean, whether the layer uses a bias vector.
            kernel_initializer: Initializer for the `kernel` weights matrix.
            bias_initializer: Initializer for the bias vector.
            kernel_regularizer: Regularizer function applied to
                the `kernel` weights matrix.
            bias_regularizer: Regularizer function applied to the bias vector.
            activity_regularizer: Regularizer function applied to
                the output of the layer (its "activation").
            kernel_constraint: Constraint function applied to
                the `kernel` weights matrix.
            bias_constraint: Constraint function applied to the bias vector.
        """
        super().__init__(**kwargs)

        if density < 0.0 or density > 1.0:
            raise TFLayerConfigException("Layer density must be in [0, 1].")

        self.density = density
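
One way to read `density`: roughly that fraction of the kernel weights stays trainable. A sketch of how such a layer could derive a fixed binary connectivity mask (this masking logic is an assumption for illustration; the snippet itself only validates the value):

import numpy as np

density = 0.2
rng = np.random.default_rng(seed=42)

# Keep roughly 20% of the connections of an 8x4 kernel; the rest are pruned.
kernel_mask = (rng.random((8, 4)) < density).astype(np.float32)
print(f"fraction kept: {kernel_mask.mean():.2f}")  # close to 0.2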
Example #4
    def _chosen_loss(self) -> Callable:
        """Returns the loss function that matches the configured loss type."""
        if self.loss_type == MARGIN:
            return self._loss_margin
        elif self.loss_type == CROSS_ENTROPY:
            return self._loss_cross_entropy
        else:
            raise TFLayerConfigException(
                f"Wrong loss type '{self.loss_type}', "
                f"should be '{MARGIN}' or '{CROSS_ENTROPY}'."
            )
Example #5
    def _check_sparse_input_units(
        self, feature_type_signature: List[FeatureSignature]
    ) -> None:
        """Checks that all sparse features have the same last dimension size."""
        sparse_units = [
            feature_sig.units
            for feature_sig in feature_type_signature
            if feature_sig.is_sparse
        ]
        if len(set(sparse_units)) > 1:
            raise TFLayerConfigException(
                f"All sparse features must have the same last dimension size "
                f"but found different sizes: {set(sparse_units)}."
            )
Example #6
    def __init__(
        self,
        attribute: Text,
        attribute_signature: Dict[Text, List[FeatureSignature]],
        config: Dict[Text, Any],
    ) -> None:
        """Creates a new `RasaSequenceLayer` object."""
        if not attribute_signature or not attribute_signature.get(SEQUENCE, []):
            raise TFLayerConfigException(
                "The attribute signature must contain some sequence-level feature"
                "signatures but none were found."
            )

        super().__init__(name=f"rasa_sequence_layer_{attribute}")

        self._tf_layers: Dict[Text, Any] = {
            self.FEATURE_COMBINING: RasaFeatureCombiningLayer(
                attribute, attribute_signature, config
            ),
            self.FFNN: layers.Ffnn(
                config[HIDDEN_LAYERS_SIZES][attribute],
                config[DROP_RATE],
                config[REGULARIZATION_CONSTANT],
                config[WEIGHT_SPARSITY],
                layer_name_suffix=attribute,
            ),
        }

        self._enables_mlm = False
        # Note: Within TED, masked language modeling becomes just input dropout,
        # since there is no loss term associated with predicting the masked tokens.
        self._prepare_masked_language_modeling(attribute, attribute_signature, config)

        transformer_layers, transformer_units = self._prepare_transformer(
            attribute, config
        )
        self._has_transformer = transformer_layers > 0

        self.output_units = self._calculate_output_units(
            attribute, transformer_layers, transformer_units, config
        )
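
A hypothetical shape of the `config` dict consumed above (the constants presumably resolve to strings like these; the values are illustrative, not recommended settings):

# Assumed string values of HIDDEN_LAYERS_SIZES, DROP_RATE,
# REGULARIZATION_CONSTANT and WEIGHT_SPARSITY; illustrative only.
config = {
    "hidden_layers_sizes": {"text": [256, 128]},  # per-attribute Ffnn sizes
    "drop_rate": 0.2,
    "regularization_constant": 0.002,
    "weight_sparsity": 0.8,
}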
Example #7
    def __init__(
        self,
        num_neg: int,
        loss_type: Text,
        mu_pos: float,
        mu_neg: float,
        use_max_sim_neg: bool,
        neg_lambda: float,
        scale_loss: bool,
        similarity_type: Text,
        name: Optional[Text] = None,
        same_sampling: bool = False,
        constrain_similarities: bool = True,
        model_confidence: Text = SOFTMAX,
    ) -> None:
        """Declare instance variables with default values.

        Args:
            num_neg: Positive integer, the number of incorrect labels;
                the algorithm will minimize their similarity to the input.
            loss_type: The type of the loss function, either 'cross_entropy'
                or 'margin'.
            mu_pos: Float, indicates how similar the algorithm should
                try to make embedding vectors for correct labels;
                should be 0.0 < ... < 1.0 for 'cosine' similarity type.
            mu_neg: Float, maximum negative similarity for incorrect labels,
                should be -1.0 < ... < 1.0 for 'cosine' similarity type.
            use_max_sim_neg: Boolean, if 'True' the algorithm only minimizes
                maximum similarity over incorrect intent labels,
                used only if 'loss_type' is set to 'margin'.
            neg_lambda: Float, the scale of how important it is to minimize
                the maximum similarity between embeddings of different labels;
                used only if 'loss_type' is set to 'margin'.
            scale_loss: Boolean, if 'True' the loss is scaled inversely
                proportional to the confidence of the correct prediction.
            similarity_type: Similarity measure to use, either 'cosine' or 'inner'.
            name: Optional name of the layer.
            same_sampling: Boolean, if 'True' sample same negative labels
                for the whole batch.
            constrain_similarities: Boolean, if 'True' applies a sigmoid to all
                similarity terms and adds a term to the loss function to
                ensure that similarity values are approximately bounded.
                Used inside _loss_cross_entropy() only.
            model_confidence: Model confidence to be returned during inference.
                Possible values - 'softmax', 'cosine' and 'inner'.

        Raises:
            TFLayerConfigException: When `similarity_type` is not one of
                'cosine' or 'inner'.
        """
        super().__init__(name=name)
        self.num_neg = num_neg
        self.loss_type = loss_type
        self.mu_pos = mu_pos
        self.mu_neg = mu_neg
        self.use_max_sim_neg = use_max_sim_neg
        self.neg_lambda = neg_lambda
        self.scale_loss = scale_loss
        self.same_sampling = same_sampling
        self.constrain_similarities = constrain_similarities
        self.model_confidence = model_confidence
        self.similarity_type = similarity_type
        if self.similarity_type not in {COSINE, INNER}:
            raise TFLayerConfigException(
                f"Wrong similarity type '{self.similarity_type}', "
                f"should be '{COSINE}' or '{INNER}'."
            )