def entire_network(self, features, params, is_training, reuse_variables):
        """The definition of the entire network.
        Sometimes, feature normalization is applied after the main network.
        We combine them together (except for the loss layer).

        Args:
            features: The network input.
            params: The parameters.
            is_training: True if the network is for training.
            reuse_variables: Share variables.
        :return: The network output and the endpoints (for other usage).
        """
        features, endpoints = self.network(features, params, is_training, reuse_variables)
        endpoints["output"] = features
        # Add more components (post-processing) after the main network.
        if "feature_norm" in params.dict and params.feature_norm:
            assert "feature_scaling_factor" in params.dict, "If feature normalization is applied, scaling factor is necessary."
            features = l2_scaling(features, params.feature_scaling_factor)
            endpoints["output"] = features

        return features, endpoints
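
For reference, a minimal sketch of what model.common.l2_scaling presumably does, inferred from its usage above (it assumes import tensorflow as tf; the epsilon argument is an assumption added for numerical stability):

def l2_scaling(features, scaling_factor, epsilon=1e-12):
    """Rescale each feature vector to have L2 norm == scaling_factor."""
    # l2_normalize divides each row by its L2 norm (epsilon guards against
    # division by zero); multiplying by scaling_factor fixes the output norm.
    return scaling_factor * tf.nn.l2_normalize(features, axis=1, epsilon=epsilon)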
Example #2
    params.dict["amsoftmax_lambda_min"] = 10
    params.dict["amsoftmax_lambda_base"] = 1000
    params.dict["amsoftmax_lambda_gamma"] = 1
    params.dict["amsoftmax_lambda_power"] = 4

    params.dict["arcsoftmax_lambda_min"] = 10
    params.dict["arcsoftmax_lambda_base"] = 1000
    params.dict["arcsoftmax_lambda_gamma"] = 1
    params.dict["arcsoftmax_lambda_power"] = 4

    params.dict["feature_norm"] = True
    params.dict["feature_scaling_factor"] = 20

    import numpy as np
    import tensorflow as tf
    from model.common import l2_scaling
    outputs, endpoints = tdnn(features, params, is_training=True, reuse_variables=False)
    outputs = l2_scaling(outputs, params.feature_scaling_factor)
    outputs_norm = tf.norm(outputs, axis=1)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        [outputs_val, outputs_norm_val] = sess.run([outputs, outputs_norm], feed_dict={features: features_val})
        assert np.allclose(np.sqrt(np.sum(outputs_val ** 2, axis=1)), params.feature_scaling_factor)
        assert np.allclose(outputs_norm_val, params.feature_scaling_factor)
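        # Both assertions check the same invariant two ways: after l2_scaling,
        # every row of the output has L2 norm equal to feature_scaling_factor.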

    # Test loss functions
    # This only works in debug mode, since the loss function is asked to
    # output its weights for our NumPy-based reference computation.
    from model.loss import asoftmax, additive_margin_softmax, additive_angular_margin_softmax
    from model.test_utils import compute_asoftmax, compute_amsoftmax, compute_arcsoftmax

    params.dict["global_step"] = 1
    print("Asoftmax")
    for scaling in [True, False]: