Example #1
0
 def __init__(self, beam_size,
              search=NotSpecified,
              input_type="prob",
              prob_scale=1.0, base_beam_score_scale=1.0, random_sample_scale=0.0,
              length_normalization=True,
              custom_score_combine=None,
              source_beam_sizes=None, scheduled_sampling=False, cheating=False,
              explicit_search_sources=None,
              **kwargs):
   """
   Registers this choice layer's label scores and its chosen labels as state
   variables on the enclosing :class:`RecStepByStepLayer`, rather than doing
   any search itself.  The search-related arguments (beam_size, search,
   prob_scale, ...) are accepted only for signature compatibility with the
   regular choice layer and are not used here.
   """
   super(ChoiceStateVarLayer, self).__init__(**kwargs)
   rec_layer = self.network.parent_layer
   assert isinstance(rec_layer, RecStepByStepLayer)
   assert len(self.sources) == 1
   source = self.sources[0]
   assert source.output.is_batch_major and len(source.output.shape) == 1
   # Reject unknown score formats up front.
   if input_type not in ("prob", "log_prob"):
     raise ValueError("Not handled input_type %r" % (input_type,))
   # Bring the incoming scores into log-space ("log_prob" already is).
   # TODO infer input_type from source layer class 'softmax' or 'linear' with 'log_softmax' activation
   scores_in = source.output.placeholder
   if input_type == "prob":
     if source.output_before_activation:
       # Cheaper and numerically safer than log(softmax(...)).
       scores_in = source.output_before_activation.get_log_output()
     else:
       from TFUtil import safe_log
       scores_in = safe_log(scores_in)
   scores_var_name = "stochastic_var_scores_%s" % self.name
   rec_layer.create_state_var(name=scores_var_name, data_shape=source.output)
   rec_layer.set_state_var_final_value(name=scores_var_name, final_value=scores_in)
   # The chosen label itself is fed in from outside as a state var.
   self.output.placeholder = rec_layer.create_state_var(
     name="stochastic_var_choice_%s" % self.name, data_shape=self.output)
   rec_layer.add_stochastic_var(self.name)
Example #2
0
 def _build_clp_multiplication(self, clp_kernel):
   """
   Complex linear projection (CLP): de-interleaves the input into real and
   imaginary feature channels, multiplies by the complex kernel, and returns
   the log-magnitude of the complex output.

   :param tf.Tensor clp_kernel: complex kernel, shape (2, n_in // 2, nr_of_filters),
     where index 0/1 of the first axis holds the real/imaginary part.
   :return: log-magnitude output, shape (batch, time, nr_of_filters)
   :rtype: tf.Tensor
   """
   from TFUtil import safe_log
   input_placeholder = self.input_data.get_placeholder_as_batch_major()
   # In graph mode, assert ops only run if something depends on them, so make
   # them control dependencies of the computation (the originals were dead ops).
   checks = [
     tf.assert_equal(tf.shape(clp_kernel)[1], tf.shape(input_placeholder)[2] // 2),
     tf.assert_equal(tf.shape(clp_kernel)[2], self._nr_of_filters)]
   with tf.control_dependencies(checks):
     # Even feature channels are the real part, odd channels the imaginary part.
     input_real = tf.strided_slice(input_placeholder, [0, 0, 0], tf.shape(input_placeholder), [1, 1, 2])
     input_imag = tf.strided_slice(input_placeholder, [0, 0, 1], tf.shape(input_placeholder), [1, 1, 2])
     # Use the validated argument consistently; the original validated
     # `clp_kernel` but then read `self._clp_kernel` (callers pass the same
     # tensor, but the mismatch invited bugs).
     kernel_real = clp_kernel[0, :, :]
     kernel_imag = clp_kernel[1, :, :]
     # Complex multiply: (a+bi)(c+di) = (ac - bd) + (ad + bc)i
     output_real = tf.einsum('btf,fp->btp', input_real, kernel_real) - tf.einsum('btf,fp->btp', input_imag, kernel_imag)
     output_imag = tf.einsum('btf,fp->btp', input_imag, kernel_real) + tf.einsum('btf,fp->btp', input_real, kernel_imag)
     # Magnitude of the complex output, then log-compress.
     output_uncompressed = tf.sqrt(tf.pow(output_real, 2) + tf.pow(output_imag, 2))
     output_compressed = safe_log(output_uncompressed)
   return output_compressed
Example #3
0
 def _build_clp_multiplication(self, clp_kernel):
     """
     Complex linear projection: treat even/odd input feature channels as
     real/imaginary parts, project them with the complex CLP kernel, and
     return the log of the magnitude of the complex result.

     :param clp_kernel: kernel of shape (2, n_in // 2, nr_of_filters);
         first axis is real (0) / imaginary (1).  NOTE(review): only its
         shape is checked here; the computation reads ``self._clp_kernel``.
     :return: tensor of shape (batch, time, nr_of_filters)
     """
     from TFUtil import safe_log
     x = self.input_data.get_placeholder_as_batch_major()
     tf.assert_equal(tf.shape(clp_kernel)[1], tf.shape(x)[2] // 2)
     tf.assert_equal(tf.shape(clp_kernel)[2], self._nr_of_filters)
     # Split interleaved channels: even -> real, odd -> imaginary.
     x_re = tf.strided_slice(x, [0, 0, 0], tf.shape(x), [1, 1, 2])
     x_im = tf.strided_slice(x, [0, 0, 1], tf.shape(x), [1, 1, 2])
     k_re = self._clp_kernel[0, :, :]
     k_im = self._clp_kernel[1, :, :]

     def proj(feat, kern):
         # Batched matmul over the feature axis.
         return tf.einsum('btf,fp->btp', feat, kern)

     # Complex multiplication, expanded into real arithmetic.
     y_re = proj(x_re, k_re) - proj(x_im, k_im)
     y_im = proj(x_im, k_re) + proj(x_re, k_im)
     magnitude = tf.sqrt(tf.pow(y_re, 2) + tf.pow(y_im, 2))
     return safe_log(magnitude)