def closure(inputs, unused_conditioning=None, weight_decay=2.5e-5, is_training=True) -> tf.Tensor:
    input_layer = tf.reshape(inputs, [-1, size])
    outputs = tf.expand_dims(input_layer, 2)
    # Four stacked 1-D convolutions over the feature axis, then pool, flatten,
    # and project down to a single scalar output.
    outputs = conv1d(outputs, filters=4, kernel_size=2, strides=1, padding='same', activation=leaky_relu)
    outputs = conv1d(outputs, filters=4, kernel_size=2, strides=1, padding='same', activation=leaky_relu)
    outputs = conv1d(outputs, filters=4, kernel_size=2, strides=1, padding='same', activation=leaky_relu)
    outputs = conv1d(outputs, filters=4, kernel_size=2, strides=1, padding='same', activation=leaky_relu)
    outputs = max_pooling1d(outputs, pool_size=2, strides=1)
    outputs = flatten(outputs)
    outputs = fully_connected(outputs, 4, activation=leaky_relu)
    outputs = fully_connected(outputs, 1, activation=leaky_relu)
    return outputs
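# --- A minimal sketch (assumption, not from the original module) of the free
# names the closure above captures: `size`, the conv/pool/dense helpers, and
# `leaky_relu`. These are plausible TF 1.x bindings; the real enclosing code
# may alias them differently.
import tensorflow as tf

size = 64                           # hypothetical flattened feature width
conv1d = tf.layers.conv1d
max_pooling1d = tf.layers.max_pooling1d
flatten = tf.layers.flatten
fully_connected = tf.layers.dense   # assumed alias for a dense layer
leaky_relu = tf.nn.leaky_relu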
def __call__(self, query, state):
    with variable_scope.variable_scope(None, 'location_sensitive_attention', [query]):
        key = self._keys
        query = self.query_layer(query) if self.query_layer else query
        query = array_ops.expand_dims(query, axis=1)
        proceeding_alignment = array_ops.expand_dims(state, axis=2)
        with variable_scope.variable_scope(
                'attention_convolution_dense_layer', reuse=variable_scope.AUTO_REUSE):
            proceeding_alignment = convolutional.conv1d(
                inputs=proceeding_alignment,
                filters=self.conv_channel,
                kernel_size=self.conv_kernel_size,
                strides=self.conv_stride_size,
                padding='same')
            proceeding_alignment = core.dense(
                inputs=proceeding_alignment, units=self.num_units, use_bias=False)
        energy = self.score(key, query, proceeding_alignment)
        # 4-2. alignment of shape [batch_size, max_encoder_output_lengths]
        alignment = self._probability_fn(energy, state)
        # 4-3. cumulated_alignment of shape [batch_size, max_encoder_output_lengths]
        cumulated_alignment = alignment + state
        next_state = cumulated_alignment
    return alignment, next_state
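# --- Hedged sketch of the `self.score` hook used above. Assuming the standard
# location-sensitive energy of Chorowski et al. (2015),
# e = v_a^T tanh(keys + query + location_features); the name and signature
# below are illustrative, not the original implementation.
from tensorflow.python.ops import math_ops, variable_scope


def _location_sensitive_score(keys, query, location_features, num_units):
    # keys: [batch, max_time, num_units]; query: [batch, 1, num_units];
    # location_features: [batch, max_time, num_units] -> energy: [batch, max_time]
    v_a = variable_scope.get_variable('attention_v', shape=[num_units], dtype=query.dtype)
    return math_ops.reduce_sum(v_a * math_ops.tanh(keys + query + location_features), axis=2)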
def layer(x, name=None):
    with variable_scope.variable_scope(name, default_name="layer"):
        x = layers.layer_norm(x)
        x = convolutional.conv1d(
            x, 10, 1,
            use_bias=False,
            kernel_initializer=init_ops.constant_initializer(42.42))
        x = nn_ops.relu(x)
        return x
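# Illustrative usage (assumes a float tensor `x` of shape [batch, width, channels]
# is in scope): with name=None the default_name is uniquified per call, so each
# call creates fresh variables; an explicit name pins the scope.
y0 = layer(x)                # variables under scope "layer"
y1 = layer(x)                # fresh variables under scope "layer_1"
y2 = layer(x, name="block")  # explicit scope "block"; calling it again with the
                             # same name requires variable reuse to avoid an error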
def f(x):
    x = convolutional.conv1d(x, self.CHANNELS // 2, 3, padding="same")
    x = layers.batch_norm(x, is_training=False)
    x = convolutional.conv1d(x, self.CHANNELS // 2, 3, padding="same")
    x = layers.batch_norm(x, is_training=False)
    return x
def testConv1DFloat16(self):
    width = 7
    data = random_ops.random_uniform((5, width, 4), dtype='float16')
    output = conv_layers.conv1d(data, 32, 3, activation=nn_ops.relu)
    self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])
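# The test above presumes module aliases from TensorFlow's internal layout; a
# plausible set (assumption - the real test file may import differently):
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import nn_ops, random_ops
# Shape check: conv1d defaults to padding='valid', so a kernel of 3 shrinks the
# width by kernel_size - 1 = 2, giving the expected [5, width - 2, 32].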
def f(x):
    x = convolutional.conv1d(x, self.CHANNELS // 2, 3, padding="same")
    x = layers.batch_norm(x, is_training=True)
    x = convolutional.conv1d(x, self.CHANNELS // 2, 3, padding="same")
    x = layers.batch_norm(x, is_training=True)
    return x
def f(x):
    x = convolutional.conv1d(x, self.CHANNELS // 2, 3, padding="same")
    x = core_layers.batch_normalization(x, training=True)
    x = convolutional.conv1d(x, self.CHANNELS // 2, 3, padding="same")
    x = core_layers.batch_normalization(x, training=True)
    return x
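# General TF 1.x note (not shown in the snippets above): with training=True,
# batch_normalization only refreshes its moving mean/variance if the UPDATE_OPS
# collection runs alongside the train step. `optimizer` and `loss` below are
# placeholders for whatever the surrounding training code defines.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)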