def make_embedding(self, x):
    """Embed `x` through three 1-D conv layers, then max-over-time pool."""
    hparams = self.hparams
    filter_counts = (hparams.inner_size, hparams.inner_size, hparams.hidden_size)
    for n_filters in filter_counts:
        # Re-pad before every conv so the 'valid' convolution below always
        # has at least 13 timesteps of width to consume.
        x = model_utils.pad_up_to(x, size=13, axis=1)
        conv = keras.layers.Convolution1D(
            n_filters,
            hparams.filter_width,
            border_mode='valid',
            subsample_length=1,
            activation=None)
        x = tf.nn.relu(conv(x))
    # Collapse the sequence axis with max-over-time pooling.
    return tf.reduce_max(x, reduction_indices=1)
def testPadUpTo(self):
    """pad_up_to grows only the chosen axis, to at least `size`, keeping data."""
    np.random.seed(7)
    with self.test_session():
        original = np.random.randn(3, 5)
        for axis in (0, 1):
            for size in range(7):
                padded = model_utils.pad_up_to(
                    original, size=size, axis=axis).eval()
                # The padded axis is never shorter than requested.
                self.assertLessEqual(size, padded.shape[axis])
                # Only the requested axis may have changed length.
                expected_shape = list(padded.shape)
                expected_shape[axis] = original.shape[axis]
                self.assertEqual(original.shape, tuple(expected_shape))
                # The original values sit untouched in the leading corner.
                self.assertAllEqual(original, padded[:3, :5])
def make_embedding(self, x):
    """Embed `x` via three regularized, batch-normalized 1-D conv layers."""
    hparams = self.hparams
    for n_filters in (1024, 1024, hparams.hidden_size):
        # Keep the sequence axis at least 13 wide so 'valid' convs stay legal.
        x = model_utils.pad_up_to(x, size=13, axis=1)
        x = self.dropout(x)
        regularizer = self.make_keras_regularizer()
        conv = keras.layers.Convolution1D(
            n_filters,
            hparams.filter_width,
            border_mode='valid',
            subsample_length=1,
            W_regularizer=regularizer,
            activation=None)
        x = conv(x)
        # Register the conv's weight penalty with the model's loss.
        self.apply_keras_regularizer(regularizer)
        x = self.batch_norm(x)
        x = tf.nn.relu(x)
    # Collapse the sequence axis with max-over-time pooling.
    return tf.reduce_max(x, reduction_indices=1)
def make_embedding(self, x):
    # Embed a batch of sequences: three strided 1-D convolutions followed by
    # an LSTM; the embedding is the LSTM output at the final timestep.
    # NOTE(review): assumes `x` is (batch, time, features) — confirm at caller.
    with self.graph.as_default():
        # Pad the sequence axis to at least 29 so the stack of 'valid'
        # convolutions below never runs out of width.
        x = model_utils.pad_up_to(x, size=29, axis=1)
        x = players.Convolution1D(128, 5, border_mode='valid',
                                  subsample_length=2, activation='relu')(x)
        x = players.Convolution1D(512, 5, border_mode='valid',
                                  subsample_length=2, activation='relu')(x)
        # Final conv projects to the embedding width (stride defaults to 1).
        x = players.Convolution1D(self.embedding_size, 5, border_mode='valid',
                                  activation='relu')(x)
        # Fixed seed keeps the LSTM's initial weights reproducible.
        initializer = tf.truncated_normal_initializer(stddev=0.01, seed=1337)
        cell = tf.contrib.rnn.LSTMCell(self.embedding_size,
                                       use_peepholes=False,
                                       initializer=initializer,
                                       num_proj=None,
                                       num_unit_shards=1,
                                       num_proj_shards=1,
                                       forget_bias=1.0,
                                       state_is_tuple=False)
        # sequence_length=None: every example is run for the full (padded)
        # number of timesteps.
        x, _ = tf.nn.dynamic_rnn(cell, x,
                                 sequence_length=None,
                                 initial_state=None,
                                 dtype='float32',
                                 parallel_iterations=32,
                                 swap_memory=False)
        # Slice out the last timestep for every batch element:
        # begin = [0, T-1, 0], size = [-1, 1, -1] → shape (batch, 1, emb).
        last_timestep = tf.shape(x)[1]
        indices = tf.stack([0, last_timestep - 1, 0])
        indices = tf.cast(indices, 'int32')
        embedded = tf.slice(x, indices, [-1, 1, -1])
        # Drop the singleton time axis → (batch, embedding_size).
        embedded = tf.squeeze(embedded, [1])
        embedded.set_shape((None, self.embedding_size))
        return embedded
def make_embedding(self, x):
    """Map sequences to fixed-size embeddings via a conv stack + max pooling."""
    with self.graph.as_default():
        # Guarantee a minimum sequence length of 13 for the 'valid' convs.
        hidden = model_utils.pad_up_to(x, size=13, axis=1)
        for n_filters in (1024, 1024, self.embedding_size):
            hidden = players.Convolution1D(
                n_filters, 5, border_mode='valid',
                subsample_length=1, activation='relu')(hidden)
        # Collapse the sequence axis with max-over-time pooling.
        return tf.reduce_max(hidden, reduction_indices=1)