def test_input_fifo_queue(self):
  """Test InputFifoQueue can be invoked."""
  batch_size = 10
  n_features = 5
  in_tensor = np.random.rand(batch_size, n_features)
  tf.reset_default_graph()
  with self.test_session() as sess:
    in_tensor = TensorWrapper(
        tf.convert_to_tensor(in_tensor, dtype=tf.float32), name="input")
    InputFifoQueue([(batch_size, n_features)], ["input"])(in_tensor)
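
# For reference, a minimal sketch of the raw TF 1.x queue mechanics that
# InputFifoQueue builds on. This uses plain tf.FIFOQueue directly; the
# function name and constants below are illustrative, not part of the
# DeepChem API.
def _fifo_queue_sketch():
  data = tf.convert_to_tensor(np.random.rand(10, 5), dtype=tf.float32)
  queue = tf.FIFOQueue(capacity=5, dtypes=[tf.float32], shapes=[(10, 5)])
  enqueue_op = queue.enqueue([data])
  batch = queue.dequeue()
  with tf.Session() as sess:
    sess.run(enqueue_op)
    assert sess.run(batch).shape == (10, 5)
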
def add_adapter(self, all_layers, task, layer_num):
  """Add an adapter connection for given task/layer combo."""
  i = layer_num
  prev_layers = []
  trainable_layers = []
  if i < len(self.layer_sizes):
    layer_sizes = self.layer_sizes
    alpha_init_stddev = self.alpha_init_stddevs[i]
    weight_init_stddev = self.weight_init_stddevs[i]
    bias_init_const = self.bias_init_consts[i]
  elif i == len(self.layer_sizes):
    # Handle output layer: one extra unit on top of the hidden stack.
    layer_sizes = self.layer_sizes + [1]
    alpha_init_stddev = self.alpha_init_stddevs[-1]
    weight_init_stddev = self.weight_init_stddevs[-1]
    bias_init_const = self.bias_init_consts[-1]
  else:
    raise ValueError("layer_num too large for add_adapter.")
  # Iterate over all previous tasks.
  for prev_task in range(task):
    prev_layers.append(all_layers[(i - 1, prev_task)])
  # prev_layers is a list with elements of size
  # (batch_size, layer_sizes[i-1])
  prev_layer = Concat(axis=1, in_layers=prev_layers)
  with self._get_tf("Graph").as_default():
    alpha = TensorWrapper(
        tf.Variable(
            tf.truncated_normal((1,), stddev=alpha_init_stddev),
            name="alpha_layer_%d_task%d" % (i, task)))
    trainable_layers.append(alpha)
  prev_layer = prev_layer * alpha
  dense1 = Dense(
      in_layers=[prev_layer],
      out_channels=layer_sizes[i - 1],
      activation_fn=None,
      weights_initializer=TFWrapper(
          tf.truncated_normal_initializer, stddev=weight_init_stddev),
      biases_initializer=TFWrapper(
          tf.constant_initializer, value=bias_init_const))
  trainable_layers.append(dense1)
  dense2 = Dense(
      in_layers=[dense1],
      out_channels=layer_sizes[i],
      activation_fn=None,
      weights_initializer=TFWrapper(
          tf.truncated_normal_initializer, stddev=weight_init_stddev),
      biases_initializer=None)
  trainable_layers.append(dense2)
  return dense2, trainable_layers
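
# Shape sketch of the adapter computation above (a numpy stand-in, purely
# illustrative): concatenate the previous tasks' layer-(i-1) activations,
# scale by the learned scalar alpha, project back down to layer_sizes[i-1]
# (dense1), then map up to layer_sizes[i] (dense2). All names and sizes
# here are hypothetical.
def _adapter_shape_sketch():
  batch_size, prev_width, out_width, n_prev_tasks = 8, 16, 32, 3
  prev = [np.random.rand(batch_size, prev_width) for _ in range(n_prev_tasks)]
  concat = np.concatenate(prev, axis=1)                      # (8, 48), like Concat
  alpha = 0.1                                                # learned scalar gate
  v = np.random.rand(n_prev_tasks * prev_width, prev_width)  # like dense1
  u = np.random.rand(prev_width, out_width)                  # like dense2
  adapter_out = (alpha * concat).dot(v).dot(u)               # (8, 32)
  assert adapter_out.shape == (batch_size, out_width)
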
def build_graph(self):
  features = Feature(shape=(None, self.n_features))
  last_layer = features
  for layer_size in self.encoder_layers:
    last_layer = Dense(
        in_layers=last_layer, activation_fn=tf.nn.elu, out_channels=layer_size)
  self.mean = Dense(in_layers=last_layer, activation_fn=None, out_channels=1)
  self.std = Dense(in_layers=last_layer, activation_fn=None, out_channels=1)
  readout = CombineMeanStd([self.mean, self.std], training_only=True)
  last_layer = readout
  for layer_size in self.decoder_layers:
    # Chain each decoder layer off the previous one; wiring every layer
    # directly to the latent readout would skip the intermediate layers.
    last_layer = Dense(
        in_layers=last_layer, activation_fn=tf.nn.elu, out_channels=layer_size)
  self.reconstruction = Dense(
      in_layers=last_layer, activation_fn=None, out_channels=self.n_features)
  weights = Weights(shape=(None, self.n_features))
  reproduction_loss = L2Loss(
      in_layers=[features, self.reconstruction, weights])
  reproduction_loss = ReduceSum(in_layers=reproduction_loss, axis=0)
  global_step = TensorWrapper(self._get_tf("GlobalStep"))
  kl_loss = KLDivergenceLoss(
      in_layers=[self.mean, self.std, global_step],
      annealing_start_step=self.kl_annealing_start_step,
      annealing_stop_step=self.kl_annealing_stop_step)
  loss = Add(in_layers=[kl_loss, reproduction_loss], weights=[0.5, 1])
  self.add_output(self.mean)
  self.add_output(self.reconstruction)
  self.set_loss(loss)
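
# Sketch of the linear KL-annealing ramp that a loss like KLDivergenceLoss
# typically applies between annealing_start_step and annealing_stop_step
# (a pure-Python illustration; the actual schedule lives inside the layer).
# The annealed weight scales the standard Gaussian VAE KL term,
# 0.5 * (mu^2 + sigma^2 - log(sigma^2) - 1).
def _kl_weight_sketch(global_step, start_step, stop_step):
  if global_step <= start_step:
    return 0.0
  if global_step >= stop_step:
    return 1.0
  return float(global_step - start_step) / float(stop_step - start_step)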