def build_train_graph_for_RVAE(rvae_block, look_behind_length=0):
    token_emb_size = get_size_of_input_vecotrs(rvae_block)

    c = td.Composition()
    with c.scope():
        padded_input_sequence = td.Map(td.Vector(token_emb_size)).reads(c.input)

        network_output = rvae_block
        network_output.reads(padded_input_sequence)

        un_normalised_token_probs = td.GetItem(0).reads(network_output)
        mus_and_log_sigs = td.GetItem(1).reads(network_output)

        input_sequence = td.Slice(start=look_behind_length).reads(padded_input_sequence)

        # TODO: metric that output of rnn is the same as input sequence
        cross_entropy_loss = td.ZipWith(td.Function(softmax_crossentropy)) >> td.Mean()
        cross_entropy_loss.reads(un_normalised_token_probs, input_sequence)
        kl_loss = td.Function(kl_divergence)
        kl_loss.reads(mus_and_log_sigs)

        td.Metric('cross_entropy_loss').reads(cross_entropy_loss)
        td.Metric('kl_loss').reads(kl_loss)

        c.output.reads(td.Void())
    return c
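# Hedged usage sketch (not from the original source): one way the training
# graph above might be compiled and its loss metrics fetched with
# TensorFlow Fold. `rvae_block` and `train_inputs` (a list of padded
# token-vector sequences) are assumed/hypothetical names.
train_graph = build_train_graph_for_RVAE(rvae_block, look_behind_length=1)
compiler = td.Compiler.create(train_graph)
cross_entropy = tf.reduce_mean(compiler.metric_tensors['cross_entropy_loss'])
kl = tf.reduce_mean(compiler.metric_tensors['kl_loss'])
total_loss = cross_entropy + kl
feed_dict = compiler.build_feed_dict(train_inputs)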
def build_token_level_RVAE(z_size, token_emb_size, look_behind_length):
    c = td.Composition()
    c.set_input_type(td.SequenceType(td.TensorType(([token_emb_size]), 'float32')))
    with c.scope():
        padded_input_sequence = c.input

        # build encoder block
        encoder_rnn_cell = build_program_encoder(default_gru_cell(2 * z_size))
        output_sequence = td.RNN(encoder_rnn_cell) >> td.GetItem(0)
        mus_and_log_sigs = output_sequence >> td.GetItem(-1)

        reparam_z = resampling_block(z_size)

        if look_behind_length > 0:
            decoder_input_sequence = (
                td.Slice(stop=-1) >> td.NGrams(look_behind_length) >> td.Map(td.Concat())
            )
        else:
            decoder_input_sequence = td.Map(
                td.Void() >> td.FromTensor(tf.zeros((0, ))))

        # build decoder block
        un_normalised_token_probs = build_program_decoder(
            token_emb_size, default_gru_cell(z_size), just_tokens=True)

        # remove padding for input sequence
        input_sequence = td.Slice(start=look_behind_length)
        input_sequence.reads(padded_input_sequence)

        mus_and_log_sigs.reads(input_sequence)
        reparam_z.reads(mus_and_log_sigs)

        decoder_input_sequence.reads(padded_input_sequence)
        td.Metric('encoder_sequence_length').reads(td.Length().reads(input_sequence))
        td.Metric('decoder_sequence_length').reads(td.Length().reads(decoder_input_sequence))

        un_normalised_token_probs.reads(decoder_input_sequence, reparam_z)

        c.output.reads(un_normalised_token_probs, mus_and_log_sigs)
    return c
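# Hedged sketch (assumption): default_gru_cell and build_program_encoder are
# referenced above but not defined in these snippets. Based on the inline GRU
# cells used in build_VAE further down, default_gru_cell is presumably a
# ScopedLayer-wrapped GRUCell roughly like this; the scope name is illustrative.
def default_gru_cell(num_units, scope_name='gru_cell'):
    return td.ScopedLayer(
        tf.contrib.rnn.GRUCell(num_units=num_units, activation=tf.tanh),
        scope_name)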
def bidirectional_dynamic_FC(fw_cell, bw_cell, hidden):
    bidir_conv_lstm = td.Composition()
    with bidir_conv_lstm.scope():
        fw_seq = td.Identity().reads(bidir_conv_lstm.input[0])
        labels = (td.GetItem(1) >> td.Map(td.Metric("labels")) >> td.Void()).reads(
            bidir_conv_lstm.input)
        bw_seq = td.Slice(step=-1).reads(fw_seq)

        forward_dir = (td.RNN(fw_cell) >> td.GetItem(0)).reads(fw_seq)
        back_dir = (td.RNN(bw_cell) >> td.GetItem(0)).reads(bw_seq)
        back_to_leftright = td.Slice(step=-1).reads(back_dir)

        output_transform = td.FC(1, activation=None)

        bidir_common = (td.ZipWith(
            td.Concat() >> output_transform >> td.Metric('logits'))).reads(
                forward_dir, back_to_leftright)

        bidir_conv_lstm.output.reads(bidir_common)
    return bidir_conv_lstm
def __init__(self, image_feat_grid, text_seq_batch, seq_length_batch,
             T_decoder, num_vocab_txt, embed_dim_txt, num_vocab_nmn,
             embed_dim_nmn, lstm_dim, num_layers, assembler,
             encoder_dropout, decoder_dropout, decoder_sampling,
             num_choices, use_qpn, qpn_dropout, reduce_visfeat_dim=False,
             new_visfeat_dim=256, use_gt_layout=None, gt_layout_batch=None,
             scope='neural_module_network', reuse=None):
    with tf.variable_scope(scope, reuse=reuse):
        # Part 0: Visual feature from CNN
        self.reduce_visfeat_dim = reduce_visfeat_dim
        if reduce_visfeat_dim:
            # use an extra linear 1x1 conv layer (without ReLU)
            # to reduce the feature dimension
            with tf.variable_scope('reduce_visfeat_dim'):
                image_feat_grid = conv('conv_reduce_visfeat_dim',
                                       image_feat_grid, kernel_size=1,
                                       stride=1, output_dim=new_visfeat_dim)
            print('visual feature dimension reduced to %d' % new_visfeat_dim)
        self.image_feat_grid = image_feat_grid

        # Part 1: Seq2seq RNN to generate module layout tokens
        with tf.variable_scope('layout_generation'):
            att_seq2seq = AttentionSeq2Seq(
                text_seq_batch, seq_length_batch, T_decoder, num_vocab_txt,
                embed_dim_txt, num_vocab_nmn, embed_dim_nmn, lstm_dim,
                num_layers, assembler, encoder_dropout, decoder_dropout,
                decoder_sampling, use_gt_layout, gt_layout_batch)
            self.att_seq2seq = att_seq2seq
            predicted_tokens = att_seq2seq.predicted_tokens
            token_probs = att_seq2seq.token_probs
            word_vecs = att_seq2seq.word_vecs
            neg_entropy = att_seq2seq.neg_entropy
            self.atts = att_seq2seq.atts

            self.predicted_tokens = predicted_tokens
            self.token_probs = token_probs
            self.word_vecs = word_vecs
            self.neg_entropy = neg_entropy

            # log probability of each generated sequence
            self.log_seq_prob = tf.reduce_sum(tf.log(token_probs), axis=0)

        # Part 2: Neural Module Network
        with tf.variable_scope('layout_execution'):
            modules = Modules(image_feat_grid, word_vecs, None, num_choices)
            self.modules = modules
            # Recursion of modules
            att_shape = image_feat_grid.get_shape().as_list()[1:-1] + [1]
            # Forward declaration of module recursion
            att_expr_decl = td.ForwardDeclaration(td.PyObjectType(),
                                                  td.TensorType(att_shape))
            # _Scene
            case_scene = td.Record([('time_idx', td.Scalar(dtype='int32')),
                                    ('batch_idx', td.Scalar(dtype='int32'))])
            case_scene = case_scene >> td.Function(modules.SceneModule)
            # _Find
            case_find = td.Record([('time_idx', td.Scalar(dtype='int32')),
                                   ('batch_idx', td.Scalar(dtype='int32'))])
            case_find = case_find >> td.Function(modules.FindModule)
            # _Filter
            case_filter = td.Record([('input_0', att_expr_decl()),
                                     ('time_idx', td.Scalar(dtype='int32')),
                                     ('batch_idx', td.Scalar(dtype='int32'))])
            case_filter = case_filter >> td.Function(modules.FilterModule)
            # _FindSameProperty
            case_find_same_property = td.Record([
                ('input_0', att_expr_decl()),
                ('time_idx', td.Scalar(dtype='int32')),
                ('batch_idx', td.Scalar(dtype='int32'))])
            case_find_same_property = case_find_same_property >> \
                td.Function(modules.FindSamePropertyModule)
            # _Transform
            case_transform = td.Record([('input_0', att_expr_decl()),
                                        ('time_idx', td.Scalar('int32')),
                                        ('batch_idx', td.Scalar('int32'))])
            case_transform = case_transform >> td.Function(modules.TransformModule)
            # _And
            case_and = td.Record([('input_0', att_expr_decl()),
                                  ('input_1', att_expr_decl()),
                                  ('time_idx', td.Scalar('int32')),
                                  ('batch_idx', td.Scalar('int32'))])
            case_and = case_and >> td.Function(modules.AndModule)
            # _Or
            case_or = td.Record([('input_0', att_expr_decl()),
                                 ('input_1', att_expr_decl()),
                                 ('time_idx', td.Scalar('int32')),
                                 ('batch_idx', td.Scalar('int32'))])
            case_or = case_or >> td.Function(modules.OrModule)
            # _Exist
            case_exist = td.Record([('input_0', att_expr_decl()),
                                    ('time_idx', td.Scalar('int32')),
                                    ('batch_idx', td.Scalar('int32'))])
            case_exist = case_exist >> td.Function(modules.ExistModule)
            # _Count
            case_count = td.Record([('input_0', att_expr_decl()),
                                    ('time_idx', td.Scalar('int32')),
                                    ('batch_idx', td.Scalar('int32'))])
            case_count = case_count >> td.Function(modules.CountModule)
            # _EqualNum
            case_equal_num = td.Record([('input_0', att_expr_decl()),
                                        ('input_1', att_expr_decl()),
                                        ('time_idx', td.Scalar('int32')),
                                        ('batch_idx', td.Scalar('int32'))])
            case_equal_num = case_equal_num >> td.Function(modules.EqualNumModule)
            # _MoreNum
            case_more_num = td.Record([('input_0', att_expr_decl()),
                                       ('input_1', att_expr_decl()),
                                       ('time_idx', td.Scalar('int32')),
                                       ('batch_idx', td.Scalar('int32'))])
            case_more_num = case_more_num >> td.Function(modules.MoreNumModule)
            # _LessNum
            case_less_num = td.Record([('input_0', att_expr_decl()),
                                       ('input_1', att_expr_decl()),
                                       ('time_idx', td.Scalar('int32')),
                                       ('batch_idx', td.Scalar('int32'))])
            case_less_num = case_less_num >> td.Function(modules.LessNumModule)
            # _SameProperty
            case_same_property = td.Record([('input_0', att_expr_decl()),
                                            ('input_1', att_expr_decl()),
                                            ('time_idx', td.Scalar('int32')),
                                            ('batch_idx', td.Scalar('int32'))])
            case_same_property = case_same_property >> \
                td.Function(modules.SamePropertyModule)
            # _Describe
            case_describe = td.Record([('input_0', att_expr_decl()),
                                       ('time_idx', td.Scalar('int32')),
                                       ('batch_idx', td.Scalar('int32'))])
            case_describe = case_describe >> \
                td.Function(modules.DescribeModule)

            recursion_cases = td.OneOf(td.GetItem('module'), {
                '_Scene': case_scene,
                '_Find': case_find,
                '_Filter': case_filter,
                '_FindSameProperty': case_find_same_property,
                '_Transform': case_transform,
                '_And': case_and,
                '_Or': case_or})
            att_expr_decl.resolve_to(recursion_cases)

            # For invalid expressions, define a dummy answer
            # so that all answers have the same form
            dummy_scores = td.Void() >> td.FromTensor(
                np.zeros(num_choices, np.float32))
            output_scores = td.OneOf(td.GetItem('module'), {
                '_Exist': case_exist,
                '_Count': case_count,
                '_EqualNum': case_equal_num,
                '_MoreNum': case_more_num,
                '_LessNum': case_less_num,
                '_SameProperty': case_same_property,
                '_Describe': case_describe,
                INVALID_EXPR: dummy_scores})

            # compile and get the output scores
            self.compiler = td.Compiler.create(output_scores)
            self.scores_nmn = self.compiler.output_tensors[0]

        # Add a question prior network if specified
        self.use_qpn = use_qpn
        self.qpn_dropout = qpn_dropout
        if use_qpn:
            self.scores_qpn = question_prior_net(att_seq2seq.encoder_states,
                                                 num_choices, qpn_dropout)
            self.scores = self.scores_nmn + self.scores_qpn
        else:
            self.scores = self.scores_nmn

        # Regularization: Entropy + L2
        self.entropy_reg = tf.reduce_mean(neg_entropy)
        module_weights = [v for v in tf.trainable_variables()
                          if (scope in v.op.name and
                              v.op.name.endswith('weights'))]
        self.l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in module_weights])
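# Hedged sketch (assumption, not from the source): one plausible way the
# scores and regularisers built in the constructor above could be combined
# into a training objective. answer_label_batch and the weight constants are
# illustrative only.
softmax_loss_per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=self.scores, labels=answer_label_batch)
answer_loss = tf.reduce_mean(softmax_loss_per_sample)
total_loss = (answer_loss
              + 0.01 * self.entropy_reg   # minimise neg-entropy, i.e. keep layout sampling diverse
              + 1e-5 * self.l2_reg)       # weight decay on module weights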
def _compile(self):
    with self.sess.as_default():
        import tensorflow_fold as td

        output_size = len(self.labels)
        self.keep_prob = tf.placeholder_with_default(tf.constant(1.0), shape=None)

        fshape = (self.window_size * (self.char_embedding_size +
                                      self.char_feature_embedding_size),
                  self.num_filters)
        filt_w3 = tf.Variable(tf.random_normal(fshape, stddev=0.05))

        def CNN_Window3(filters):
            return td.Function(lambda a, b, c: cnn_operation([a, b, c], filters))

        def cnn_operation(window_sequences, filters):
            windows = tf.concat(window_sequences, axis=-1)
            products = tf.multiply(tf.expand_dims(windows, axis=-1), filters)
            return tf.reduce_sum(products, axis=-2)

        char_emb = td.Embedding(num_buckets=self.char_buckets,
                                num_units_out=self.char_embedding_size)

        cnn_layer = (td.NGrams(self.window_size)
                     >> td.Map(CNN_Window3(filt_w3))
                     >> td.Max())

        # --------- char features
        def charfeature_lookup(c):
            if c in string.ascii_lowercase:
                return 0
            elif c in string.ascii_uppercase:
                return 1
            elif c in string.punctuation:
                return 2
            else:
                return 3

        char_input = td.Map(td.InputTransform(lambda c: ord(c.lower()))
                            >> td.Scalar('int32')
                            >> char_emb)

        char_features = td.Map(td.InputTransform(charfeature_lookup)
                               >> td.Scalar(dtype='int32')
                               >> td.Embedding(
                                   num_buckets=4,
                                   num_units_out=self.char_feature_embedding_size))

        charlevel = (td.InputTransform(lambda s: ['~'] + [c for c in s] + ['~'])
                     >> td.AllOf(char_input, char_features)
                     >> td.ZipWith(td.Concat())
                     >> cnn_layer)

        # --------- word features
        word_emb = td.Embedding(num_buckets=len(self.word_vocab),
                                num_units_out=self.embedding_size,
                                initializer=self.word_embeddings)

        wordlookup = lambda w: (self.word_vocab.index(w.lower())
                                if w.lower() in self.word_vocab else 0)

        wordinput = (td.InputTransform(wordlookup)
                     >> td.Scalar(dtype='int32')
                     >> word_emb)

        def wordfeature_lookup(w):
            if re.match('^[a-z]+$', w):
                return 0
            elif re.match('^[A-Z][a-z]+$', w):
                return 1
            elif re.match('^[A-Z]+$', w):
                return 2
            elif re.match('^[A-Za-z]+$', w):
                return 3
            else:
                return 4

        wordfeature = (td.InputTransform(wordfeature_lookup)
                       >> td.Scalar(dtype='int32')
                       >> td.Embedding(num_buckets=5, num_units_out=32))

        # --------- bidirectional RNN over word-level inputs
        rnn_fwdcell = td.ScopedLayer(tf.contrib.rnn.LSTMCell(
            num_units=self.rnn_dim), 'lstm_fwd')
        fwdlayer = td.RNN(rnn_fwdcell) >> td.GetItem(0)

        rnn_bwdcell = td.ScopedLayer(tf.contrib.rnn.LSTMCell(
            num_units=self.rnn_dim), 'lstm_bwd')
        bwdlayer = (td.Slice(step=-1)
                    >> td.RNN(rnn_bwdcell)
                    >> td.GetItem(0)
                    >> td.Slice(step=-1))

        rnn_layer = td.AllOf(fwdlayer, bwdlayer) >> td.ZipWith(td.Concat())

        output_layer = td.FC(output_size,
                             input_keep_prob=self.keep_prob,
                             activation=None)

        wordlevel = td.AllOf(wordinput, wordfeature) >> td.Concat()

        network = (td.Map(td.AllOf(wordlevel, charlevel) >> td.Concat())
                   >> rnn_layer
                   >> td.Map(output_layer)
                   >> td.Map(td.Metric('y_out'))) >> td.Void()

        groundlabels = td.Map(td.Vector(output_size, dtype=tf.int32)
                              >> td.Metric('y_true')) >> td.Void()

        self.compiler = td.Compiler.create((network, groundlabels))

        self.y_out = self.compiler.metric_tensors['y_out']
        self.y_true = self.compiler.metric_tensors['y_true']

        self.y_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                logits=self.y_out, labels=self.y_true))

        self.y_prob = tf.nn.softmax(self.y_out)
        self.y_true_idx = tf.argmax(self.y_true, axis=-1)
        self.y_pred_idx = tf.argmax(self.y_prob, axis=-1)
        self.y_pred = tf.one_hot(self.y_pred_idx, depth=output_size, dtype=tf.int32)

        epoch_step = tf.Variable(0, trainable=False)
        self.epoch_step_op = tf.assign(epoch_step, epoch_step + 1)

        lrate_decay = tf.train.exponential_decay(self.lrate, epoch_step,
                                                 1, self.decay)

        if self.optimizer == 'adam':
            self.opt = tf.train.AdamOptimizer(learning_rate=lrate_decay)
        elif self.optimizer == 'adagrad':
            self.opt = tf.train.AdagradOptimizer(learning_rate=lrate_decay,
                                                 initial_accumulator_value=1e-08)
        elif self.optimizer == 'rmsprop':
            self.opt = tf.train.RMSPropOptimizer(learning_rate=lrate_decay,
                                                 epsilon=1e-08)
        else:
            raise Exception(('The optimizer {} is not in list of available ' +
                             'optimizers: default, adam, adagrad, rmsprop.')
                            .format(self.optimizer))

        # apply learning multiplier on embedding learning rate
        embeds = [word_emb.weights]
        grads_and_vars = self.opt.compute_gradients(self.y_loss)
        found = 0
        for i, (grad, var) in enumerate(grads_and_vars):
            if var in embeds:
                found += 1
                grad = tf.scalar_mul(self.embedding_factor, grad)
                grads_and_vars[i] = (grad, var)
        assert found == len(embeds)  # internal consistency check

        self.train_step = self.opt.apply_gradients(grads_and_vars)

        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver(max_to_keep=100)
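# Hedged sketch (not part of the original class): one way a training step
# could be run with the compiler built in _compile above. The method name
# train_batch and the exact example format (pairs of a word sequence and a
# matching list of one-hot label vectors) are assumptions for illustration.
def train_batch(self, batch):
    # batch: list of (words, label_vectors) pairs matching the
    # (network, groundlabels) record the compiler was created with
    feed_dict = self.compiler.build_feed_dict(batch)
    feed_dict[self.keep_prob] = 0.7  # enable dropout only during training
    _, loss = self.sess.run([self.train_step, self.y_loss], feed_dict=feed_dict)
    return loss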
def __init__(self, config, kb, text_seq_batch, seq_length_batch,
             num_vocab_txt, num_vocab_nmn, EOS_idx, num_choices,
             decoder_sampling, use_gt_layout=None, gt_layout_batch=None,
             scope='neural_module_network', reuse=None):
    with tf.variable_scope(scope, reuse=reuse):
        # Part 1: Seq2seq RNN to generate module layout tokens
        embedding_mat = tf.get_variable(
            'embedding_mat', [num_vocab_txt, config.embed_dim_txt],
            initializer=tf.contrib.layers.xavier_initializer())
        with tf.variable_scope('layout_generation'):
            att_seq2seq = netgen_att.AttentionSeq2Seq(
                config, text_seq_batch, seq_length_batch, num_vocab_txt,
                num_vocab_nmn, EOS_idx, decoder_sampling, embedding_mat,
                use_gt_layout, gt_layout_batch)
            self.att_seq2seq = att_seq2seq
            predicted_tokens = att_seq2seq.predicted_tokens
            token_probs = att_seq2seq.token_probs
            word_vecs = att_seq2seq.word_vecs
            neg_entropy = att_seq2seq.neg_entropy
            self.atts = att_seq2seq.atts

            self.predicted_tokens = predicted_tokens
            self.token_probs = token_probs
            self.word_vecs = word_vecs
            self.neg_entropy = neg_entropy

            # log probability of each generated sequence
            self.log_seq_prob = tf.reduce_sum(tf.log(token_probs), axis=0)

        # Part 2: Neural Module Network
        with tf.variable_scope('layout_execution'):
            modules = Modules(config, kb, word_vecs, num_choices, embedding_mat)
            self.modules = modules
            # Recursion of modules
            att_shape = [len(kb)]
            # Forward declaration of module recursion
            att_expr_decl = td.ForwardDeclaration(td.PyObjectType(),
                                                  td.TensorType(att_shape))
            # _key_find
            case_key_find = td.Record([
                ('time_idx', td.Scalar(dtype='int32')),
                ('batch_idx', td.Scalar(dtype='int32'))
            ])
            case_key_find = case_key_find >> td.ScopedLayer(
                modules.KeyFindModule, name_or_scope='KeyFindModule')
            # _key_filter
            case_key_filter = td.Record([('input_0', att_expr_decl()),
                                         ('time_idx', td.Scalar('int32')),
                                         ('batch_idx', td.Scalar('int32'))])
            case_key_filter = case_key_filter >> td.ScopedLayer(
                modules.KeyFilterModule, name_or_scope='KeyFilterModule')

            recursion_cases = td.OneOf(td.GetItem('module'), {
                '_key_find': case_key_find,
                '_key_filter': case_key_filter
            })
            att_expr_decl.resolve_to(recursion_cases)

            # _val_desc: output scores for choice (for valid expressions)
            predicted_scores = td.Record([('input_0', recursion_cases),
                                          ('time_idx', td.Scalar('int32')),
                                          ('batch_idx', td.Scalar('int32'))])
            predicted_scores = predicted_scores >> td.ScopedLayer(
                modules.ValDescribeModule, name_or_scope='ValDescribeModule')

            # For invalid expressions, define a dummy answer
            # so that all answers have the same form
            INVALID = assembler.INVALID_EXPR
            dummy_scores = td.Void() >> td.FromTensor(
                np.zeros(num_choices, np.float32))
            output_scores = td.OneOf(td.GetItem('module'), {
                '_val_desc': predicted_scores,
                INVALID: dummy_scores
            })

            # compile and get the output scores
            self.compiler = td.Compiler.create(output_scores)
            self.scores = self.compiler.output_tensors[0]

        # Regularization: Entropy + L2
        self.entropy_reg = tf.reduce_mean(neg_entropy)
        module_weights = [
            v for v in tf.trainable_variables()
            if (scope in v.op.name and v.op.name.endswith('weights'))
        ]
        self.l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in module_weights])
def __init__(self, image_data_batch, image_mean, text_seq_batch,
             seq_length_batch, T_decoder, num_vocab_txt, embed_dim_txt,
             num_vocab_nmn, embed_dim_nmn, lstm_dim, num_layers, assembler,
             encoder_dropout, decoder_dropout, decoder_sampling,
             num_choices, use_qpn, qpn_dropout, reduce_visfeat_dim=False,
             new_visfeat_dim=128, use_gt_layout=None, gt_layout_batch=None,
             map_dim=1024, scope='neural_module_network', reuse=None):
    with tf.variable_scope(scope, reuse=reuse):
        # Part 0: Visual feature from CNN
        with tf.variable_scope('image_feature_cnn'):
            image_data_batch = image_data_batch / 255.0 - image_mean
            image_feat_grid = nlvr_convnet(image_data_batch)
            self.image_feat_grid = image_feat_grid

        # Part 1: Seq2seq RNN to generate module layout tokens
        with tf.variable_scope('layout_generation'):
            att_seq2seq = AttentionSeq2Seq(
                text_seq_batch, seq_length_batch, T_decoder, num_vocab_txt,
                embed_dim_txt, num_vocab_nmn, embed_dim_nmn, lstm_dim,
                num_layers, assembler, encoder_dropout, decoder_dropout,
                decoder_sampling, use_gt_layout, gt_layout_batch)
            self.att_seq2seq = att_seq2seq
            predicted_tokens = att_seq2seq.predicted_tokens
            token_probs = att_seq2seq.token_probs
            word_vecs = att_seq2seq.word_vecs
            neg_entropy = att_seq2seq.neg_entropy
            self.atts = att_seq2seq.atts

            self.predicted_tokens = predicted_tokens
            self.token_probs = token_probs
            self.word_vecs = word_vecs
            self.neg_entropy = neg_entropy

            # log probability of each generated sequence
            self.log_seq_prob = tf.reduce_sum(tf.log(token_probs), axis=0)

        # Part 2: Neural Module Network
        with tf.variable_scope('layout_execution'):
            modules = Modules(image_feat_grid, word_vecs, None, num_choices,
                              map_dim)
            self.modules = modules
            # Recursion of modules
            att_shape = image_feat_grid.get_shape().as_list()[1:-1] + [1]
            # Forward declaration of module recursion
            att_expr_decl = td.ForwardDeclaration(td.PyObjectType(),
                                                  td.TensorType(att_shape))
            # _Find
            case_find = td.Record([('time_idx', td.Scalar(dtype='int32')),
                                   ('batch_idx', td.Scalar(dtype='int32'))])
            case_find = case_find >> td.Function(modules.FindModule)
            # _Transform
            case_transform = td.Record([('input_0', att_expr_decl()),
                                        ('time_idx', td.Scalar('int32')),
                                        ('batch_idx', td.Scalar('int32'))])
            case_transform = case_transform >> td.Function(
                modules.TransformModule)
            # _And
            case_and = td.Record([('input_0', att_expr_decl()),
                                  ('input_1', att_expr_decl()),
                                  ('time_idx', td.Scalar('int32')),
                                  ('batch_idx', td.Scalar('int32'))])
            case_and = case_and >> td.Function(modules.AndModule)
            # _Describe
            case_describe = td.Record([('input_0', att_expr_decl()),
                                       ('time_idx', td.Scalar('int32')),
                                       ('batch_idx', td.Scalar('int32'))])
            case_describe = case_describe >> \
                td.Function(modules.DescribeModule)

            recursion_cases = td.OneOf(td.GetItem('module'), {
                '_Find': case_find,
                '_Transform': case_transform,
                '_And': case_and
            })
            att_expr_decl.resolve_to(recursion_cases)

            # For invalid expressions, define a dummy answer
            # so that all answers have the same form
            dummy_scores = td.Void() >> td.FromTensor(
                np.zeros(num_choices, np.float32))
            output_scores = td.OneOf(td.GetItem('module'), {
                '_Describe': case_describe,
                INVALID_EXPR: dummy_scores
            })

            # compile and get the output scores
            self.compiler = td.Compiler.create(output_scores)
            self.scores_nmn = self.compiler.output_tensors[0]

        # Add a question prior network if specified
        self.use_qpn = use_qpn
        self.qpn_dropout = qpn_dropout
        if use_qpn:
            self.scores_qpn = question_prior_net(
                att_seq2seq.encoder_states, num_choices, qpn_dropout)
            self.scores = self.scores_nmn + self.scores_qpn
        else:
            self.scores = self.scores_nmn

        # Regularization: Entropy + L2
        self.entropy_reg = tf.reduce_mean(neg_entropy)
        module_weights = [
            v for v in tf.trainable_variables()
            if (scope in v.op.name and v.op.name.endswith('weights'))
        ]
        self.l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in module_weights])
CONV_data = td.Record((td.Map(
    td.Vector(vsize) >> td.Function(lambda x: tf.reshape(x, [-1, vsize, 1]))),
    td.Map(td.Scalar())))
CONV_model = (CONV_data >> bidirectional_dynamic_CONV(
    multi_convLSTM_cell([vsize, vsize, vsize], [100, 100, 100]),
    multi_convLSTM_cell([vsize, vsize, vsize], [100, 100, 100])) >> td.Void())

FC_data = td.Record((td.Map(td.Vector(vsize)), td.Map(td.Scalar())))
FC_model = (FC_data >> bidirectional_dynamic_FC(
    multi_FC_cell([1000] * 5), multi_FC_cell([1000] * 5), 1000) >> td.Void())

store = data(FLAGS.data_dir + FLAGS.data_type, FLAGS.truncate)

if FLAGS.model == "lstm":
    model = FC_model
elif FLAGS.model == "convlstm":
    model = CONV_model
else:
    raise NotImplementedError(FLAGS.model)

compiler = td.Compiler.create(model)
def __init__(self, image_batch, text_seq_batch, seq_length_batch,
             T_decoder, num_vocab_txt, embed_dim_txt, num_vocab_nmn,
             embed_dim_nmn, lstm_dim, num_layers, EOS_idx,
             encoder_dropout, decoder_dropout, decoder_sampling,
             num_choices, use_gt_layout=None, gt_layout_batch=None,
             scope='neural_module_network', reuse=None):
    with tf.variable_scope(scope, reuse=reuse):
        # Part 0: Visual feature from CNN
        with tf.variable_scope('image_feature_cnn'):
            image_feat_grid = shapes_convnet(image_batch)
            self.image_feat_grid = image_feat_grid

        # Part 1: Seq2seq RNN to generate module layout tokens
        with tf.variable_scope('layout_generation'):
            att_seq2seq = nmn3_netgen_att.AttentionSeq2Seq(
                text_seq_batch, seq_length_batch, T_decoder, num_vocab_txt,
                embed_dim_txt, num_vocab_nmn, embed_dim_nmn, lstm_dim,
                num_layers, EOS_idx, encoder_dropout, decoder_dropout,
                decoder_sampling, use_gt_layout, gt_layout_batch)
            self.att_seq2seq = att_seq2seq
            predicted_tokens = att_seq2seq.predicted_tokens
            token_probs = att_seq2seq.token_probs
            word_vecs = att_seq2seq.word_vecs
            neg_entropy = att_seq2seq.neg_entropy
            self.atts = att_seq2seq.atts

            self.predicted_tokens = predicted_tokens
            self.token_probs = token_probs
            self.word_vecs = word_vecs
            self.neg_entropy = neg_entropy

            # log probability of each generated sequence
            self.log_seq_prob = tf.reduce_sum(tf.log(token_probs), axis=0)

        # Part 2: Neural Module Network
        with tf.variable_scope('layout_execution'):
            modules = Modules(image_feat_grid, word_vecs, num_choices)
            self.modules = modules
            # Recursion of modules
            att_shape = image_feat_grid.get_shape().as_list()[1:-1] + [1]
            # Forward declaration of module recursion
            att_expr_decl = td.ForwardDeclaration(td.PyObjectType(),
                                                  td.TensorType(att_shape))
            # _Find
            case_find = td.Record([('time_idx', td.Scalar(dtype='int32')),
                                   ('batch_idx', td.Scalar(dtype='int32'))])
            case_find = case_find >> \
                td.ScopedLayer(modules.FindModule, name_or_scope='FindModule')
            # _Transform
            case_transform = td.Record([('input_0', att_expr_decl()),
                                        ('time_idx', td.Scalar('int32')),
                                        ('batch_idx', td.Scalar('int32'))])
            case_transform = case_transform >> \
                td.ScopedLayer(modules.TransformModule,
                               name_or_scope='TransformModule')
            # _And
            case_and = td.Record([('input_0', att_expr_decl()),
                                  ('input_1', att_expr_decl()),
                                  ('time_idx', td.Scalar('int32')),
                                  ('batch_idx', td.Scalar('int32'))])
            case_and = case_and >> \
                td.ScopedLayer(modules.AndModule, name_or_scope='AndModule')

            recursion_cases = td.OneOf(td.GetItem('module'), {
                '_Find': case_find,
                '_Transform': case_transform,
                '_And': case_and
            })
            att_expr_decl.resolve_to(recursion_cases)

            # _Answer: output scores for choice (for valid expressions)
            predicted_scores = td.Record([('input_0', recursion_cases),
                                          ('time_idx', td.Scalar('int32')),
                                          ('batch_idx', td.Scalar('int32'))])
            predicted_scores = predicted_scores >> \
                td.ScopedLayer(modules.AnswerModule,
                               name_or_scope='AnswerModule')

            # For invalid expressions, define a dummy answer
            # so that all answers have the same form
            INVALID = nmn3_assembler.INVALID_EXPR
            dummy_scores = td.Void() >> td.FromTensor(
                np.zeros(num_choices, np.float32))
            output_scores = td.OneOf(td.GetItem('module'), {
                '_Answer': predicted_scores,
                INVALID: dummy_scores
            })

            # compile and get the output scores
            self.compiler = td.Compiler.create(output_scores)
            self.scores = self.compiler.output_tensors[0]

        # Regularization: Entropy + L2
        self.entropy_reg = tf.reduce_mean(neg_entropy)
        module_weights = [
            v for v in tf.trainable_variables()
            if (scope in v.op.name and v.op.name.endswith('weights'))
        ]
        self.l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in module_weights])
def build_VAE(z_size, token_emb_size):
    c = td.Composition()
    c.set_input_type(td.SequenceType(td.TensorType(([token_emb_size]), 'float32')))
    with c.scope():
        input_sequence = c.input

        # encoder composition  TODO: refactor this out
        encoder_rnn_cell = td.ScopedLayer(
            tf.contrib.rnn.GRUCell(
                num_units=2 * z_size,
                activation=tf.tanh
            ),
            'encoder'
        )
        output_sequence = td.RNN(encoder_rnn_cell) >> td.GetItem(0)
        mus_and_log_sigs = output_sequence >> td.GetItem(-1)

        reparam_z = td.Function(resampling, name='resampling')
        reparam_z.set_input_type(td.TensorType((2 * z_size,)))
        reparam_z.set_output_type(td.TensorType((z_size,)))

        # A list of the same length as input_sequence, but with empty values;
        # this is what the decoder maps over
        list_of_nothing = td.Map(
            td.Void() >> td.FromTensor(tf.zeros((0,)))
        )

        # decoder composition  TODO: refactor this out
        decoder_rnn = td.ScopedLayer(
            tf.contrib.rnn.GRUCell(
                num_units=z_size,
                activation=tf.tanh
            ),
            'decoder'
        )
        decoder_rnn_output = td.RNN(
            decoder_rnn,
            initial_state_from_input=True
        ) >> td.GetItem(0)

        fc_layer = td.FC(
            token_emb_size,
            activation=tf.nn.relu,
            initializer=tf.contrib.layers.xavier_initializer()
        )
        un_normalised_token_probs = decoder_rnn_output >> td.Map(fc_layer)

        mus_and_log_sigs.reads(input_sequence)
        reparam_z.reads(mus_and_log_sigs)
        list_of_nothing.reads(input_sequence)
        un_normalised_token_probs.reads(list_of_nothing, reparam_z)

        c.output.reads(un_normalised_token_probs, mus_and_log_sigs)
    return c
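# Hedged sketch (assumption): resampling, kl_divergence and softmax_crossentropy
# are referenced in these snippets but never defined. A standard VAE
# reparameterisation and diagonal-Gaussian KL, under the convention that the
# second half of mus_and_log_sigs holds log(sigma^2), would look roughly like
# this; the convention in the original code may differ.
def resampling(mus_and_log_sigs):
    z_size = mus_and_log_sigs.get_shape().as_list()[-1] // 2
    mu = mus_and_log_sigs[:, :z_size]
    log_var = mus_and_log_sigs[:, z_size:]
    epsilon = tf.random_normal(tf.shape(mu))
    # z = mu + sigma * epsilon (reparameterisation trick)
    return mu + tf.exp(0.5 * log_var) * epsilon


def kl_divergence(mus_and_log_sigs):
    z_size = mus_and_log_sigs.get_shape().as_list()[-1] // 2
    mu = mus_and_log_sigs[:, :z_size]
    log_var = mus_and_log_sigs[:, z_size:]
    # KL(q(z|x) || N(0, I)) for a diagonal Gaussian
    return -0.5 * tf.reduce_sum(
        1 + log_var - tf.square(mu) - tf.exp(log_var), axis=-1)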
        un_normalised_token_probs = td.GetItem(0).reads(network_output)
        mus_and_log_sigs = td.GetItem(1).reads(network_output)

        cross_entropy_loss = td.ZipWith(td.Function(softmax_crossentropy)) >> td.Mean()
        cross_entropy_loss.reads(
            un_normalised_token_probs,
            input_sequence
        )
        kl_loss = td.Function(kl_divergence)
        kl_loss.reads(mus_and_log_sigs)

        td.Metric('cross_entropy_loss').reads(cross_entropy_loss)
        td.Metric('kl_loss').reads(kl_loss)

        c.output.reads(td.Void())


# Tokenised version of my code
example_input = np.array([
    1, 2, 51, 16, 4, 17, 52, 3, 53, 16, 5, 38, 6, 37, 6, 37, 6, 37, 6, 38,
    6, 37, 6, 37, 6, 38, 53, 16, 8, 9, 10, 11, 12, 13, 14, 7, 51, 17, 11,
    48, 11, 8, 52, 53, 17, 5, 37, 6, 38, 6, 37, 6, 38, 6, 38, 53, 17, 8, 9,
    10, 11, 7, 51, 9, 26, 51, 20, 9, 9, 52, 52, 0
])


def one_hotify(code_rep):
    # 54 is the token vocabulary size assumed by this example
    one_hots = np.zeros((len(code_rep), 54), dtype='int8')
    one_hots[np.arange(len(code_rep)), code_rep] = 1
    return one_hots
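# Hedged usage note: with the 54-token vocabulary assumed above, the example
# sequence can be one-hot encoded and fed to a compiled block, e.g. via
# compiler.build_feed_dict([one_hotify(example_input)]); the compiler here is
# assumed to come from td.Compiler.create on one of the blocks above.
one_hot_example = one_hotify(example_input)  # shape (len(example_input), 54)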