Example #1
def feature_detector_blk(max_depth=2):
    """Input: node dict
    Output: TensorType([hyper.conv_dim, ])
    Computes a single convolution patch; the patch depth is max_depth.
    """
    blk = td.Composition()
    with blk.scope():
        nodes_in_patch = collect_node_for_conv_patch_blk(
            max_depth=max_depth).reads(blk.input)

        # map from python object to tensors
        mapped = td.Map(
            td.Record((coding_blk(), td.Scalar(), td.Scalar(), td.Scalar(),
                       td.Scalar()))).reads(nodes_in_patch)
        # mapped = [(feature, idx, depth, max_depth), (...)]

        # compute weighted feature for each elem
        weighted = td.Map(weighted_feature_blk()).reads(mapped)
        # weighted = [fea, fea, fea, ...]

        # add together
        added = td.Reduce(td.Function(tf.add)).reads(weighted)
        # added = TensorType([hyper.conv_dim, ])

        # add bias
        biased = td.Function(tf.add).reads(added,
                                           td.FromTensor(param.get('Bconv')))
        # biased = TensorType([hyper.conv_dim, ])

        # tanh
        tanh = td.Function(tf.nn.tanh).reads(biased)
        # tanh = TensorType([hyper.conv_dim, ])

        blk.output.reads(tanh)
    return blk
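For reference, a minimal illustration of the Composition / scope() / reads wiring used above, on a toy block rather than the conv patch (nothing here comes from the source model):

import tensorflow as tf
import tensorflow_fold as td

toy = td.Composition()
with toy.scope():
    x = td.Scalar().reads(toy.input)                    # python number -> scalar tensor
    doubled = td.Function(lambda t: tf.multiply(t, 2.0)).reads(x)
    toy.output.reads(doubled)
# toy.eval(3.0) returns 6.0 (Block.eval builds its own session; debugging only)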
Example #2
def composed_embed_blk():
    leaf_case = direct_embed_blk()
    nonleaf_case = td.Composition(name='composed_embed_nonleaf')
    with nonleaf_case.scope():
        children = td.GetItem('children').reads(nonleaf_case.input)
        clen = td.Scalar().reads(td.GetItem('clen').reads(nonleaf_case.input))
        cclens = td.Map(td.GetItem('clen') >> td.Scalar()).reads(children)
        fchildren = td.Map(direct_embed_blk()).reads(children)

        initial_state = td.Composition()
        with initial_state.scope():
            initial_state.output.reads(
                td.FromTensor(tf.zeros(hyper.word_dim)),
                td.FromTensor(tf.zeros([])),
            )
        summed = td.Zip().reads(fchildren, cclens, td.Broadcast().reads(clen))
        summed = td.Fold(continous_weighted_add_blk(),
                         initial_state).reads(summed)[0]
        added = td.Function(tf.add, name='add_bias').reads(
            summed, td.FromTensor(param.get('B')))
        normed = clip_by_norm_blk().reads(added)

        act_fn = tf.nn.relu if hyper.use_relu else tf.nn.tanh
        relu = td.Function(act_fn).reads(normed)
        nonleaf_case.output.reads(relu)

    return td.OneOf(lambda node: node['clen'] == 0, {
        True: leaf_case,
        False: nonleaf_case
    })
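For context, the OneOf above dispatches purely on 'clen' and recurses through 'children'; a toy node of the assumed shape (any extra fields consumed by direct_embed_blk are omitted here):

leaf = {'clen': 0, 'children': []}              # routed to direct_embed_blk()
parent = {'clen': 2, 'children': [leaf, leaf]}  # routed to the non-leaf composition
# composed_embed_blk() maps either dict to a hyper.word_dim-sized embedding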
def build_train_graph_for_RVAE(rvae_block, look_behind_length=0):
    token_emb_size = get_size_of_input_vecotrs(rvae_block)

    c = td.Composition()
    with c.scope():
        padded_input_sequence = td.Map(td.Vector(token_emb_size)).reads(
            c.input)
        network_output = rvae_block
        network_output.reads(padded_input_sequence)

        un_normalised_token_probs = td.GetItem(0).reads(network_output)
        mus_and_log_sigs = td.GetItem(1).reads(network_output)

        input_sequence = td.Slice(
            start=look_behind_length).reads(padded_input_sequence)
        # TODO: add a metric checking that the RNN output matches the input sequence
        cross_entropy_loss = td.ZipWith(
            td.Function(softmax_crossentropy)) >> td.Mean()
        cross_entropy_loss.reads(un_normalised_token_probs, input_sequence)
        kl_loss = td.Function(kl_divergence)
        kl_loss.reads(mus_and_log_sigs)

        td.Metric('cross_entropy_loss').reads(cross_entropy_loss)
        td.Metric('kl_loss').reads(kl_loss)

        c.output.reads(td.Void())

    return c
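Since this composition ends in td.Void() and reports its losses through td.Metric, training code typically pulls them back out of the compiler; a hedged sketch (the RVAE sizes and the KL weight are illustrative; build_token_level_RVAE is defined later in this collection):

import tensorflow as tf
import tensorflow_fold as td

rvae_block = build_token_level_RVAE(z_size=16, token_emb_size=54, look_behind_length=1)
train_block = build_train_graph_for_RVAE(rvae_block, look_behind_length=1)
compiler = td.Compiler.create(train_block)
ce_loss = tf.reduce_mean(compiler.metric_tensors['cross_entropy_loss'])
kl_loss = tf.reduce_mean(compiler.metric_tensors['kl_loss'])
loss = ce_loss + 0.1 * kl_loss          # 0.1 is an arbitrary KL weight for illustration
train_op = tf.train.AdamOptimizer().minimize(loss)
# feed_dict = compiler.build_feed_dict(batch_of_padded_token_sequences)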
def build_decoder_block_for_analysis(z_size, token_emb_size, decoder_cell,
                                     input_size):

    c = td.Composition()
    c.set_input_type(
        td.TupleType(td.TensorType((z_size, )),
                     td.SequenceType(td.TensorType((input_size, )))))
    with c.scope():
        hidden_state = td.GetItem(0).reads(c.input)
        rnn_input = td.GetItem(1).reads(c.input)

        # decoder_output = build_program_decoder_for_analysis(
        #     token_emb_size, default_gru_cell(z_size)
        # )
        decoder_output = decoder_cell

        decoder_output.reads(rnn_input, hidden_state)
        decoder_rnn_output = td.GetItem(1).reads(decoder_output)
        un_normalised_token_probs = td.GetItem(0).reads(decoder_output)
        # get the first output (meant to compute only one iteration)
        c.output.reads(
            td.GetItem(0).reads(un_normalised_token_probs),
            td.GetItem(0).reads(decoder_rnn_output))

    return td.Record((td.Vector(z_size), td.Map(td.Vector(input_size)))) >> c
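Because of the td.Record wrapper, a compiled analysis block is fed (z vector, token-vector sequence) pairs; a sketch of one such input with illustrative sizes:

import numpy as np

z_size, input_size = 16, 54
example = (np.zeros(z_size, dtype='float32'),
           [np.zeros(input_size, dtype='float32')] * 3)
# analysis_blk = build_decoder_block_for_analysis(z_size, 54, decoder_cell, input_size)
# td.Compiler.create(analysis_blk).build_feed_dict([example])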
Example #5
    def linearLSTM_over_TreeLstm(self, num_classes, sent_lstm_num_units):
        self.sent_cell = td.ScopedLayer(
            tf.contrib.rnn.BasicLSTMCell(num_units=sent_lstm_num_units),
            name_or_scope=self._sent_lstm_default_scope_name)
        sent_lstm = (td.Map(self.tree_lstm.tree_lstm() >> td.Concat())
                     >> td.RNN(self.sent_cell))
        self.output_layer = td.FC(
            num_classes, activation=None,
            name=self._output_layer_default_scope_name)
        return (td.Scalar('int32'),
                sent_lstm >> td.GetItem(1) >> td.GetItem(0) >> self.output_layer) \
            >> self.set_metrics()
Example #6
def buid_sentence_expression():
    sentence_tree = td.InputTransform(lambda sentence_json: WNJsonDecoder(sentence_json))

    tree_rnn = td.ForwardDeclaration(td.PyObjectType())
    leaf_case = td.GetItem('word_vec', name='leaf_in') >> td.Vector(embedding_size)
    index_case = (td.Record({'children': td.Map(tree_rnn()) >> td.Mean(),
                             'word_vec': td.Vector(embedding_size)},
                            name='index_in')
                  >> td.Concat(name='concat_root_child')
                  >> td.FC(embedding_size, name='FC_root_child'))
    expr_sentence = td.OneOf(td.GetItem('leaf'),
                             {True: leaf_case, False: index_case},
                             name='recur_in')
    tree_rnn.resolve_to(expr_sentence)

    return sentence_tree >> expr_sentence
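A hedged sketch of driving the expression, assuming WNJsonDecoder turns raw sentence JSON into {'leaf': ..., 'word_vec': ..., 'children': [...]} nodes as the blocks above expect:

import tensorflow_fold as td

sentence_block = buid_sentence_expression()
compiler = td.Compiler.create(sentence_block)
sentence_vec, = compiler.output_tensors        # one embedding_size vector per sentence
# feed_dict = compiler.build_feed_dict(list_of_sentence_json_objects)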
def build_token_level_RVAE(z_size, token_emb_size, look_behind_length):
    c = td.Composition()
    c.set_input_type(
        td.SequenceType(td.TensorType(([token_emb_size]), 'float32')))
    with c.scope():
        padded_input_sequence = c.input
        # build encoder block
        encoder_rnn_cell = build_program_encoder(default_gru_cell(2 * z_size))

        output_sequence = td.RNN(encoder_rnn_cell) >> td.GetItem(0)
        mus_and_log_sigs = output_sequence >> td.GetItem(-1)

        reparam_z = resampling_block(z_size)

        if look_behind_length > 0:
            decoder_input_sequence = (
                td.Slice(stop=-1) >> td.NGrams(look_behind_length) >> td.Map(
                    td.Concat()))
        else:
            decoder_input_sequence = td.Map(
                td.Void() >> td.FromTensor(tf.zeros((0, ))))

        # build decoder block
        un_normalised_token_probs = build_program_decoder(
            token_emb_size, default_gru_cell(z_size), just_tokens=True)

        # remove padding for input sequence
        input_sequence = td.Slice(start=look_behind_length)
        input_sequence.reads(padded_input_sequence)

        mus_and_log_sigs.reads(input_sequence)
        reparam_z.reads(mus_and_log_sigs)

        decoder_input_sequence.reads(padded_input_sequence)
        td.Metric('encoder_sequence_length').reads(
            td.Length().reads(input_sequence))
        td.Metric('decoder_sequence_length').reads(
            td.Length().reads(decoder_input_sequence))
        un_normalised_token_probs.reads(decoder_input_sequence, reparam_z)

        c.output.reads(un_normalised_token_probs, mus_and_log_sigs)
    return c
def build_program_decoder(token_emb_size, rnn_cell, just_tokens=False):
    """
    Used for blind or 'look-behind' decoders
    """
    decoder_rnn = td.ScopedLayer(rnn_cell, 'decoder')
    decoder_rnn_output = td.RNN(decoder_rnn,
                                initial_state_from_input=True) >> td.GetItem(0)

    fc_layer = td.FC(
        token_emb_size,
        activation=tf.nn.relu,
        initializer=tf.contrib.layers.xavier_initializer(),
        name='encoder_fc'  # note: keeps the 'encoder_fc' scope name even though this layer belongs to the decoder
    )

    # un_normalised_token_probs = decoder_rnn_output >> td.Map(fc_layer)
    if just_tokens:
        return decoder_rnn_output >> td.Map(fc_layer)
    else:
        return decoder_rnn_output >> td.AllOf(td.Map(fc_layer), td.Identity())
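The just_tokens flag only controls whether the raw RNN outputs are returned alongside the per-token logits; for example (sizes are illustrative, default_gru_cell is the helper used elsewhere in this collection):

tokens_only = build_program_decoder(54, default_gru_cell(16), just_tokens=True)
tokens_and_rnn_out = build_program_decoder(54, default_gru_cell(16), just_tokens=False)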
Example #9
def tree_sum_blk(loss_blk):
    # traverse the tree to sum up the loss
    tree_sum_fwd = td.ForwardDeclaration(td.PyObjectType(), td.TensorType([]))
    tree_sum = td.Composition()
    with tree_sum.scope():
        myloss = loss_blk().reads(tree_sum.input)
        children = td.GetItem('children').reads(tree_sum.input)

        mapped = td.Map(tree_sum_fwd()).reads(children)
        summed = td.Reduce(td.Function(tf.add)).reads(mapped)
        summed = td.Function(tf.add).reads(summed, myloss)
        tree_sum.output.reads(summed)
    tree_sum_fwd.resolve_to(tree_sum)
    return tree_sum
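A minimal sketch of exercising the recursion with a stand-in loss block that charges 1.0 per node (the real loss_blk and node fields belong to the surrounding model, so everything here is illustrative):

import tensorflow as tf
import tensorflow_fold as td

unit_loss_blk = lambda: td.InputTransform(lambda node: 1.0) >> td.Scalar()
tree_loss = tree_sum_blk(unit_loss_blk)
tree_loss.set_input_type(td.PyObjectType())    # make the root input type explicit
compiler = td.Compiler.create(tree_loss)
batch_loss = tf.reduce_mean(compiler.output_tensors[0])
# a 3-node toy tree sums to 3.0:
# compiler.build_feed_dict([{'children': [{'children': []}, {'children': []}]}])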
def build_program_decoder_for_analysis(token_emb_size, rnn_cell):
    """
    Does the same as build_program_decoder, but also returns
        the final hidden state of the decoder
    """
    decoder_rnn = td.ScopedLayer(rnn_cell, 'decoder')
    decoder_rnn_output = td.RNN(decoder_rnn,
                                initial_state_from_input=True) >> td.GetItem(0)

    fc_layer = td.FC(token_emb_size,
                     activation=tf.nn.relu,
                     initializer=tf.contrib.layers.xavier_initializer(),
                     name='encoder_fc')
    # decoder_rnn_output.reads()
    un_normalised_token_probs = td.Map(fc_layer)
    return decoder_rnn_output >> td.AllOf(un_normalised_token_probs,
                                          td.Identity())
Example #11
def bidirectional_dynamic_FC(fw_cell, bw_cell, hidden):
    bidir_conv_lstm = td.Composition()
    with bidir_conv_lstm.scope():
        fw_seq = td.Identity().reads(bidir_conv_lstm.input[0])
        labels = (
            td.GetItem(1) >> td.Map(td.Metric("labels")) >> td.Void()).reads(
                bidir_conv_lstm.input)
        bw_seq = td.Slice(step=-1).reads(fw_seq)

        forward_dir = (td.RNN(fw_cell) >> td.GetItem(0)).reads(fw_seq)
        back_dir = (td.RNN(bw_cell) >> td.GetItem(0)).reads(bw_seq)
        back_to_leftright = td.Slice(step=-1).reads(back_dir)

        output_transform = td.FC(1, activation=None)

        bidir_common = (td.ZipWith(
            td.Concat() >> output_transform >> td.Metric('logits'))).reads(
                forward_dir, back_to_leftright)

        bidir_conv_lstm.output.reads(bidir_common)
    return bidir_conv_lstm
Example #12
def dynamic_pooling_blk():
    """Input: root node dic
    Output: pooled, TensorType([hyper.conv_dim, ])
    """
    leaf_case = feature_detector_blk()

    pool_fwd = td.ForwardDeclaration(td.PyObjectType(),
                                     td.TensorType([
                                         hyper.conv_dim,
                                     ]))
    pool = td.Composition()
    with pool.scope():
        cur_fea = feature_detector_blk().reads(pool.input)
        children = td.GetItem('children').reads(pool.input)

        mapped = td.Map(pool_fwd()).reads(children)
        summed = td.Reduce(td.Function(tf.maximum)).reads(mapped)
        summed = td.Function(tf.maximum).reads(summed, cur_fea)
        pool.output.reads(summed)
    pool = td.OneOf(lambda x: x['clen'] == 0, {True: leaf_case, False: pool})
    pool_fwd.resolve_to(pool)
    return pool
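The pooled hyper.conv_dim feature is what a classification head would normally consume; a hedged sketch (the extra FC layer and class count are not part of the source):

import tensorflow_fold as td

num_classes = 10                                # illustrative
logits_blk = dynamic_pooling_blk() >> td.FC(num_classes, activation=None)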
Example #13


CONV_data = td.Record((td.Map(
    td.Vector(vsize) >> td.Function(lambda x: tf.reshape(x, [-1, vsize, 1]))),
                       td.Map(td.Scalar())))
CONV_model = (CONV_data >> bidirectional_dynamic_CONV(
    multi_convLSTM_cell([vsize, vsize, vsize], [100, 100, 100]),
    multi_convLSTM_cell([vsize, vsize, vsize], [100, 100, 100])) >> td.Void())

FC_data = td.Record((td.Map(td.Vector(vsize)), td.Map(td.Scalar())))
FC_model = (FC_data >> bidirectional_dynamic_FC(multi_FC_cell(
    [1000] * 5), multi_FC_cell([1000] * 5), 1000) >> td.Void())

store = data(FLAGS.data_dir + FLAGS.data_type, FLAGS.truncate)

if FLAGS.model == "lstm":
    model = FC_model
elif FLAGS.model == "convlstm":
    model = CONV_model
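Both models end in td.Void() and expose their values through td.Metric, so (continuing the module-level snippet, in the same way as Example #16) the compiler's metric tensors are where a training loss would be built; a sketch:

compiler = td.Compiler.create(model)
logits = compiler.metric_tensors['logits']      # per-token FC(1) outputs
labels = compiler.metric_tensors['labels']      # per-token scalar targets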
Example #14
def build_VAE(z_size, token_emb_size):
    c = td.Composition()
    c.set_input_type(td.SequenceType(td.TensorType(([token_emb_size]), 'float32')))
    with c.scope():
        # input_sequence = td.Map(td.Vector(token_emb_size)).reads(c.input)
        input_sequence = c.input

        # encoder composition TODO: refactor this out
        # rnn_cell = td.ScopedLayer(
        #     tf.contrib.rnn.LSTMCell(
        #         num_units=2*z_size,
        #         initializer=tf.contrib.layers.xavier_initializer(),
        #         activation=tf.tanh
        #     ),
        #     'encoder'
        # )
        encoder_rnn_cell = td.ScopedLayer(
            tf.contrib.rnn.GRUCell(
                num_units=2*z_size,
                # initializer=tf.contrib.layers.xavier_initializer(),
                activation=tf.tanh
            ),
            'encoder'
        )
        output_sequence = td.RNN(encoder_rnn_cell) >> td.GetItem(0)
        mus_and_log_sigs = output_sequence >> td.GetItem(-1)

        # reparam_z = mus_and_log_sigs >> td.Function(resampling)
        reparam_z = td.Function(resampling, name='resampling')
        reparam_z.set_input_type(td.TensorType((2 * z_size,)))
        reparam_z.set_output_type(td.TensorType((z_size,)))

        #  A list of the same length as input_sequence, but with empty values;
        #  the decoder maps over this
        list_of_nothing = td.Map(
            td.Void() >> td.FromTensor(tf.zeros((0,)))
        )

        # decoder composition
        # TODO: refactor this out
        # decoder_rnn = td.ScopedLayer(
        #     tf.contrib.rnn.LSTMCell(
        #         num_units=z_size,
        #         initializer=tf.contrib.layers.xavier_initializer(),
        #         activation=tf.tanh
        #     ),
        #     'decoder'
        # )
        decoder_rnn = td.ScopedLayer(
            tf.contrib.rnn.GRUCell(
                num_units=z_size,
                # initializer=tf.contrib.layers.xavier_initializer(),
                activation=tf.tanh
            ),
            'decoder'
        )
        decoder_rnn_output = td.RNN(
            decoder_rnn,
            initial_state_from_input=True
        ) >> td.GetItem(0)

        fc_layer = td.FC(
            token_emb_size,
            activation=tf.nn.relu,
            initializer=tf.contrib.layers.xavier_initializer()
        )

        un_normalised_token_probs = decoder_rnn_output >> td.Map(fc_layer)

        # reparam_z.reads(input_sequence)
        mus_and_log_sigs.reads(input_sequence)
        reparam_z.reads(mus_and_log_sigs)
        list_of_nothing.reads(input_sequence)
        un_normalised_token_probs.reads(list_of_nothing, reparam_z)

        c.output.reads(un_normalised_token_probs, mus_and_log_sigs)
    return c
def build_encoder(z_size, token_emb_size):
    input_sequence = td.Map(td.Vector(token_emb_size))
    encoder_rnn_cell = build_program_encoder(default_gru_cell(2 * z_size))
    output_sequence = td.RNN(encoder_rnn_cell) >> td.GetItem(0)
    mus_and_log_sigs = output_sequence >> td.GetItem(-1)
    return input_sequence >> mus_and_log_sigs
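The encoder emits one 2 * z_size vector per sequence, which downstream code (see the kl_divergence fragment in Example #19) splits halfway into means and log-sigmas; a sketch with illustrative sizes:

import tensorflow_fold as td

z_size, token_emb_size = 16, 54
encoder = build_encoder(z_size, token_emb_size)
compiler = td.Compiler.create(encoder)
mus_and_log_sigs, = compiler.output_tensors     # shape (batch, 2 * z_size)
mus, log_sigs = mus_and_log_sigs[:, :z_size], mus_and_log_sigs[:, z_size:]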
Example #16
    def _compile(self):
        with self.sess.as_default(): 
            import tensorflow_fold as td
        
        output_size = len(self.labels)
        self.keep_prob = tf.placeholder_with_default(tf.constant(1.0),shape=None)

        fshape = (self.window_size * (self.char_embedding_size + self.char_feature_embedding_size), self.num_filters)
        filt_w3 = tf.Variable(tf.random_normal(fshape, stddev=0.05))

        def CNN_Window3(filters):
            return td.Function(lambda a, b, c: cnn_operation([a,b,c],filters))

        def cnn_operation(window_sequences,filters):
            windows = tf.concat(window_sequences,axis=-1)
            products = tf.multiply(tf.expand_dims(windows,axis=-1),filters)
            return tf.reduce_sum(products,axis=-2)

        char_emb = td.Embedding(num_buckets=self.char_buckets, 
                                num_units_out=self.char_embedding_size)
        
        cnn_layer = (td.NGrams(self.window_size) 
                        >> td.Map(CNN_Window3(filt_w3)) 
                        >> td.Max())

        # --------- char features
        
        def charfeature_lookup(c):
            if c in string.lowercase:
                return 0
            elif c in string.uppercase:
                return 1
            elif c in string.punctuation:
                return 2
            else:
                return 3

        char_input = td.Map(td.InputTransform(lambda c: ord(c.lower())) 
                            >> td.Scalar('int32') >> char_emb)
                            
        char_features = td.Map(td.InputTransform(charfeature_lookup) 
                            >> td.Scalar(dtype='int32') 
                            >> td.Embedding(num_buckets=4,
                                            num_units_out=self.char_feature_embedding_size))

        charlevel = (td.InputTransform(lambda s: ['~'] + [ c for c in s ] + ['~']) 
                        >> td.AllOf(char_input,char_features) >> td.ZipWith(td.Concat()) 
                        >> cnn_layer)        

        # --------- word features
        
        word_emb = td.Embedding(num_buckets=len(self.word_vocab),
                                num_units_out=self.embedding_size,
                                initializer=self.word_embeddings)
        
        wordlookup = lambda w: (self.word_vocab.index(w.lower()) 
                                if w.lower() in self.word_vocab else 0)
        
        wordinput = (td.InputTransform(wordlookup) 
                        >> td.Scalar(dtype='int32') 
                        >> word_emb)
        
        def wordfeature_lookup(w):
            if re.match('^[a-z]+$',w):
                return 0
            elif re.match('^[A-Z][a-z]+$',w):
                return 1
            elif re.match('^[A-Z]+$',w):
                return 2
            elif re.match('^[A-Za-z]+$',w):
                return 3
            else:
                return 4
        
        wordfeature = (td.InputTransform(wordfeature_lookup) 
                        >> td.Scalar(dtype='int32') 
                        >> td.Embedding(num_buckets=5,
                                num_units_out=32))
        
        #-----------
        
        rnn_fwdcell = td.ScopedLayer(tf.contrib.rnn.LSTMCell(
                        num_units=self.rnn_dim), 'lstm_fwd')
        fwdlayer = td.RNN(rnn_fwdcell) >> td.GetItem(0)
        
        rnn_bwdcell = td.ScopedLayer(tf.contrib.rnn.LSTMCell(
                        num_units=self.rnn_dim), 'lstm_bwd')
        bwdlayer = (td.Slice(step=-1) >> td.RNN(rnn_bwdcell) 
                    >> td.GetItem(0) >> td.Slice(step=-1))
        
        rnn_layer = td.AllOf(fwdlayer, bwdlayer) >> td.ZipWith(td.Concat())
        
        output_layer = td.FC(output_size, 
                             input_keep_prob=self.keep_prob, 
                             activation=None)
        
        wordlevel = td.AllOf(wordinput,wordfeature) >> td.Concat()
        
        network = (td.Map(td.AllOf(wordlevel,charlevel) >> td.Concat()) 
                        >> rnn_layer 
                        >> td.Map(output_layer) 
                        >> td.Map(td.Metric('y_out'))) >> td.Void()
    
        groundlabels = td.Map(td.Vector(output_size,dtype=tf.int32) 
                                >> td.Metric('y_true')) >> td.Void()
    
        self.compiler = td.Compiler.create((network, groundlabels))
        
        self.y_out = self.compiler.metric_tensors['y_out']
        self.y_true = self.compiler.metric_tensors['y_true']
        
        self.y_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
            logits=self.y_out,labels=self.y_true))

        self.y_prob = tf.nn.softmax(self.y_out)
        self.y_true_idx = tf.argmax(self.y_true,axis=-1)
        self.y_pred_idx = tf.argmax(self.y_prob,axis=-1)
        
        self.y_pred = tf.one_hot(self.y_pred_idx,depth=output_size,dtype=tf.int32)
        
        epoch_step = tf.Variable(0, trainable=False)
        self.epoch_step_op = tf.assign(epoch_step, epoch_step+1)
            
        lrate_decay = tf.train.exponential_decay(self.lrate, epoch_step, 1, self.decay)
            
        if self.optimizer == 'adam':
            self.opt = tf.train.AdamOptimizer(learning_rate=lrate_decay)
        elif self.optimizer == 'adagrad':
            self.opt = tf.train.AdagradOptimizer(learning_rate=lrate_decay,
                                                initial_accumulator_value=1e-08)
        elif self.optimizer == 'rmsprop':
            self.opt = tf.train.RMSPropOptimizer(learning_rate=lrate_decay,
                                                 epsilon=1e-08)
        else:
            raise Exception(('The optimizer {} is not in the list of available '
                            + 'optimizers: adam, adagrad, rmsprop.')
                            .format(self.optimizer))
        
        # apply learning multiplier on the embedding learning rate
        embeds = [word_emb.weights]
        grads_and_vars = self.opt.compute_gradients(self.y_loss)
        found = 0
        for i, (grad, var) in enumerate(grads_and_vars):
            if var in embeds:
                found += 1
                grad = tf.scalar_mul(self.embedding_factor, grad)
                grads_and_vars[i] = (grad, var)
        
        assert found == len(embeds)  # internal consistency check
        self.train_step = self.opt.apply_gradients(grads_and_vars)        
        
        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver(max_to_keep=100)
Example #17
def reduce_net_block():
    net_block = (td.Concat() >> td.FC(20) >> td.FC(20)
                 >> td.FC(1, activation=None)
                 >> td.Function(lambda xs: tf.squeeze(xs, axis=1)))
    return td.Map(td.Scalar()) >> td.Reduce(net_block)
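A condensed sketch of training this reduction to approximate, say, the sum of a variable-length list (batching, sizes, and the optimizer are illustrative):

import random
import tensorflow as tf
import tensorflow_fold as td

net_block = reduce_net_block()
compiler = td.Compiler.create((net_block, td.Scalar()))
y_out, y_true = compiler.output_tensors
loss = tf.nn.l2_loss(y_out - y_true)
train_op = tf.train.AdamOptimizer().minimize(loss)

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

def random_example():
    xs = [random.uniform(0, 1) for _ in range(random.randint(1, 8))]
    return xs, sum(xs)

batch = [random_example() for _ in range(64)]
sess.run([train_op, loss], feed_dict=compiler.build_feed_dict(batch))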
    def _compile(self):
        with self.sess.as_default(): 
            import tensorflow_fold as td
        
        output_size = len(self.labels)
        self.keep_prob = tf.placeholder_with_default(tf.constant(1.0),shape=None)

        char_emb = td.Embedding(num_buckets=self.char_buckets, 
                                num_units_out=self.embedding_size)
                                #initializer=tf.truncated_normal_initializer(stddev=0.15))
        char_cell = td.ScopedLayer(tf.contrib.rnn.LSTMCell(num_units=self.rnn_dim), 'char_cell')

        char_lstm = (td.InputTransform(lambda s: [ord(c) for c in s]) 
                    >> td.Map(td.Scalar('int32') >> char_emb) 
                    >> td.RNN(char_cell) >> td.GetItem(1) >> td.GetItem(1))        
        
        rnn_fwdcell = td.ScopedLayer(tf.contrib.rnn.LSTMCell(num_units=self.rnn_dim), 'lstm_fwd')
        fwdlayer = td.RNN(rnn_fwdcell) >> td.GetItem(0)
        
        rnn_bwdcell = td.ScopedLayer(tf.contrib.rnn.LSTMCell(num_units=self.rnn_dim), 'lstm_bwd')
        bwdlayer = (td.Slice(step=-1) >> td.RNN(rnn_bwdcell) 
                        >> td.GetItem(0) >> td.Slice(step=-1))
        
        pos_emb = td.Embedding(num_buckets=300,
                    num_units_out=32,
                    initializer=tf.truncated_normal_initializer(stddev=0.1))
        
        pos_x = (td.InputTransform(lambda x: x + 150)
                    >> td.Scalar(dtype='int32') 
                    >> pos_emb)
        
        pos_y = (td.InputTransform(lambda x: x + 150)
                    >> td.Scalar(dtype='int32') 
                    >> pos_emb)
        
        input_layer = td.Map(td.Record((char_lstm,pos_x,pos_y)) >> td.Concat())
        
        maxlayer = (td.AllOf(fwdlayer, bwdlayer) 
                    >> td.ZipWith(td.Concat()) 
                    >> td.Max())
        
        output_layer = (input_layer >> 
                        maxlayer >> td.FC(output_size, 
                                         input_keep_prob=self.keep_prob, 
                                         activation=None))

        self.compiler = td.Compiler.create((output_layer, 
                        td.Vector(output_size,dtype=tf.int32)))
                        
        self.y_out, self.y_true = self.compiler.output_tensors
        self.y_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
            logits=self.y_out,labels=self.y_true))

        self.y_prob = tf.nn.softmax(self.y_out)
        self.y_true_idx = tf.argmax(self.y_true,axis=1)
        self.y_pred_idx = tf.argmax(self.y_prob,axis=1)
        
        self.y_pred = tf.one_hot(self.y_pred_idx,depth=output_size,dtype=tf.int32)

        epoch_step = tf.Variable(0, trainable=False)
        self.epoch_step_op = tf.assign(epoch_step, epoch_step+1)
            
        lrate_decay = tf.train.exponential_decay(self.lrate, epoch_step, 1, self.decay)
            
        if self.optimizer == 'adam':
            self.opt = tf.train.AdamOptimizer(learning_rate=lrate_decay)
        elif self.optimizer == 'adagrad':
            self.opt = tf.train.AdagradOptimizer(learning_rate=lrate_decay,
                                                initial_accumulator_value=1e-08)
        elif self.optimizer == 'rmsprop' or self.optimizer == 'default':
            self.opt = tf.train.RMSPropOptimizer(learning_rate=lrate_decay,
                                                 epsilon=1e-08)
        else:
            raise Exception(('The optimizer {} is not in list of available ' 
                            + 'optimizers: default, adam, adagrad, rmsprop.')
                            .format(self.optimizer))
        
        # apply learning multiplier on the embedding learning rate
        embeds = [pos_emb.weights, char_emb.weights]
        grads_and_vars = self.opt.compute_gradients(self.y_loss)
        found = 0
        for i, (grad, var) in enumerate(grads_and_vars):
            if var in embeds:
                found += 1
                grad = tf.scalar_mul(self.embedding_factor, grad)
                grads_and_vars[i] = (grad, var)
        
        assert found == len(embeds)  # internal consistency check
        self.train_step = self.opt.apply_gradients(grads_and_vars)        
        
        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver(max_to_keep=100)
Example #19
def kl_divergence(mus_and_log_sigs):
    halfway = int(mus_and_log_sigs.get_shape()[1].value / 2)  # HACK: make this cleaner
    mus = mus_and_log_sigs[:, :halfway]
    log_sigs = mus_and_log_sigs[:, halfway:]

    kl_loss_term = -0.5 * tf.reduce_mean(
        1 + log_sigs - tf.square(mus) - tf.exp(log_sigs),
        axis=1
    )

    return kl_loss_term


c = td.Composition()
with c.scope():
    input_sequence = td.Map(td.Vector(54)).reads(c.input)

    # net = build_VAE(Z_SIZE, 54)
    # un_normalised_token_probs, mus_and_log_sigs = input_sequence >> build_VAE(Z_SIZE, 54)
    network_output = build_VAE(Z_SIZE, 54)

    network_output.reads(input_sequence)

    un_normalised_token_probs = td.GetItem(0).reads(network_output)
    mus_and_log_sigs = td.GetItem(1).reads(network_output)

    cross_entropy_loss = td.ZipWith(td.Function(softmax_crossentropy)) >> td.Mean()
    cross_entropy_loss.reads(
        un_normalised_token_probs,
        input_sequence
    )