Example #1
def diagonal_lstm(inputs, scope='diagonal_lstm'):
    with tf.compat.v1.variable_scope(scope):

        skewed_inputs = skew(inputs, scope="skewed_i")

        input_to_state = conv2d(skewed_inputs,
                                64, [1, 1],
                                mask_type="b",
                                scope="i_to_s")
        column_wise_inputs = tf.transpose(input_to_state, perm=[0, 2, 1, 3])

        batch, width, height, channel = column_wise_inputs.get_shape().as_list()
        rnn_inputs = tf.reshape(column_wise_inputs,
                                [-1, width, height * channel])

        cell = DiagonalLSTMCell(16, height, channel)

        outputs, states = tf.compat.v1.nn.dynamic_rnn(cell,
                                                      inputs=rnn_inputs,
                                                      dtype=tf.float32)
        width_first_outputs = tf.reshape(outputs, [-1, width, height, 16])

        skewed_outputs = tf.transpose(width_first_outputs, perm=[0, 2, 1, 3])
        outputs = unskew(skewed_outputs)

        return outputs
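
The unskew helper used above is not defined on this page; a minimal sketch, assuming it simply inverts the row-wise shifts that skew (Example #4) applies (the body below is my assumption, not the project's code):

def unskew(inputs, width=None, scope="unskew"):
    # Assumed inverse of skew: row idx was shifted right by idx columns,
    # so slice the original `width` columns back out of each row.
    with tf.compat.v1.name_scope(scope):
        batch, height, skewed_width, channel = inputs.get_shape().as_list()
        width = width if width is not None else skewed_width - height + 1
        rows = tf.split(inputs, height, 1)
        new_rows = [
            tf.slice(row, [0, 0, idx, 0], [-1, -1, width, -1])
            for idx, row in enumerate(rows)
        ]
        return tf.concat(new_rows, axis=1, name="output")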
Example #2
def get_history_din_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))

    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"

        key_s = "history_" + fir + "_sec_embedded"

        history_cha_embedded.append(
            tf.reduce_mean(getattr(self, key_c), axis=-2))
        history_sec_embedded.append(
            tf.reduce_mean(getattr(self, key_s), axis=-2))

    #self.history_all_embedded = tf.reshape(, [None,len(history_all_embedded),EMBEDDING_DIM])
    # T*B*N -> B*T*N
    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])

    attention_cha_output = din_attention(
        tf.reduce_mean(self.today_chapters_embedded, axis=-2),
        history_cha_emb,
        ATTENTION_SIZE,
        self.history_mask_cha_ph,
        stag="cha")

    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    attention_sec_output = din_attention(
        tf.reduce_mean(self.today_sections_embedded, axis=-2),
        history_sec_emb,
        ATTENTION_SIZE,
        self.history_mask_sec_ph,
        stag="sec")

    att_fea1 = tf.reduce_sum(attention_cha_output, -2)
    att_fea2 = tf.reduce_sum(attention_sec_output, -2)
    #atte_out = tf.concat([att_fea1,att_fea2],axis=-1)

    return att_fea1, att_fea2
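
These history methods rely on a get_mask_zero_embedded helper that is not shown on this page; a plausible sketch, assuming id 0 is the padding id and its embedding rows should be zeroed after lookup (the name and call signature come from the code above, the body is an assumption):

def get_mask_zero_embedded(embedding_var, id_ph):
    # Look up ids, then zero out rows whose id is 0 (padding),
    # so padded positions contribute nothing downstream.
    embedded = tf.nn.embedding_lookup(embedding_var, id_ph)  # [B, T, D]
    mask = tf.cast(tf.not_equal(id_ph, 0), embedded.dtype)   # [B, T]
    return embedded * tf.expand_dims(mask, -1)               # [B, T, D]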
Example #3
File: dbn.py Project: ysmiraak/lgm
def __init__(self, dim, samples,
             init_w=tf.random_uniform_initializer(minval=-0.01, maxval=0.01),
             ftype=tf.float32, scope='dbn'):
    self.dim, self.ftype = dim, ftype
    with tf.variable_scope(scope):
        # one RBM per pair of adjacent layer sizes
        self.rbm = tuple(
            Rbm(scope="rbm{}".format(i),
                dim_v=dim_v,
                dim_h=dim_h,
                samples=samples,
                init_w=init_w,
                ftype=self.ftype)
            for i, (dim_v, dim_h) in enumerate(zip(dim, dim[1:]), 1))
        # generative (top-down) weights come from the RBMs, top layer first;
        # recognition (bottom-up) weights wr are trained separately.
        self.w = tuple(rbm.w for rbm in self.rbm[::-1])
        self.wg = tuple(tf.transpose(w) for w in self.w)
        self.wr = tuple(
            tf.get_variable(name="wr{}".format(i), shape=(dim_d, dim_a),
                            initializer=init_w)
            for i, (dim_d, dim_a) in enumerate(zip(self.dim, self.dim[1:]), 1))
        self.lr_ = tf.placeholder(name='lr_', dtype=self.ftype, shape=())
        # wake: recognition pass bottom-up, update the generative weights
        self.v_ = self.rbm[0].v_
        with tf.name_scope('wake'):
            recogn = [self.v_]
            for w in self.wr:
                recogn.append(binary(tf.matmul(recogn[-1], w)))
            self.recogn = tuple(recogn)
            recogn = recogn[::-1]
            eps = self.lr_ / tf.cast(tf.shape(self.v_)[0], dtype=self.ftype)
            self.wake = tuple(
                w.assign_add(tf.matmul((sj - pj), sk, transpose_a=True) * eps).op
                for w, sk, sj, pj in zip(
                        self.w, recogn, recogn[1:],
                        (tf.sigmoid(tf.matmul(s, w))
                         for w, s in zip(self.wg, recogn))))
        # sleep: dream top-down from the top RBM, update the recognition weights
        top = self.rbm[-1]
        self.k_, (self.v, self.a) = top.k_, top.gibbs
        with tf.name_scope('sleep'):
            recons = [self.a, self.v]
            for w in self.wg[1:]:
                recons.append(binary(tf.matmul(recons[-1], w)))
            self.recons = tuple(recons)
            recons = recons[::-1]
            eps = self.lr_ / tf.cast(tf.shape(self.a)[0], dtype=self.ftype)
            self.sleep = tuple(
                w.assign_add(tf.matmul(sj, (sk - qk), transpose_a=True) * eps).op
                for w, sj, sk, qk in zip(
                        self.wr, recons, recons[1:],
                        (tf.sigmoid(tf.matmul(s, w))
                         for w, s in zip(self.wr, recons))))
        # the waking world is the amnesia of dream.
        self.v = self.recons[-1]
        with tf.name_scope('ances'):
            # ancestral sampling: start from the top RBM and generate downward
            self.a = self.rbm[-1].h
            ances = [self.a]
            for w in self.wg:
                ances.append(binary(tf.matmul(ances[-1], w)))
            self.ances = ances[-1]
        self.step = 0
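
The binary function used in the wake, sleep, and ancestral passes is defined elsewhere in the project; a minimal sketch, assuming it draws a stochastic binary sample with probability sigmoid(logits), the standard choice for wake-sleep (the actual definition in ysmiraak/lgm may differ):

def binary(logits):
    # Bernoulli sample with p = sigmoid(logits), cast back to float
    # so it can feed straight into the next tf.matmul.
    p = tf.sigmoid(logits)
    return tf.cast(tf.random_uniform(tf.shape(p)) < p, logits.dtype)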
Example #4
def skew(inputs, scope="skew"):

    with tf.compat.v1.name_scope(scope):

        batch, height, width, channel = inputs.get_shape().as_list()
        # one slice per height position, each [batch, 1, width, channel]
        rows = tf.split(inputs, height, 1)

        new_width = width + height - 1
        new_rows = []

        for idx, row in enumerate(rows):
            transposed_row = tf.transpose(tf.squeeze(row, [1]), perm=[0, 2, 1])
            squeezed_row = tf.reshape(transposed_row, [-1, width])
            # shift row idx right by idx columns, zero-padding to new_width
            padded_row = tf.pad(squeezed_row,
                                paddings=((0, 0), (idx, height - 1 - idx)))

            unsqueezed_row = tf.reshape(padded_row, [-1, channel, new_width])
            untransposed_row = tf.transpose(unsqueezed_row, perm=[0, 2, 1])
            new_rows.append(untransposed_row)

        outputs = tf.stack(new_rows, axis=1, name="output")

    return outputs
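
To see what skew does, here is a small check (my own illustration, not from the project): with a 3x3 single-channel input of ones, row idx is shifted right by idx columns, so the output width is 3 + 3 - 1 = 5 and the ones form a diagonal band:

tf.compat.v1.disable_eager_execution()  # TF1-style graph mode, as in these examples
x = tf.ones([1, 3, 3, 1])  # [batch, height, width, channel]
y = skew(x)                # -> [1, 3, 5, 1]
with tf.compat.v1.Session() as sess:
    print(sess.run(y)[0, :, :, 0])
# [[1. 1. 1. 0. 0.]
#  [0. 1. 1. 1. 0.]
#  [0. 0. 1. 1. 1.]]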
Example #5
def LSTM_Sentiment(input_tensor):

    #  Reference Paper: https://www.bioinf.jku.at/publications/older/2604.pdf

    lstmCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(1024)
    output_rnn, _ = tf.compat.v1.nn.dynamic_rnn(lstmCell,
                                                input_tensor,
                                                dtype=tf.float32)

    W_fc = tf.Variable(tf.random.truncated_normal([1024, 2]))
    b_fc = tf.Variable(tf.constant(0.1, shape=[2]))

    # select the last time step's output: [batch, 1024]
    output_transposed = tf.transpose(output_rnn, perm=[1, 0, 2])
    output = tf.gather(output_transposed,
                       int(output_transposed.get_shape()[0]) - 1)

    return tf.identity(tf.matmul(output, W_fc) + b_fc, name="output")
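
The transpose-plus-gather above only selects the last time step; a drop-in replacement for the last two statements that says the same thing more directly (same semantics, assuming a fixed-length input):

# output_rnn has shape [batch, time, 1024]; its last time step is simply
output = output_rnn[:, -1, :]
return tf.identity(tf.matmul(output, W_fc) + b_fc, name="output")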
Example #6
def get_history_bgru_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))

    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"

        key_s = "history_" + fir + "_sec_embedded"
        # per day: B x 3 x 128 -> B x 128; 14 days stacked give 14 x B x 128,
        # transposed below to B x 14 x 128
        history_cha_embedded.append(get_rnn_sum(getattr(self, key_c),
                                                "rnncha"))
        history_sec_embedded.append(get_rnn_sum(getattr(self, key_s),
                                                "rnnsec"))

    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])
    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    with tf.name_scope("GRU"):
        num_layers = 2
        HIDDEN_DIM = 128
        KEEP_PROB = 0.8
        with tf.name_scope('cell'):

            def build_cell(n, m):
                cell = tf.nn.rnn_cell.GRUCell(n)
                cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=m)
                return cell

            num_units = [HIDDEN_DIM // 2, HIDDEN_DIM]

            cell_fw = [build_cell(n, KEEP_PROB) for n in num_units]
            cell_bw = [build_cell(n, KEEP_PROB) for n in num_units]

        with tf.name_scope('gru'):
            biout, output_fw, output_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                cell_fw,
                cell_bw,
                inputs=tf.concat([history_cha_emb, history_sec_emb], axis=-1),
                dtype=tf.float32,
                scope='cha')
            rnnoutput = tf.reduce_sum(biout, axis=-2)

    return rnnoutput
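
get_rnn_sum is another project helper not defined on this page; a plausible sketch, assuming it runs a small GRU over the token axis of one day's embeddings and sums the outputs into a single vector (the name and call shape come from the code above, the body is an assumption; AUTO_REUSE matters because the same scope is reused for all fourteen days):

def get_rnn_sum(embedded, scope):
    # embedded: [B, tokens, D] for one day; returns [B, D].
    with tf.compat.v1.variable_scope(scope, reuse=tf.compat.v1.AUTO_REUSE):
        cell = tf.nn.rnn_cell.GRUCell(embedded.get_shape().as_list()[-1])
        outputs, _ = tf.nn.dynamic_rnn(cell, embedded, dtype=tf.float32)
        return tf.reduce_sum(outputs, axis=-2)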
Example #7
def get_history_gru_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))

    # chap ids per day, e.g. [1,2] [3] [4,3,5] [0,0] ..., over 14 days
    # sec ids laid out the same way
    # design options: concat cha and sec then one RNN, or a cha RNN and a
    # sec RNN separately, with or without concatenating the results
    #
    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"

        key_s = "history_" + fir + "_sec_embedded"
        # per day: B x 3 x 128 -> B x 128; 14 days stacked give 14 x B x 128,
        # transposed below to B x 14 x 128
        history_cha_embedded.append(
            tf.reduce_mean(getattr(self, key_c), axis=-2))
        history_sec_embedded.append(
            tf.reduce_mean(getattr(self, key_s), axis=-2))

    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])
    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    with tf.name_scope("GRU"):
        num_layers = 2
        HIDDEN_DIM = 128
        KEEP_PROB = 0.8

        with tf.name_scope('gru1'):

            def get_cell():
                cell2 = tf.nn.rnn_cell.GRUCell(HIDDEN_DIM)
                cell2_ = tf.nn.rnn_cell.DropoutWrapper(
                    cell2, output_keep_prob=KEEP_PROB)
                return cell2_

            cells = [get_cell() for _ in range(num_layers)]
            Cell = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
            rnnoutput, _ = tf.nn.dynamic_rnn(cell=Cell,
                                             inputs=history_cha_emb,
                                             dtype=tf.float32,
                                             scope='cha')
            rnnoutput1 = tf.reduce_sum(rnnoutput, axis=-2)

    with tf.name_scope("GRU2"):
        num_layers = 2
        HIDDEN_DIM = 128
        KEEP_PROB = 0.8

        with tf.name_scope('gru2'):
            cells2 = []
            for _ in range(num_layers):
                cell = tf.nn.rnn_cell.GRUCell(HIDDEN_DIM)
                cell_ = tf.nn.rnn_cell.DropoutWrapper(
                    cell, output_keep_prob=KEEP_PROB)
                cells2.append(cell_)
            #cells2 = [get_cell2() for _ in range(num_layers)]
            Cell2 = tf.nn.rnn_cell.MultiRNNCell(cells2, state_is_tuple=True)
            rnnoutput2, _ = tf.nn.dynamic_rnn(cell=Cell2,
                                              inputs=history_sec_emb,
                                              dtype=tf.float32,
                                              scope='SEC')

            rnnoutput3 = tf.reduce_sum(rnnoutput2, axis=-2)

    #gru_out = tf.concat([rnnoutput1,rnnoutput3],axis=-1)

    return rnnoutput1, rnnoutput3
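
The two GRU blocks above are identical apart from their inputs and scopes; a possible refactor (my sketch, not part of the original code) that would replace both:

def stacked_gru_sum(inputs, scope, num_layers=2, hidden_dim=128, keep_prob=0.8):
    # Stack num_layers GRU cells with output dropout, run them over
    # inputs ([B, T, D]) and sum the outputs over time -> [B, hidden_dim].
    cells = [
        tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.GRUCell(hidden_dim),
                                      output_keep_prob=keep_prob)
        for _ in range(num_layers)
    ]
    multi = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
    outputs, _ = tf.nn.dynamic_rnn(cell=multi, inputs=inputs,
                                   dtype=tf.float32, scope=scope)
    return tf.reduce_sum(outputs, axis=-2)

# rnnoutput1 = stacked_gru_sum(history_cha_emb, 'cha')
# rnnoutput3 = stacked_gru_sum(history_sec_emb, 'SEC')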
Example #8
def get_history_din_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))

    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"

        key_s = "history_" + fir + "_sec_embedded"

        history_cha_embedded.append(get_rnn_sum(getattr(self, key_c),
                                                "rnncha"))
        history_sec_embedded.append(get_rnn_sum(getattr(self, key_s),
                                                "rnnsec"))

    #self.history_all_embedded = tf.reshape(, [None,len(history_all_embedded),EMBEDDING_DIM])
    # T*B*N -> B*T*N
    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])
    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    # DIEN: interest-extraction GRU followed by an attention-gated evolution GRU
    with tf.name_scope('rnn_1'):
        rnn_outputs, _ = dynamic_rnn(
            GRUCell(HIDDEN_SIZE * 2),
            inputs=tf.concat([history_cha_emb, history_sec_emb], axis=-1),
            sequence_length=self.history_len_ph,
            dtype=tf.float32,
            scope="gru1")
    with tf.name_scope('Attention_layer_1'):
        att_outputs, alphas = din_fcn_attention(
            tf.concat([
                get_rnn_sum(self.today_chapters_embedded, "rnncha"),
                get_rnn_sum(self.today_sections_embedded, "rnnsec")
            ], axis=-1),
            rnn_outputs,
            ATTENTION_SIZE,
            self.history_mask_cha_ph,
            scope="1_1",
            softmax_stag=1,
            stag='1_1',
            mode='LIST',
            return_alphas=True)
    with tf.name_scope('rnn_2'):
        rnn_outputs2, final_state2 = dynamic_rnn(
            VecAttGRUCell(HIDDEN_SIZE * 2),
            inputs=rnn_outputs,
            att_scores=tf.expand_dims(alphas, -1),
            sequence_length=self.history_len_ph,
            dtype=tf.float32,
            scope="gru2")

    return final_state2
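
VecAttGRUCell is DIEN's attention-gated GRU (AUGRU): the attention score from the first stage rescales the update gate, so steps with low relevance barely move the hidden state. A minimal NumPy sketch of one update step (the weight names are illustrative, not the project's):

import numpy as np

def augru_step(h_prev, x, att, W_u, U_u, W_r, U_r, W_c, U_c):
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    u = att * sigmoid(x @ W_u + h_prev @ U_u)        # attention-scaled update gate
    r = sigmoid(x @ W_r + h_prev @ U_r)              # reset gate
    h_tilde = np.tanh(x @ W_c + (r * h_prev) @ U_c)  # candidate state
    return (1.0 - u) * h_prev + u * h_tilde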