def build_fcn_net(self, inp, use_dice=False):
        with self.graph.as_default():
            self.saver = tf.train.Saver(max_to_keep=1)


            with tf.name_scope("Out"):
                bn1 = tf.layers.batch_normalization(inputs=inp, name='bn1')
                dnn1 = tf.layers.dense(bn1, 200, activation=None, name='f1')
                if use_dice:
                    dnn1 = dice(dnn1, name='dice_1')
                else:
                    dnn1 = prelu(dnn1, 'prelu1')

                dnn2 = tf.layers.dense(dnn1, 80, activation=None, name='f2')
                if use_dice:
                    dnn2 = dice(dnn2, name='dice_2')
                else:
                    dnn2 = prelu(dnn2, 'prelu2')
                dnn3 = tf.layers.dense(dnn2, 2, activation=None, name='f3')
                self.y_hat = tf.nn.softmax(dnn3) + 1e-8  # small epsilon so tf.log never sees an exact zero

            with tf.name_scope('Metrics'):
                # Cross-entropy loss and optimizer initialization
                # 'core_type_ph': [1, 1, 0,..],

                ctr_loss = - tf.reduce_mean(tf.log(self.y_hat) * self.target_ph)
                self.loss = ctr_loss
                # tf.summary.scalar('loss', self.loss)
                self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr_ph).minimize(self.loss)
                # self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr_ph).minimize(self.loss)
                # Accuracy metric
                self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(self.y_hat), self.target_ph), tf.float32))
                # tf.summary.scalar('accuracy', self.accuracy)

            self.merged = tf.summary.merge_all()
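A caveat worth flagging: tf.layers.batch_normalization registers its moving-average updates in tf.GraphKeys.UPDATE_OPS, and in TF1 those ops only run if they are tied to the train op (a training flag would also normally be threaded through bn1). A minimal sketch, assuming the same self.lr_ph and self.loss as above, of how the minimize call inside the Metrics scope could be wrapped:

# assumption about intent, not part of the original snippet
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr_ph).minimize(self.loss)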
def get_history_din_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))

    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"

        key_s = "history_" + fir + "_sec_embedded"

        history_cha_embedded.append(
            tf.reduce_mean(getattr(self, key_c), axis=-2))
        history_sec_embedded.append(
            tf.reduce_mean(getattr(self, key_s), axis=-2))

    #self.history_all_embedded = tf.reshape(, [None,len(history_all_embedded),EMBEDDING_DIM])
    # [T, B, N] -> [B, T, N]: the per-day list is time-major; transpose to batch-major
    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])

    attention_cha_output = din_attention(tf.reduce_mean(
        self.today_chapters_embedded, axis=-2),
                                         history_cha_emb,
                                         ATTENTION_SIZE,
                                         self.history_mask_cha_ph,
                                         stag="cha")

    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    attention_sec_output = din_attention(tf.reduce_mean(
        self.today_sections_embedded, axis=-2),
                                         history_sec_emb,
                                         ATTENTION_SIZE,
                                         self.history_mask_sec_ph,
                                         stag="sec")

    att_fea1 = tf.reduce_sum(attention_cha_output, -2)
    att_fea2 = tf.reduce_sum(attention_sec_output, -2)
    #atte_out = tf.concat([att_fea1,att_fea2],axis=-1)

    return att_fea1, att_fea2
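For orientation, a shape walk-through of the attention step above, under the assumption that the helpers follow the public DIN reference implementation, where din_attention returns a [B, 1, H] tensor:

# history_cha_emb:      [B, 14, H]  (14 days of mean-pooled chapter embeddings)
# attention_cha_output: [B, 1, H]   (assumed din_attention output shape)
# att_fea1:             [B, H]      (tf.reduce_sum over axis -2 squeezes the 1)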
def get_rnn_sum(input_seq, name='cha', nameln='sec'):
    # Bidirectional GRU stack with a residual connection, layer norm, and mean pooling.
    num_layers = 2  # kept from the original; the stack depth actually comes from num_units
    HIDDEN_DIM = 128
    KEEP_PROB = 0.8

    with tf.name_scope('cell'):
        def build_cell(n, m):
            cell = tf.nn.rnn_cell.GRUCell(n)
            cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=m)
            return cell

        num_units = [HIDDEN_DIM // 2, HIDDEN_DIM // 2]

        cell_fw = [build_cell(n, KEEP_PROB) for n in num_units]
        cell_bw = [build_cell(n, KEEP_PROB) for n in num_units]

    with tf.name_scope('gru'), tf.variable_scope("gru", reuse=tf.AUTO_REUSE):
        biout, output_fw, output_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
            cell_fw, cell_bw, inputs=input_seq, dtype=tf.float32, scope=name)

        # Open question from the original author: should `name` here match the one
        # above, or differ per call site (e.g. add a second name2)?

        # Alternative kept from the original, scoped by nameln:
        # lnout = tf.contrib.layers.layer_norm(
        #     inputs=(biout + input_seq), begin_norm_axis=-1, begin_params_axis=-1,
        #     scope=nameln)

    # Residual + layer norm; requires input_seq's last dim to equal HIDDEN_DIM.
    lnout = tf.contrib.layers.layer_norm(
        inputs=(biout + input_seq), begin_norm_axis=-1, begin_params_axis=-1)

    rnnoutput = tf.reduce_mean(lnout, axis=-2)

    return rnnoutput
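A minimal usage sketch (an assumption about the caller, not part of the original). Note the residual biout + input_seq only type-checks when the input's last dimension equals HIDDEN_DIM, since the forward and backward halves contribute 64 + 64 = 128 units:

seq = tf.placeholder(tf.float32, [None, 14, 128])  # hypothetical [B, T, 128] batch
pooled = get_rnn_sum(seq, name='cha')              # -> [B, 128] mean-pooled summary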
def get_history_sum_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key_c = "history_" + fir + "_chap_ph"
        embed_key_c = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key_c,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key_c)))

        key_s = "history_" + fir + "_sec_ph"
        embed_key_s = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key_s,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key_s)))

        key_st = "style_" + fir + "_ph"
        embed_key_st = "history_" + fir + "_sty_embedded"
        setattr(
            self, embed_key_st,
            tf.nn.embedding_lookup(self.style_embeddings_var,
                                   getattr(self, key_st)))
            

    chap = get_rnn_sum(self.history_one_chap_embedded, "rnncha")
    # per day: [B, x, 128] -> [B, 128]; after the loop below: [B, 128*14]
    sec = get_rnn_sum(self.history_one_sec_embedded, "rnnsec")
    sty = self.history_one_sty_embedded

    # Remaining days appended in reverse, so the final day order is
    # [one, fourteen, thirteen, ..., two].
    for fir in his_days[:0:-1]:
        key_c = "history_" + fir + "_chap_embedded"
        chap = tf.concat([chap, get_rnn_sum(getattr(self, key_c), "rnncha")], axis=-1)
        key_s = "history_" + fir + "_sec_embedded"
        sec = tf.concat([sec, get_rnn_sum(getattr(self, key_s), "rnnsec")], axis=-1)
        
        key_st = "history_" + fir + "_sty_embedded"
        sty = tf.concat([sty, getattr(self, key_st)], axis=-1)
    
    history_chap_emb = tf.reshape(chap, [-1, HIS_DAYS, EMBEDDING_DIM])
    history_sec_emb = tf.reshape(sec, [-1, HIS_DAYS, EMBEDDING_DIM])
    history_sty_emb = tf.reshape(sty, [-1, HIS_DAYS, EMBEDDING_DIM])

    chap_mean = tf.reduce_mean(history_chap_emb, axis=-2)
    sec_mean = tf.reduce_mean(history_sec_emb, axis=-2)
    sty_mean = tf.reduce_mean(history_sty_emb, axis=-2)
    #return chap_mean, sec_mean

    return tf.concat([chap_mean, sec_mean,sty_mean], axis=-1)
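The concat-then-reshape above is equivalent to stacking the per-day vectors, which also makes the unusual day ordering explicit. A sketch, assuming EMBEDDING_DIM equals the 128-unit RNN output so the reshape is valid:

# order as built above: 'one' first, then 'fourteen' down to 'two'
order = his_days[:1] + his_days[:0:-1]
days = [get_rnn_sum(getattr(self, "history_" + d + "_chap_embedded"), "rnncha")
        for d in order]
history_chap_emb = tf.stack(days, axis=1)  # [B, HIS_DAYS, EMBEDDING_DIM]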
Example 5
def get_history_sum_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))



    chap = tf.reduce_mean(self.history_one_chap_embedded, axis=-2)
    # per day: [B, x, 128] -> [B, 128]; after the loop below: [B, 128*14]
    sec = tf.reduce_mean(self.history_one_sec_embedded, axis=-2)

    # Remaining days appended in reverse, so the final day order is
    # [one, fourteen, thirteen, ..., two].
    for fir in his_days[:0:-1]:
        key_c = "history_" + fir + "_chap_embedded"
        chap = tf.concat([chap, tf.reduce_mean(getattr(self, key_c), axis=-2)], axis=-1)
        key_s = "history_" + fir + "_sec_embedded"
        sec = tf.concat([sec, tf.reduce_mean(getattr(self, key_s), axis=-2)], axis=-1)
    
    history_chap_emb = tf.reshape(chap, [-1, HIS_DAYS, EMBEDDING_DIM])
    history_sec_emb = tf.reshape(sec, [-1, HIS_DAYS, EMBEDDING_DIM])


    chap_mean = tf.reduce_mean(history_chap_emb, axis=-2)
    sec_mean = tf.reduce_mean(history_sec_emb, axis=-2)
    return chap_mean, sec_mean
Example 6
    def __init__(self, *, use_dice=False):
        super().__init__(use_dice=use_dice)
        self.other_inputs()

        teacher = [
            # self.teacher_id_embedded,
            self.province_id_embedded,
            self.city_id_embedded,
            self.core_type_embedded,
            self.student_count_embedded,
        ]
        # 0-4
        clazz = [
            # self.class_id_embedded,
            self.edition_id_embedded,
            self.grade_id_embedded,
            self.class_student_embedded,
            self.cap_avg_embedded,
            self.cap_max_embedded,
            self.cap_min_embedded,
        ]
        # 5-11
        study = [
            self.study_vector_embedded,
            self.gap_days_embedded,
        ]
        # 12-13
        submit = [
            self.month_submit_rate_embedded,
        ]
        # 14
        capacity = [
            self.region_capacity_embedded,
        ]
        # 15
        prefer = [
            self.prefer_assign_time_avg_embedded,
            self.prefer_assign_time_var_embedded,
            self.prefer_assign_rank_avg_embedded,
            self.prefer_assign_rank_var_embedded,
        ]
        # 16-19
        register = [
            self.register_diff_embedded,
        ]
        # 20
        homeworkcount = [
            self.homework_count_embedded,
        ]
        # 21
        weekcount = [
            self.week_count_embedded,
        ]
        # 22
        lastday = [
            self.lastday_count_embedded,
        ]
        # 23

        o = teacher + clazz + study + submit + \
            capacity + prefer + register + homeworkcount + weekcount + lastday

        # We concatenate pairwise instead of a single tf.concat(o, axis=-1)
        # because of https://github.com/tensorflow/tensorflow/issues/24816.
        # Note: style, homework, and reflect don't need this workaround (verified).
        others = o[0]
        for i in o[1:]:
            others = tf.concat([others, i], axis=-1)
        others = [others]

        style = []
        for fir in ["1", "2", "3", "4"]:
            for sec in ["100", "010", "001", "110", "101", "011", "111"]:
                embed_key = "style_" + fir + "0" + sec + "_embedded"
                style.append(getattr(self, embed_key))

        homework = []
        homework.append(self.today_style_embedded)
        homework.append(tf.reduce_mean(self.today_chapters_embedded, -2))
        homework.append(tf.reduce_mean(self.today_sections_embedded, -2))
        homework.append(self.history_chap_embedded)
        homework.append(self.history_sec_embedded)
        homework.append(self.history_chap_embedded *
                        tf.reduce_mean(self.today_chapters_embedded, -2))
        homework.append(self.history_sec_embedded *
                        tf.reduce_mean(self.today_sections_embedded, -2))

        reflect = []
        reflect.append(tf.reduce_mean(self.reflect_value_embedded, -2))

        with self.graph.as_default():
            with tf.name_scope("Concat"):
                inps = tf.concat(others + style + homework + reflect, -1)
        self.build_fcn_net(inps, self.use_dice)
Example 7
def get_history_gru_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))

    # Per-day chapter lists over the 14 days, e.g. [1,2] [3] [4,3,5] [0,0] ...
    # Sections have the same layout.
    # Design options: concat chap+sec and run one RNN, or run separate chap/sec
    # RNNs and then concatenate the results (or keep them separate).
    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"

        key_s = "history_" + fir + "_sec_embedded"
        # per day: [B, 3, 128] -> [B, 128]; stacked over 14 days: [14, B, 128], transposed below to [B, 14, 128]
        history_cha_embedded.append(
            tf.reduce_mean(getattr(self, key_c), axis=-2))
        history_sec_embedded.append(
            tf.reduce_mean(getattr(self, key_s), axis=-2))

    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])
    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    with tf.name_scope("GRU"):
        num_layers = 2
        HIDDEN_DIM = 128
        KEEP_PROB = 0.8

        with tf.name_scope('gru1'):

            def get_cell():
                cell2 = tf.nn.rnn_cell.GRUCell(HIDDEN_DIM)
                cell2_ = tf.nn.rnn_cell.DropoutWrapper(
                    cell2, output_keep_prob=KEEP_PROB)
                return cell2_

            cells = [get_cell() for _ in range(num_layers)]
            Cell = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
            rnnoutput, _ = tf.nn.dynamic_rnn(cell=Cell,
                                             inputs=history_cha_emb,
                                             dtype=tf.float32,
                                             scope='cha')
            rnnoutput1 = tf.reduce_sum(rnnoutput, axis=-2)

    with tf.name_scope("GRU2"):
        num_layers = 2
        HIDDEN_DIM = 128
        KEEP_PROB = 0.8

        with tf.name_scope('gru2'):
            cells2 = []
            for _ in range(num_layers):
                cell = tf.nn.rnn_cell.GRUCell(HIDDEN_DIM)
                cell_ = tf.nn.rnn_cell.DropoutWrapper(
                    cell, output_keep_prob=KEEP_PROB)
                cells2.append(cell_)
            #cells2 = [get_cell2() for _ in range(num_layers)]
            Cell2 = tf.nn.rnn_cell.MultiRNNCell(cells2, state_is_tuple=True)
            rnnoutput2, _ = tf.nn.dynamic_rnn(cell=Cell2,
                                              inputs=history_sec_emb,
                                              dtype=tf.float32,
                                              scope='SEC')

            rnnoutput3 = tf.reduce_sum(rnnoutput2, axis=-2)

    #gru_out = tf.concat([rnnoutput1,rnnoutput3],axis=-1)

    return rnnoutput1, rnnoutput3
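One possible refinement (an assumption about intent, mirroring the DIEN variant later in this listing): passing sequence_length to tf.nn.dynamic_rnn zeroes the outputs past each example's true history length, so the reduce_sum would stop counting padded days.

# rnnoutput, _ = tf.nn.dynamic_rnn(cell=Cell, inputs=history_cha_emb,
#                                  sequence_length=self.history_len_ph,
#                                  dtype=tf.float32, scope='cha')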
Example 8
 def __init__(self, dat, dim_rec, dim_z, dim_gen, scope='vae'):
     assert 2 == dat.ndim
     assert isinstance(dim_rec, tuple)
     assert isinstance(dim_z, int)
     assert isinstance(dim_gen, tuple)
     init_w = tf.variance_scaling_initializer(scale=2.0,
                                              mode='fan_in',
                                              distribution='uniform')
     init_b = tf.constant_initializer(0.01)
     init_z = tf.zeros_initializer()
     with tf.variable_scope(scope):
         dat = self.dat = tf.constant(name='dat', value=dat)
         bs_ = self.bs_ = tf.placeholder(name='bs_',
                                         dtype=tf.int32,
                                         shape=())
         bat = self.bat = tf.random_uniform(name='bat',
                                            shape=(bs_, ),
                                            minval=0,
                                            maxval=dat.shape[0],
                                            dtype=tf.int32)
         h = x = self.x = tf.nn.embedding_lookup(name='x',
                                                 params=dat,
                                                 ids=bat)
         for i, dim in enumerate(dim_rec, 1):
             name = "hr{}".format(i)
             h = tf.layers.dense(name=name,
                                 inputs=h,
                                 units=dim,
                                 activation=tf.nn.relu,
                                 kernel_initializer=init_w,
                                 bias_initializer=init_b)
             setattr(self, name, h)
         mu = self.mu = tf.layers.dense(name='mu',
                                        inputs=h,
                                        units=dim_z,
                                        kernel_initializer=init_w,
                                        bias_initializer=init_z)
         lv = self.lv = tf.layers.dense(name='lv',
                                        inputs=h,
                                        units=dim_z,
                                        kernel_initializer=init_w,
                                        bias_initializer=init_z)
         with tf.name_scope('z'):
             h = z = self.z = mu + tf.exp(
                 0.5 * lv) * tf.random_normal(shape=tf.shape(lv))
         for i, dim in enumerate(dim_gen, 1):
             name = "hg{}".format(i)
             h = tf.layers.dense(name=name,
                                 inputs=h,
                                 units=dim,
                                 activation=tf.nn.relu,
                                 kernel_initializer=init_w,
                                 bias_initializer=init_b)
             setattr(self, name, h)
         logits = tf.layers.dense(
             name='logits',
             inputs=h,
             units=dat.shape[1],
             # activation=tf.nn.sigmoid  (alternative kept from the original)
             kernel_initializer=init_w,
             bias_initializer=init_z)
         g = self.g = tf.sigmoid(logits)
         with tf.name_scope('loss_recons'):
             # loss_recons = self.loss_recons = tf.reduce_mean(
             #     tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels= x, logits= logits), axis= 1))
             loss_recons = self.loss_recons = tf.reduce_mean(
                 tf.reduce_sum(tf.square(x - g), axis=1))
         with tf.name_scope('loss_relent'):
             # loss_relent = self.loss_relent = tf.reduce_mean(
             #     0.5 * tf.reduce_sum((- 1.0 - lv + tf.exp(lv) + tf.square(mu)), axis= 1))
             loss_relent = self.loss_relent = tf.reduce_mean(
                 tf.reduce_sum((-1.0 - lv + tf.exp(lv) + tf.square(mu)),
                               axis=1))
         with tf.name_scope('loss'):
             loss = self.loss = loss_relent + loss_recons
         up = self.up = tf.train.AdamOptimizer().minimize(loss)
         self.step = 0
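A minimal training-loop sketch for this VAE (hypothetical class name and hyperparameters; only up, bs_, and step come from the code above):

vae = VAE(dat, dim_rec=(256, 128), dim_z=32, dim_gen=(128, 256))  # hypothetical ctor call
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(vae.up, feed_dict={vae.bs_: 64})  # one gradient step on a random batch
        vae.step += 1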
def get_history_din_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))

    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"

        key_s = "history_" + fir + "_sec_embedded"

        history_cha_embedded.append(
            tf.reduce_mean(getattr(self, key_c), axis=-2))
        history_sec_embedded.append(
            tf.reduce_mean(getattr(self, key_s), axis=-2))

    #self.history_all_embedded = tf.reshape(, [None,len(history_all_embedded),EMBEDDING_DIM])
    # [T, B, N] -> [B, T, N]: transpose the per-day list to batch-major
    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])

    #dien
    with tf.name_scope('rnn_1'):
        rnn_outputs, _ = dynamic_rnn(GRUCell(HIDDEN_SIZE),
                                     inputs=history_cha_emb,
                                     sequence_length=self.history_len_ph,
                                     dtype=tf.float32,
                                     scope="gru1")
    with tf.name_scope('Attention_layer_1'):
        att_outputs, alphas = din_fcn_attention(tf.reduce_mean(
            self.today_chapters_embedded, axis=-2),
                                                rnn_outputs,
                                                ATTENTION_SIZE,
                                                self.history_mask_cha_ph,
                                                scope="1_1",
                                                softmax_stag=1,
                                                stag='1_1',
                                                mode='LIST',
                                                return_alphas=True)
    with tf.name_scope('rnn_2'):
        rnn_outputs2, final_state2 = dynamic_rnn(
            VecAttGRUCell(HIDDEN_SIZE),
            inputs=rnn_outputs,
            att_scores=tf.expand_dims(alphas, -1),
            sequence_length=self.history_len_ph,
            dtype=tf.float32,
            scope="gru2")

    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    with tf.name_scope('rnn_11'):
        rnn_outputs3, _ = dynamic_rnn(GRUCell(HIDDEN_SIZE),
                                      inputs=history_sec_emb,
                                      sequence_length=self.history_len_ph,
                                      dtype=tf.float32,
                                      scope="gru11")
    with tf.name_scope('Attention_layer_11'):
        att_outputs3, alphas3 = din_fcn_attention(tf.reduce_mean(
            self.today_sections_embedded, axis=-2),
                                                  rnn_outputs3,
                                                  ATTENTION_SIZE,
                                                  self.history_mask_sec_ph,
                                                  scope="2_2",
                                                  softmax_stag=1,
                                                  stag='2_2',
                                                  mode='LIST',
                                                  return_alphas=True)
    with tf.name_scope('rnn_22'):
        rnn_outputs4, final_state3 = dynamic_rnn(
            VecAttGRUCell(HIDDEN_SIZE),
            inputs=rnn_outputs3,
            att_scores=tf.expand_dims(alphas3, -1),
            sequence_length=self.history_len_ph,
            dtype=tf.float32,
            scope="gru22")

    return final_state2, final_state3
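A recap of the DIEN-style flow above, with assumed shapes (the helpers are taken to follow the public DIEN reference, where VecAttGRUCell is the attention-gated AUGRU cell):

# gru1:  [B, 14, H] day embeddings -> per-step interest states
# att:   alphas [B, 14] score each state against today's mean embedding
# gru2:  VecAttGRUCell gates its state update by the attention score, so
#        final_state2 / final_state3 are attention-weighted summaries of the history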
def get_history_bgru_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))

    
    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"

        key_s = "history_" + fir + "_sec_embedded"
        # per day: [B, 3, 128] -> [B, 128]; stacked over 14 days: [14, B, 128], transposed below to [B, 14, 128]
        history_cha_embedded.append(
            tf.reduce_mean(getattr(self, key_c), axis=-2))
        history_sec_embedded.append(
            tf.reduce_mean(getattr(self, key_s), axis=-2))

    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])
    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    with tf.name_scope("GRU"):
        num_layers=2
        HIDDEN_DIM=128
        KEEP_PROB =0.8
        with tf.name_scope('cell'):
            def build_cell(n,m):
                cell = tf.nn.rnn_cell.GRUCell(n)                  
                cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=m)
                return cell
            num_units=[HIDDEN_DIM//2,HIDDEN_DIM//2]
            
            cell_fw = [build_cell(n,KEEP_PROB) for n in num_units]
            cell_bw = [build_cell(n,KEEP_PROB) for n in num_units]

        with tf.name_scope('gru'):
            biout,output_fw,output_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                cell_fw,cell_bw,inputs= history_cha_emb,dtype=tf.float32
                ,scope='cha')
            rnnoutput = tf.reduce_sum(biout, axis=-2)
        with tf.name_scope('cell2'):
            def build_cell2(n,m):
                cell2 = tf.nn.rnn_cell.GRUCell(n)                  
                cell2 = tf.nn.rnn_cell.DropoutWrapper(cell2, output_keep_prob=m)
                return cell2
            num_units2=[HIDDEN_DIM//2,HIDDEN_DIM//2]
            
            cell_fw2 = [build_cell2(n,KEEP_PROB) for n in num_units2]
            cell_bw2 = [build_cell2(n,KEEP_PROB) for n in num_units2]

        with tf.name_scope('gru2'):
            biout2,output_fw2,output_bw2 = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                cell_fw2,cell_bw2,inputs= history_sec_emb,dtype=tf.float32
                ,scope='sec')
            rnnoutput2 = tf.reduce_sum(biout2, axis=-2)


    #rnn_output = tf.concat([rnnoutput,rnnoutput2],axis=-1)
    return rnnoutput,rnnoutput2
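One caveat worth noting (an observation, not a change): unlike get_rnn_sum above, which wraps its RNN in tf.variable_scope("gru", reuse=tf.AUTO_REUSE), the two stacks here sit under plain name scopes with fixed RNN scopes 'cha' and 'sec', so building this method twice in one graph would raise a variable-already-exists error. A sketch of one way a caller could guard against that, assuming it fits the surrounding class:

with tf.variable_scope("bgru", reuse=tf.AUTO_REUSE):  # hypothetical wrapper scope
    rnn_cha, rnn_sec = self.get_history_bgru_embedded()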