Code example #1
def get_history_sum_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))



    # per-day tensors are B x T x 128; the mean over T gives B x 128, and
    # concatenating all 14 days below yields B x (128*14)
    chap = tf.reduce_mean(self.history_one_chap_embedded, axis=-2)
    sec = tf.reduce_mean(self.history_one_sec_embedded, axis=-2)

    # remaining days, 'fourteen' down to 'two' ('one' seeded chap and sec);
    # day order does not matter since we take a mean over days at the end
    for fir in his_days[:0:-1]:
        key_c = "history_" + fir + "_chap_embedded"
        chap = tf.concat([chap, tf.reduce_mean(getattr(self, key_c), axis=-2)], axis=-1)
        key_s = "history_" + fir + "_sec_embedded"
        sec = tf.concat([sec, tf.reduce_mean(getattr(self, key_s), axis=-2)], axis=-1)
    
    history_chap_emb = tf.reshape(chap, [-1, HIS_DAYS, EMBEDDING_DIM])
    history_sec_emb = tf.reshape(sec, [-1, HIS_DAYS, EMBEDDING_DIM])


    chap_mean = tf.reduce_mean(history_chap_emb, axis=-2)
    sec_mean = tf.reduce_mean(history_sec_emb, axis=-2)
    return chap_mean, sec_mean
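
Note: the concat-then-reshape pattern above is equivalent to stacking the per-day vectors along a new time axis. A minimal sketch of that identity (illustrative only; HIS_DAYS = 14 and EMBEDDING_DIM = 128 match the constants used above):

import tensorflow as tf  # TF 1.x graph mode assumed, as in the snippets

B, HIS_DAYS, EMBEDDING_DIM = 2, 14, 128
day_vecs = [tf.random_normal([B, EMBEDDING_DIM]) for _ in range(HIS_DAYS)]

flat = tf.concat(day_vecs, axis=-1)                            # B x (14*128)
via_reshape = tf.reshape(flat, [-1, HIS_DAYS, EMBEDDING_DIM])  # B x 14 x 128
via_stack = tf.stack(day_vecs, axis=1)                         # B x 14 x 128

with tf.Session() as sess:
    a, b = sess.run([via_reshape, via_stack])
    assert (a == b).all()  # same values, same day ordering either way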
Code example #2
def get_history_din_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))

    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"

        key_s = "history_" + fir + "_sec_embedded"

        history_cha_embedded.append(get_rnn_sum(getattr(self, key_c),
                                                "rnncha"))
        history_sec_embedded.append(get_rnn_sum(getattr(self, key_s),
                                                "rnnsec"))

    # history_*_embedded is a list of T = 14 per-day tensors of shape B x N;
    # tf.transpose stacks it to T x B x N and reorders to B x T x N
    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])
    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    # query: today's chapter+section summary; keys: the 14-day history
    attention_cha_output = din_attention(
        tf.concat([
            get_rnn_sum(self.today_chapters_embedded, "rnncha"),
            get_rnn_sum(self.today_sections_embedded, "rnnsec")
        ], axis=-1),
        tf.concat([history_cha_emb, history_sec_emb], axis=-1),
        ATTENTION_SIZE,
        self.history_mask_cha_ph,
        stag="cha")

    att_fea1 = tf.reduce_sum(attention_cha_output, -2)

    #atte_out = tf.concat([att_fea1,att_fea2],axis=-1)

    return att_fea1
Code example #3
def get_rnn_sum(input_seq, name='cha'):

    #with tf.name_scope("GRU"):
    num_layers = 2
    HIDDEN_DIM = 128
    KEEP_PROB = 0.8
    with tf.name_scope('cell'), tf.variable_scope("cell", reuse=tf.AUTO_REUSE):

        def build_cell(n, m):
            cell = tf.nn.rnn_cell.GRUCell(n)
            cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=m)
            return cell

        num_units = [HIDDEN_DIM // 2, HIDDEN_DIM // 2]

        cell_fw = [build_cell(n, KEEP_PROB) for n in num_units]
        cell_bw = [build_cell(n, KEEP_PROB) for n in num_units]

    with tf.name_scope('gru'), tf.variable_scope("gru", reuse=tf.AUTO_REUSE):
        biout, output_fw, output_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
            cell_fw, cell_bw, inputs=input_seq, dtype=tf.float32, scope=name)

        # concatenate the bi-GRU outputs with the raw inputs (a residual-style
        # skip), then sum over the time axis to get one vector per example
        rnnoutput = tf.reduce_sum(tf.concat([biout, input_seq], axis=-1),
                                  axis=-2)

    return rnnoutput
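
A quick shape check for get_rnn_sum as written above (a sketch; it assumes a TF 1.x runtime, since the function uses tf.contrib, and a B x T x D input with D = 128):

import tensorflow as tf  # TF 1.x runtime assumed

seq = tf.zeros([4, 7, 128])          # B=4 examples, T=7 steps, D=128 dims
out = get_rnn_sum(seq, name='demo')
# bi-GRU output (2 * 64 units) concatenated with the 128-dim input, then
# summed over time:
print(out.get_shape().as_list())     # [4, 256]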
Code example #4
    def __call__(self, i_to_s, state, scope="DiagonalBiLSTMCell"):

        # state packs [c, h] side by side along axis 1
        c_prev = tf.slice(state, [0, 0], [-1, self._num_units])
        h_prev = tf.slice(state, [0, self._num_units], [-1, self._num_units])

        with tf.compat.v1.variable_scope(scope):

            conv1d_inputs = tf.reshape(
                h_prev, [-1, self._height, 1, self._hidden_dims],
                name='conv1d_inputs')

            conv_s_to_s = conv1d(conv1d_inputs,
                                 4 * self._hidden_dims,
                                 2,
                                 scope='s_to_s')
            s_to_s = tf.reshape(conv_s_to_s,
                                [-1, self._height * self._hidden_dims * 4])

            # note: all four gates, including the candidate g, go through a
            # sigmoid here (a standard LSTM would use tanh for g)
            lstm_matrix = tf.sigmoid(s_to_s + i_to_s)

            i, g, f, o = tf.split(lstm_matrix, 4, 1)

            c = f * c_prev + i * g
            h = tf.multiply(o, tf.tanh(c), name='hid')

        new_state = tf.concat([c, h], 1)
        return h, new_state
Code example #5
def get_history_sum_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key_c = "history_" + fir + "_chap_ph"
        embed_key_c = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key_c,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key_c)))
    
        key_s = "history_" + fir + "_sec_ph"
        embed_key_s = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key_s,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key_s)))

        key_st = "style_" + fir + "_ph"
        embed_key_st = "history_" + fir + "_sty_embedded"
        setattr(
            self, embed_key_st,
            tf.nn.embedding_lookup(self.style_embeddings_var,
                                   getattr(self, key_st)))
            

    # summarize each day's sequence into a single vector per example, then
    # concatenate all 14 days along the last axis
    chap = get_rnn_sum(self.history_one_chap_embedded, "rnncha")
    sec = get_rnn_sum(self.history_one_sec_embedded, "rnnsec")
    sty = self.history_one_sty_embedded

    # remaining days, 'fourteen' down to 'two' ('one' seeded the accumulators)
    for fir in his_days[:0:-1]:
        key_c = "history_" + fir + "_chap_embedded"
        chap = tf.concat([chap, get_rnn_sum(getattr(self, key_c), "rnncha")], axis=-1)
        key_s = "history_" + fir + "_sec_embedded"
        sec = tf.concat([sec, get_rnn_sum(getattr(self, key_s), "rnnsec")], axis=-1)
        
        key_st = "history_" + fir + "_sty_embedded"
        sty = tf.concat([sty, getattr(self, key_st)], axis=-1)
    
    history_chap_emb = tf.reshape(chap, [-1, HIS_DAYS, EMBEDDING_DIM])
    history_sec_emb = tf.reshape(sec, [-1, HIS_DAYS, EMBEDDING_DIM])
    history_sty_emb = tf.reshape(sty, [-1, HIS_DAYS, EMBEDDING_DIM])

    chap_mean = tf.reduce_mean(history_chap_emb, axis=-2)
    sec_mean = tf.reduce_mean(history_sec_emb, axis=-2)
    sty_mean = tf.reduce_mean(history_sty_emb, axis=-2)
    return tf.concat([chap_mean, sec_mean, sty_mean], axis=-1)
Code example #6
def get_mask_zero_embedded(var_em, var_ph):
    # id 0 is the padding id: those positions get an all-zero embedding
    mask = tf.equal(var_ph, 0)
    # broadcast the B x T boolean mask to B x T x EMBEDDING_DIM
    mask2 = tf.concat(
        [tf.expand_dims(~mask, -1) for i in range(EMBEDDING_DIM)], -1)

    rst = tf.where(
        mask2, tf.nn.embedding_lookup(var_em, var_ph),
        tf.zeros([tf.shape(var_ph)[0],
                  tf.shape(var_ph)[1], EMBEDDING_DIM]))
    return rst
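
A minimal check of the masking behavior (a sketch with a toy vocabulary; EMBEDDING_DIM = 4 and the variable name "demo_emb" are placeholders, and a TF 1.x session is assumed):

import tensorflow as tf  # TF 1.x graph mode assumed

EMBEDDING_DIM = 4                    # placeholder dimension for the demo
emb = tf.get_variable("demo_emb", [10, EMBEDDING_DIM])
ids = tf.constant([[3, 0, 7]])       # id 0 marks padding
out = get_mask_zero_embedded(emb, ids)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(out)[0, 1])       # all zeros for the padded slot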
Code example #7
def unskew(inputs, width=None, scope="unskew"):

    with tf.compat.v1.name_scope(scope):

        batch, height, skewed_width, channel = inputs.get_shape().as_list()
        width = width if width else height

        new_rows = []
        rows = tf.split(inputs, height, 1)

        for idx, row in enumerate(rows):
            new_rows.append(tf.slice(row, [0, 0, idx, 0], [-1, -1, width, -1]))

        outputs = tf.concat(new_rows, 1, name="output")

    return outputs
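
For context, a sketch of the skew() counterpart that unskew() reverses in diagonal-LSTM models: row i is shifted right by i columns, widening B x H x W x C to B x H x (W+H-1) x C. This is an assumption about the paired helper, not code from this repository:

import tensorflow as tf

def skew(inputs, scope="skew"):
    with tf.compat.v1.name_scope(scope):
        batch, height, width, channel = inputs.get_shape().as_list()
        rows = tf.split(inputs, height, 1)
        new_rows = []
        for idx, row in enumerate(rows):
            # pad idx zeros on the left and height - 1 - idx on the right
            new_rows.append(
                tf.pad(row, [[0, 0], [0, 0], [idx, height - 1 - idx], [0, 0]]))
        return tf.concat(new_rows, 1, name="output")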
Code example #8
    def build_fcn_net(self, inp, use_dice=False):
        with self.graph.as_default():
            self.saver = tf.train.Saver(max_to_keep=1)

            with tf.name_scope("Out"):
                bn1 = tf.layers.batch_normalization(inputs=inp, name='bn1')
                dnn1 = tf.layers.dense(bn1, 200, activation=None, name='f1')
                if use_dice:
                    dnn1 = dice(dnn1, name='dice_1')
                else:
                    dnn1 = prelu(dnn1, 'prelu1')

                dnn2 = tf.layers.dense(dnn1, 80, activation=None, name='f2')
                if use_dice:
                    dnn2 = dice(dnn2, name='dice_2')
                else:
                    dnn2 = prelu(dnn2, 'prelu2')
                dnn3 = tf.layers.dense(dnn2, 2, activation=None, name='f3')
                self.y_hat = tf.nn.softmax(dnn3) + 1e-8  # epsilon keeps tf.log finite

            with tf.name_scope('Metrics'):
                # Cross-entropy loss and optimizer initialization.
                # Targets of examples with core_type_ph == 1 are up-weighted by
                # 1.2; note that target_ph_coe is built here but the loss below
                # still uses the unweighted target_ph.
                coe = tf.constant([1.2, 1.2])
                coe_mask = tf.equal(self.core_type_ph, 1)
                coe_mask2 = tf.concat(
                    [tf.expand_dims(coe_mask, -1) for i in range(2)], -1)
                self.target_ph_coe = tf.where(coe_mask2, self.target_ph * coe,
                                              self.target_ph)

                ctr_loss = -tf.reduce_mean(tf.log(self.y_hat) * self.target_ph)
                self.loss = ctr_loss
                # tf.summary.scalar('loss', self.loss)
                self.optimizer = tf.train.AdamOptimizer(
                    learning_rate=self.lr_ph).minimize(self.loss)
                # self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr_ph).minimize(self.loss)
                # Accuracy metric
                self.accuracy = tf.reduce_mean(
                    tf.cast(tf.equal(tf.round(self.y_hat), self.target_ph),
                            tf.float32))
                # tf.summary.scalar('accuracy', self.accuracy)

            self.merged = tf.summary.merge_all()
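
build_fcn_net calls prelu and dice helpers that are not shown here. For reference, a typical DIN-style prelu, offered as a sketch of what the call above likely assumes (the variable name and initializer are guesses, TF 1.x assumed):

def prelu(x, scope=''):
    # parametric ReLU with a learned negative slope per channel
    with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
        alpha = tf.get_variable("alpha",
                                shape=[x.get_shape()[-1]],
                                dtype=x.dtype,
                                initializer=tf.constant_initializer(0.1))
        return tf.maximum(0.0, x) + alpha * tf.minimum(0.0, x)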
Code example #9
def diagonal_bilstm(inputs, scope='diagonal_bilstm'):
    with tf.compat.v1.variable_scope(scope):

        def reverse(inputs):
            return tf.reverse(inputs, [2])

        output_state_fw = diagonal_lstm(inputs, scope='output_state_fw')
        output_state_bw = reverse(
            diagonal_lstm(reverse(inputs), scope='output_state_bw'))

        batch, height, width, channel = output_state_bw.get_shape().as_list()

        # replace the last row of the backward output with zeros before adding
        # it to the forward output
        output_state_bw_except_last = tf.slice(output_state_bw, [0, 0, 0, 0],
                                               [-1, height - 1, -1, -1])
        output_state_bw_only_last = tf.slice(output_state_bw,
                                             [0, height - 1, 0, 0],
                                             [-1, 1, -1, -1])
        dummy_zeros = tf.zeros_like(output_state_bw_only_last)

        output_state_bw_with_last_zeros = tf.concat(
            [output_state_bw_except_last, dummy_zeros], 1)

        return output_state_fw + output_state_bw_with_last_zeros
Code example #10
    def __init__(self, *, use_dice=False):
        super().__init__(use_dice=use_dice)
        self.other_inputs()

        teacher = [
            #self.teacher_id_embedded,
            self.province_id_embedded,
            self.city_id_embedded,
            self.core_type_embedded,
            self.student_count_embedded,
        ]
        # 0-4
        clazz = [
            #self.class_id_embedded,
            self.edition_id_embedded,
            self.grade_id_embedded,
            self.class_student_embedded,
            self.cap_avg_embedded,
            self.cap_max_embedded,
            self.cap_min_embedded,
        ]
        # 5-11
        study = [
            self.study_vector_embedded,
            self.gap_days_embedded,
        ]
        # 12-13
        submit = [self.month_submit_rate_embedded]
        # 14
        capacity = [self.region_capacity_embedded]
        # 15
        prefer = [
            self.prefer_assign_time_avg_embedded,
            self.prefer_assign_time_var_embedded,
            self.prefer_assign_rank_avg_embedded,
            self.prefer_assign_rank_var_embedded,
        ]
        # 16-19
        register = [self.register_diff_embedded]
        # 20
        homeworkcount = [self.homework_count_embedded]
        # 21
        weekcount = [self.week_count_embedded]
        # 22
        lastday = [self.lastday_count_embedded]
        # 23
        study_analysis = [
            self.analysis_avg_times_embedded,
            self.analysis_avg_exp_score_embedded,
            self.analysis_avg_rate_embedded,
            self.analysis_avg_exp_level_embedded,
        ]


        o = teacher + clazz + study + submit + \
            capacity + prefer + register + homeworkcount + weekcount + lastday+study_analysis

        # Concatenate pairwise rather than calling tf.concat(o, axis=-1) once,
        # to work around https://github.com/tensorflow/tensorflow/issues/24816.
        # (style, homework, and reflect below do not need this workaround.)
        others = o[0]
        for i in o[1:]:
            others = tf.concat([others, i], axis=-1)
        others = [others]

        # style = []
        # for fir in ["1", "2", "3", "4"]:
        #     for sec in ["100", "010", "001", "110", "101", "011", "111"]:
        #         embed_key = "style_" + fir + "0" + sec + "_embedded"
        #         style.append(getattr(self, embed_key))

        homework = []
        homework.append(self.today_style_embedded)
        homework.append(tf.concat([self.today_cha_rnn, self.today_sec_rnn],
                                  axis=-1))
        homework.append(self.history_chap_embedded)
        homework.append(self.history_chap_embedded *
                        tf.concat([self.today_cha_rnn, self.today_sec_rnn,
                                   self.today_style_embedded], axis=-1))

        reflect = []
        reflect.append(self.ref_rnn)

        with self.graph.as_default():
            with tf.name_scope("Concat"):
                inps = tf.concat(others + homework + reflect, -1)
        self.build_fcn_net(inps, self.use_dice)
Code example #11
def get_history_bgru_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))

    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"

        key_s = "history_" + fir + "_sec_embedded"
        # each day's padded B x T x N sequence is summarized to one B x N
        # vector; the list of 14 such tensors is transposed from 14 x B x N
        # to B x 14 x N below
        history_cha_embedded.append(get_rnn_sum(getattr(self, key_c),
                                                "rnncha"))
        history_sec_embedded.append(get_rnn_sum(getattr(self, key_s),
                                                "rnnsec"))

    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])
    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    with tf.name_scope("GRU"):
        num_layers = 2
        HIDDEN_DIM = 128
        KEEP_PROB = 0.8
        with tf.name_scope('cell'):

            def build_cell(n, m):
                cell = tf.nn.rnn_cell.GRUCell(n)
                cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=m)
                return cell

            num_units = [HIDDEN_DIM // 2, HIDDEN_DIM]

            cell_fw = [build_cell(n, KEEP_PROB) for n in num_units]
            cell_bw = [build_cell(n, KEEP_PROB) for n in num_units]

        with tf.name_scope('gru'):
            biout, output_fw, output_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                cell_fw,
                cell_bw,
                inputs=tf.concat([history_cha_emb, history_sec_emb], axis=-1),
                dtype=tf.float32,
                scope='cha')
            rnnoutput = tf.reduce_sum(biout, axis=-2)

    return rnnoutput
Code example #12
def get_history_din_embedded(self):
    # TODO: add mask info for this operation
    his_days = [
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen'
    ]

    for fir in his_days:
        key = "history_" + fir + "_chap_ph"
        embed_key = "history_" + fir + "_chap_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.chapters_embeddings_var,
                                   getattr(self, key)))
    for fir in his_days:
        key = "history_" + fir + "_sec_ph"
        embed_key = "history_" + fir + "_sec_embedded"
        setattr(
            self, embed_key,
            get_mask_zero_embedded(self.sections_embeddings_var,
                                   getattr(self, key)))

    history_cha_embedded = []
    history_sec_embedded = []
    for fir in his_days[::-1]:
        key_c = "history_" + fir + "_chap_embedded"

        key_s = "history_" + fir + "_sec_embedded"

        history_cha_embedded.append(get_rnn_sum(getattr(self, key_c),
                                                "rnncha"))
        history_sec_embedded.append(get_rnn_sum(getattr(self, key_s),
                                                "rnnsec"))

    # history_*_embedded is a list of T = 14 per-day tensors of shape B x N;
    # tf.transpose stacks it to T x B x N and reorders to B x T x N
    history_cha_emb = tf.transpose(history_cha_embedded, [1, 0, 2])
    history_sec_emb = tf.transpose(history_sec_embedded, [1, 0, 2])

    # DIEN: interest extraction (rnn_1), attention over the extracted
    # interests, then interest evolution with an attentional GRU (rnn_2)
    with tf.name_scope('rnn_1'):
        rnn_outputs, _ = dynamic_rnn(GRUCell(HIDDEN_SIZE * 2),
                                     inputs=tf.concat(
                                         [history_cha_emb, history_sec_emb],
                                         axis=-1),
                                     sequence_length=self.history_len_ph,
                                     dtype=tf.float32,
                                     scope="gru1")
    with tf.name_scope('Attention_layer_1'):
        att_outputs, alphas = din_fcn_attention(
            tf.concat([
                get_rnn_sum(self.today_chapters_embedded, "rnncha"),
                get_rnn_sum(self.today_sections_embedded, "rnnsec")
            ], axis=-1),
            rnn_outputs,
            ATTENTION_SIZE,
            self.history_mask_cha_ph,
            scope="1_1",
            softmax_stag=1,
            stag='1_1',
            mode='LIST',
            return_alphas=True)
    with tf.name_scope('rnn_2'):
        rnn_outputs2, final_state2 = dynamic_rnn(
            VecAttGRUCell(HIDDEN_SIZE * 2),
            inputs=rnn_outputs,
            att_scores=tf.expand_dims(alphas, -1),
            sequence_length=self.history_len_ph,
            dtype=tf.float32,
            scope="gru2")

    return final_state2