Example #1
def model_fn(features,labels,mode,params):
    def cnn_encoder(inputs,reuse=False,training=True):
        # inputs: (batch_size, seq_len, embedding_dim) from the embedding lookup
        with tf.variable_scope("cnn_encoder",reuse=reuse) \
            as cnnscope:
            # conv1d takes a single kernel width (the original [2,3,4] is not a valid kernel_size)
            conv = tf.layers.conv1d(inputs=inputs,
                              reuse=reuse,
                              filters=32,
                              kernel_size=3,
                              padding="same",
                              activation=tf.nn.relu)
            # max-pool over the time axis -> (batch_size, filters)
            pool = tf.reduce_max(input_tensor=conv,axis=1)
            dropout_pool = tf.layers.dropout(inputs=pool,
                                               rate=0.2,training=training)
        return dropout_pool

    def lstm_encoder(inputs,reuse=True,training=True):
        initializer = tf.contrib.layers.xavier_initializer()
        # separate forward/backward cells (reusing one cell object raises an error)
        lstm_fw = tf.nn.rnn_cell.LSTMCell(300,initializer=initializer)
        lstm_bw = tf.nn.rnn_cell.LSTMCell(300,initializer=initializer)
        with tf.variable_scope("lstm_encoder",reuse=reuse) \
            as lstmscope:
            outputs,_ = tf.nn.bidirectional_dynamic_rnn(lstm_fw,lstm_bw,
                                                    inputs,
                                                    dtype=tf.float32,
                                                    scope=lstmscope)
            output_fw,output_bw  = outputs
            concate_output = tf.concat([output_fw,output_bw],axis=-1)
            # max-pool over time so the encoder returns a fixed-size (batch_size, 600) vector
            concate_output = tf.reduce_max(concate_output,axis=1)
        return concate_output
    

    embeddings = tf.Variable(params["embedding_initializer"],trainable=False)
    sent1_embed = tf.nn.embedding_lookup(embeddings,features['sent1'])
    sent2_embed = tf.nn.embedding_lookup(embeddings,features['sent2'])
    training = (mode == tf.estimator.ModeKeys.TRAIN)
    sent1_embed_drop = tf.layers.dropout(inputs=sent1_embed,\
                                        rate=0.2,
                                        training=training)
    sent2_embed_drop = tf.layers.dropout(inputs=sent2_embed,
                                         rate=0.2,
                                         training=training)
    sent1_cnn = cnn_encoder(sent1_embed_drop,False,training)
    sent2_cnn = cnn_encoder(sent2_embed_drop,True,training)
    sent1_lstm = lstm_encoder(sent1_embed_drop,False,training)
    sent2_lstm = lstm_encoder(sent2_embed_drop,True,training)
    # concatenate the CNN and LSTM encodings of both sentences
    # (the original concatenated the raw 3-D embeddings, whose shapes do not match)
    concat_embed = tf.concat([sent1_cnn,sent1_lstm,sent2_cnn,sent2_lstm],axis=-1)
    logits = tf.layers.dense(concat_embed,3,activation=None)  # raw logits for the head

    if labels is not None:
        labels = tf.reshape(labels,[-1,1])
    optimizer = tf.train.AdamOptimizer()
    def _train_op_fn(loss):
        return optimizer.minimize(loss,tf.train.get_global_step())
    
    # `head` is not defined in this snippet; a 3-class head is assumed here
    head = tf.contrib.estimator.multi_class_head(n_classes=3)
    return head.create_estimator_spec(
        features=features,
        labels=labels,
        mode=mode,
        logits=logits, 
        train_op_fn=_train_op_fn)
    
Example #2
    def call(self, words, features, hidden, object_seq):
        '''
          words shape: (batch_size, 1)
          features shape: (batch_size, 64, embedding_dim)
          object_seq: (batch_size, max_object_num)
        '''

        # apply attention later
        # context_vector, attention_weights = self.attention(features, hidden)

        # get the word embedding presentation
        x = self.embedding(words)  # ( batch_size, 1, embedding_dim)

        # integrating knowledge
        # (batch_size, max_object_num, embedding_dim)
        object_embedding = self.embedding(object_seq)
        similar_object_index = self.get_similar_object(
            object_embedding, x)  # (batch, 1, embedding_dim)

        # word indices of the retrieved knowledge, 0 where nothing is found
        external_know = np.zeros((x.shape[0], 1), dtype=np.int32)
        for i, obj in enumerate(similar_object_index):
            obj = self.tokenizer.index_word[object_seq[i, obj]]
            scene, _ = get_knowledge(obj)
            if len(scene) != 0:
                # external knowledge was found for this object
                external_know[i] = self.tokenizer.word_index[scene[0]]
        external_know = self.embedding(
            external_know)  # (batch, 1, embedding_dim)
        external_know = tf.reshape(
            external_know, (-1, external_know.shape[-1]))

        # apply GRU
        # output: (batch_size, 1, units) state: batch_size, units
        output, state = self.gru(x)

        # apply sentinel
        s_t = self.sentinel(x, output, state)  # (batch_size, units)

        # apply attention
        context_vector, attention_weights, beta_t = self.attention(
            features, output, s_t)  # context_vector: (batch_size, units)

        # calculate c_hat_t
        beta_t = tf.expand_dims(beta_t, axis=-1)
        context_vector_new = beta_t * s_t + \
            (1 - beta_t) * context_vector  # (batch_size, units)

        # alignment
        x = tf.concat([tf.expand_dims(context_vector_new, 1),
                       output], axis=-1)  # (batch_size, 1, units)

        # project the aligned vector (applying fc1 to `output` would leave the alignment above unused)
        x = self.fc1(x)  # (batch_size, 1, units)
        x = tf.reshape(x, (-1, x.shape[2]))  # (batch_size, units)
        x = self.fc2(tf.concat([x, external_know], axis=-1))

        return x, output, attention_weights
    def call(self, inputs, encoder_hidden_state, encoder_states, train=False):
        if train:
            initial_states = encoder_hidden_state
            all_states = []
            batch_size = initial_states[0][0].shape[0]
            output = []
            for current_word_index in range(inputs.shape[1]):
                current_word = inputs[:, current_word_index]
                current_word = tf.expand_dims(current_word, 1)
                all_state = self.embedding(current_word)
                current_initial_state = []
                for lstm_layer, initial_state, encoder_state in zip(
                        self.lstm_layers, initial_states, encoder_states):
                    context_vec = self.attention(
                        (tf.expand_dims(initial_state[0], 1), encoder_state),
                        0)
                    all_state = tf.concat([all_state, context_vec], axis=-1)
                    ####### fix this
                    all_state, h, c = lstm_layer(all_state,
                                                 initial_state=initial_state)
                    current_initial_state.append((h, c))
                context_vec = self.attention(
                    (tf.expand_dims(h, 1), encoder_states), 0)
                all_state = tf.concat([context_vec, all_state], 2)
                current_word = self.fully_connected(all_state)
                output.append(current_word)

                initial_states = current_initial_state
            output = tf.concat(output, 1)
            return output
        else:
            initial_states = encoder_hidden_state
            all_states = []
            batch_size = initial_states[0][0].shape[0]
            current_word = np.ones((batch_size, 1))
            output = []
            for _ in range(self.max_length - 1):
                all_state = self.embedding(current_word)
                current_initial_state = []
                for lstm_layer, initial_state in zip(self.lstm_layers,
                                                     initial_states):
                    all_state, h, c = lstm_layer(all_state,
                                                 initial_state=initial_state)
                    current_initial_state.append((h, c))
                context_vec = self.attention(
                    (tf.expand_dims(h, 1), encoder_states), 0)
                all_state = tf.concat([context_vec, all_state], 2)

                current_word = self.fully_connected(all_state)
                current_word = tf.argmax(current_word, axis=2)
                output.append(current_word)

                initial_states = current_initial_state
            output = tf.concat(output, 1)
            return output
Example #4
 def __init__(self, W_embedding, settings):
     self.model_name = settings.model_name
     self.title_len = settings.title_len
     self.content_len = settings.content_len
     self.filter_sizes = settings.filter_sizes
     self.n_filter = settings.n_filter
     self.n_filter_total = self.n_filter * len(self.filter_sizes)#256*5
     self.n_class = settings.n_class
     self.fc_hidden_size = settings.fc_hidden_size
     self._global_step=tf.Variable(0,trainable=False,name='Global_Step')
     self.update_emas=list()
     #placeholders
     self._tst=tf.placeholder(tf.bool)
     self._keep_prob=tf.placeholder(tf.float32,[])
     self._batch_size=tf.placeholder(tf.int32,[])
     
     
     with tf.name_scope('Inputs'):
         self._X1_inputs=tf.placeholder(tf.int64,[None,self.title_len],name='X1_inputs')
         self._X2_inputs=tf.placeholder(tf.int64,[None,self.content_len],name='X2_inputs')
         self._y_inputs=tf.placeholder(tf.float32,[None,self.n_class],name='y_inputs')
     with tf.variable_scope('embedding'):
         self.embedding=tf.get_variable(name='embedding',shape=W_embedding.shape,
                                        initializer=tf.constant_initializer(W_embedding),trainable=True)
     self.embedding_size=W_embedding.shape[1]#1024
     
     with tf.variable_scope('cnn_text'):
         output_title=self.cnn_inference(self._X1_inputs, self.title_len)
     with tf.variable_scope('cnn_content'):
         output_content=self.cnn_inference(self._X2_inputs, self.content_len)
     with tf.variable_scope('fc_bn_layer'):
         output=tf.concat([output_title,output_content],axis=1)#batch_size*2560
         W_fc=self.weight_variable([2*self.n_filter_total,self.fc_hidden_size],name='Weight_fc')
         tf.summary.histogram('Weight_fc',W_fc)
         
         h_fc=tf.matmul(output,W_fc,name='h_fc')#batch_size*fc_hidden_size
         beta_fc=tf.Variable(tf.constant(0.1,tf.float32,shape=[self.fc_hidden_size],name='beta_fc'))
         tf.summary.histogram('beta_fc',beta_fc)
         fc_bn,update_ema_fc=self.batchnorm(h_fc,beta_fc,convolutional=False)
         self.update_emas.append(update_ema_fc)
         self.fc_bn_relu=tf.nn.relu(fc_bn,name='relu')
         fc_bn_drop=tf.nn.dropout(self.fc_bn_relu,self.keep_prob)
     with tf.variable_scope('out_layer'):
         W_out=self.weight_variable([self.fc_hidden_size,self.n_class],name='Weight_out')
         tf.summary.histogram('Weight_out',W_out)
         b_out=self.bias_variable([self.n_class],name='bias_out')
         tf.summary.histogram('bias_out',b_out)
         self._y_pred=tf.nn.xw_plus_b(fc_bn_drop,W_out,b_out,name='y_pred')
     with tf.name_scope('loss'):
         self._loss=tf.reduce_mean(
                 tf.nn.sigmoid_cross_entropy_with_logits(logits=self._y_pred,labels=self._y_inputs))
         tf.summary.scalar('loss',self._loss)
     self.saver=tf.train.Saver(max_to_keep=2)
Example #5
    def decode_bbox(self, conv_bbox, anchors, stride):

        conv_shape = tf.shape(conv_bbox)
        batch_size = conv_shape[0]
        output_size = conv_shape[1]
        anchor_per_scale = len(anchors)

        conv_bbox = tf.reshape(conv_bbox,
                               (batch_size, output_size, output_size,
                                anchor_per_scale, 5 + self.num_class))

        conv_bbox_raw_xy = conv_bbox[:, :, :, :, 0:2]
        conv_bbox_raw_hw = conv_bbox[:, :, :, :, 2:4]
        conv_bbox_raw_conf = conv_bbox[:, :, :, :, 4:5]
        conv_bbox_raw_prob = conv_bbox[:, :, :, :, 5:]

        #x = tf.tile(tf.range(13, dtype=tf.int32)[:, tf.newaxis], [1,13])
        x = tf.range(13, dtype=tf.int32)
        x = tf.expand_dims(x, axis=-1)
        x = tf.tile(x, [1, 13])

        #y = tf.tile(tf.range(13, dtype=tf.int32)[:, tf.newaxis], [1,13])
        y = tf.range(13, dtype=tf.int32)
        y = tf.expand_dims(y, axis=-1)
        y = tf.tile(y, [1, 13])

        #xy_grid = tf.concat([x[:, :, tf.newaxis], y[:, :, tf.newaxis]], axis=-1)
        xy_grid = tf.concat(
            [tf.expand_dims(x, axis=-1),
             tf.expand_dims(y, axis=-1)], axis=-1)
        xy_grid = tf.tile(xy_grid[tf.newaxis, :, :, tf.newaxis, :],
                          [batch_size, 1, 1, anchor_per_scale, 1])
        xy_grid = tf.cast(xy_grid, tf.float32)

        pred_xy = (tf.sigmoid(conv_bbox_raw_xy) + xy_grid) * stride
        pred_hw = (tf.exp(conv_bbox_raw_hw) * anchors) * stride

        pred_xywh = tf.concat([pred_xy, pred_hw], axis=-1)

        pred_conf = tf.sigmoid(conv_bbox_raw_conf)

        pred_prob = tf.sigmoid(conv_bbox_raw_prob)

        result = tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)

        return result
Example #6
    def run_head(self, proposals, stage):
        """
        Args:
            proposals: BoxProposals
            stage: 0, 1, 2
        Returns:
            FastRCNNHead
            Nx4, updated boxes
        """
        reg_weights = tf.constant(cfg.CASCADE.BBOX_REG_WEIGHTS[stage],
                                  dtype=tf.float32)  # the cascade bbox-regression weights as a constant float tensor
        pooled_feature = self.roi_func(proposals.boxes)  # N,C,S,S

        # FIXME
        if self.roi_func_extra is not None:
            pooled_feature = tf.concat(
                [pooled_feature,
                 self.roi_func_extra(proposals.boxes)], 0)
        pooled_feature = self.scale_gradient(pooled_feature)  # not sure why this is reassigned here

        head_feature = self.fastrcnn_head_func('head', pooled_feature)
        # lines 82-87 are not fully understood...
        # changed by Paul
        label_logits, box_logits = fastrcnn_outputs(
            'outputs_new',
            head_feature,
            self.num_classes,
            class_agnostic_regression=True)
        head = FastRCNNHead(proposals, box_logits, label_logits, self.gt_boxes,
                            reg_weights)

        refined_boxes = head.decoded_output_boxes_class_agnostic()
        refined_boxes = clip_boxes(refined_boxes, self.image_shape2d)

        # tf.stop_gradient: stops gradient computation; args: a tensor plus an op name
        return head, tf.stop_gradient(refined_boxes, name='output_boxes')
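
The docstring above notes that run_head returns both a FastRCNNHead and the Nx4 refined boxes, which is exactly what a cascade needs in order to feed one stage's output into the next. Below is a minimal sketch of how the three stages might be chained; initial_proposals and the BoxProposals wrapper are assumptions taken from the tensorpack Cascade R-CNN example and do not appear in this snippet, and the loop is meant to live inside a method of the same class.

# Hypothetical cascade loop (sketch only, inside a method of the same class).
# `initial_proposals` and `BoxProposals` are assumed, not defined in this snippet.
proposals = initial_proposals
heads = []
for stage in range(3):  # stages 0, 1, 2
    head, refined_boxes = self.run_head(proposals, stage)
    heads.append(head)
    # the refined boxes of this stage become the proposals of the next stage
    proposals = BoxProposals(refined_boxes)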
Example #7
    def generator(self, z, y=None):
        with tensorflow.variable_scope('generator') as scope:
            if not self.y_dim:
                s_h, s_w = self.output_height, self.output_width
                s_h2, s_w2 = conv_out_size_same(s_h,
                                                2), conv_out_size_same(s_w, 2)
                s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(
                    s_w2, 2)
                s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(
                    s_w4, 2)
                s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(
                    s_w8, 2)

                self.z_, self.h0_w, self.h0_b = linear(z,
                                                       self.gf_dim * 8 *
                                                       s_h16 * s_w16,
                                                       'g_h0_lin',
                                                       with_w=True)
                self.h0 = tensorflow.reshape(
                    self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])
                h0 = tensorflow.nn.relu(self.g_bn0(self.h0))
                self.h1, self.h1_w, self.h1_b = deconv2d(
                    h0, [self.batch_size, s_h8, s_w8, self.gf_dim * 4],
                    name='g_h1',
                    with_w=True)
                h1 = tensorflow.nn.relu(self.g_bn1(self.h1))
                h2, self.h2_w, self.h2_b = deconv2d(
                    h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2],
                    name='g_h2',
                    with_w=True)
                h2 = tensorflow.nn.relu(self.g_bn2(h2))
                h3, self.h3_w, self.h3_b = deconv2d(
                    h2, [self.batch_size, s_h2, s_w2, self.gf_dim * 1],
                    name='g_h3',
                    with_w=True)
                h3 = tensorflow.nn.relu(self.g_bn3(h3))
                h4, self.h4_w, self.h4_b = deconv2d(
                    h3, [self.batch_size, s_h, s_w, self.c_dim],
                    name='g_h4',
                    with_w=True)
                return tensorflow.nn.tanh(h4)
            else:
                s_h, s_w = self.output_height, self.output_width
                s_h2, s_h4 = int(s_h / 2), int(s_h / 4)
                s_w2, s_w4 = int(s_w / 2), int(s_w / 4)

                yb = tensorflow.reshape(y, [self.batch_size, 1, 1, self.y_dim])
                z = tensorflow.concat([z, y], 1)
                h0 = tensorflow.nn.relu(
                    self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin')))
                h0 = tensorflow.concat([h0, y], 1)
                h1 = tensorflow.nn.relu(
                    self.g_bn1(
                        linear(h0, self.gf_dim * 2 * s_h4 * s_w4, 'g_h1_lin')))
                h1 = tensorflow.reshape(
                    h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2])
                h1 = conv_conv_concat(h1, yb)
                h2 = tensorflow.nn.relu(
                    self.g_bn2(
                        deconv2d(
                            h1, [self.batch_size, s_h2, s_w2, self.gf_dim * 2],
                            name='g_h2')))
                h2 = conv_conv_concat(h2, yb)
                return tensorflow.nn.sigmoid(
                    deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim],
                             name='g_h3'))
Example #8
File: model.py  Project: zhijie-ai/chelian
    def __init__(self, user_count, item_count, cate_count, cate_list):
        '''
            uij=(u, i, y, hist_i, sl)
            self.i: uij[1],
            self.y: uij[2],
            self.hist_i: uij[3],
            self.sl: uij[4],
            self.lr: l
        '''

        # self.u = tf.placeholder(tf.int32,[None,],name='user')
        self.i = tf.placeholder(tf.int32, [
            None,
        ], name='item')  #pos or neg id ,target
        self.j = tf.placeholder(tf.int32, [
            None,
        ], name='item_j')
        self.y = tf.placeholder(tf.float32, [
            None,
        ], name='label')
        self.hist_i = tf.placeholder(tf.int32, [None, None], name='history_i')
        self.s1 = tf.placeholder(tf.int32, [
            None,
        ], name='sequence_lenght')

        self.lr = tf.placeholder(tf.float64, name='learning_rate')

        hidden_units = 32

        # user_emb_w = tf.get_variable('user_emb_w',[user_count,hidden_units])
        item_emb_w = tf.get_variable('item_emb_w',
                                     [item_count, hidden_units // 2])
        item_b = tf.get_variable('item_b', [item_count],
                                 initializer=tf.constant_initializer(0.0))

        cate_emb_w = tf.get_variable('cate_emb_w',
                                     [cate_count, hidden_units // 2])
        cate_list = tf.convert_to_tensor(cate_list, dtype=tf.int64)

        # u_emb=tf.nn.embedding_lookup(user_emb_w,self.u)

        # ic maps each item id to its category id
        self.ic = tf.gather(cate_list, self.i)
        i_emb = tf.concat(values=[
            tf.nn.embedding_lookup(item_emb_w, self.i),
            tf.nn.embedding_lookup(cate_emb_w, self.ic)
        ],
                           axis=1)

        i_b = tf.gather(item_b, self.i)

        self.jc = tf.gather(cate_list, self.j)
        j_emb = tf.concat(values=[
            tf.nn.embedding_lookup(item_emb_w, self.j),
            tf.nn.embedding_lookup(cate_emb_w, self.jc)
        ],
                           axis=1)
        j_b = tf.gather(item_b, self.j)

        self.hc = tf.gather(cate_list, self.hist_i)  # these serve as the user behavior features
        h_emb = tf.concat([
            tf.nn.embedding_lookup(item_emb_w, self.hist_i),
            tf.nn.embedding_lookup(cate_emb_w, self.hc)
        ],
                          axis=2)

        hist = attention(i_emb, h_emb, self.s1)

        hist = tf.layers.batch_normalization(inputs=hist)
        hist = tf.reshape(hist, [-1, hidden_units])
        hist = tf.layers.dense(hist, hidden_units)

        u_emb = hist

        # fcn begin
        din_i = tf.concat([u_emb, i_emb], axis=-1)
        din_i = tf.layers.batch_normalization(inputs=din_i, name='b1')
        d_layer_1_i = tf.layers.dense(din_i, 80, activation=None, name='f1')
        d_layer_1_i = dice(d_layer_1_i, name='dice_1_i')
        d_layer_2_i = tf.layers.dense(d_layer_1_i,
                                      40,
                                      activation=None,
                                      name='f2')
        d_layer_2_i = dice(d_layer_2_i, name='dice_2_i')
        d_layer_3_i = tf.layers.dense(d_layer_2_i,
                                      1,
                                      activation=None,
                                      name='f3')

        din_j = tf.concat([u_emb, j_emb], axis=-1)
        din_j = tf.layers.batch_normalization(inputs=din_j,
                                              name='b1',
                                              reuse=True)
        d_layer_1_j = tf.layers.dense(din_j,
                                      80,
                                      activation=None,
                                      name='f1',
                                      reuse=True)
        d_layer_1_j = dice(d_layer_1_j, name='dice_1_j')
        d_layer_2_j = tf.layers.dense(d_layer_1_j,
                                      40,
                                      activation=None,
                                      name='f2',
                                      reuse=True)
        d_layer_2_j = dice(d_layer_2_j, name='dice_2_j')
        d_layer_3_j = tf.layers.dense(d_layer_2_j,
                                      1,
                                      activation=None,
                                      name='f3',
                                      reuse=True)

        d_layer_3_i = tf.reshape(d_layer_3_i, [-1])
        d_layer_3_j = tf.reshape(d_layer_3_j, [-1])

        x = i_b - j_b + d_layer_3_i - d_layer_3_j  # [B]
        self.logits = i_b + d_layer_3_i

        # logits for all item:
        u_emb_all = tf.expand_dims(u_emb, 1)
        u_emb_all = tf.tile(u_emb_all, [1, item_count, 1])

        all_emb = tf.concat(
            [item_emb_w,
             tf.nn.embedding_lookup(cate_emb_w, cate_list)],
            axis=1)
        all_emb = tf.expand_dims(all_emb, 0)
        all_emb = tf.tile(all_emb, [512, 1, 1])
        din_all = tf.concat([u_emb_all, all_emb], axis=-1)
        din_all = tf.layers.batch_normalization(inputs=din_all,
                                                name='b1',
                                                reuse=True)
        d_layer_1_all = tf.layers.dense(din_all,
                                        80,
                                        activation=None,
                                        name='f1',
                                        reuse=True)
        d_layer_1_all = dice(d_layer_1_all, name='dice_1_all')
        d_layer_2_all = tf.layers.dense(d_layer_1_all,
                                        40,
                                        activation=None,
                                        name='f2',
                                        reuse=True)
        d_layer_2_all = dice(d_layer_2_all, name='dice_2_all')
        d_layer_3_all = tf.layers.dense(d_layer_2_all,
                                        1,
                                        activation=None,
                                        name='f3',
                                        reuse=True)
        d_layer_3_all = tf.reshape(d_layer_3_all, [-1, item_count])

        self.logits_all = tf.sigmoid(item_b + d_layer_3_all)
        # -- fcn end -------

        self.mf_auc = tf.reduce_mean(tf.to_float(x > 0))
        self.score_i = tf.sigmoid(i_b + d_layer_3_i)
        self.score_j = tf.sigmoid(j_b + d_layer_3_j)
        self.score_i = tf.reshape(self.score_i, [-1, 1])
        self.score_j = tf.reshape(self.score_j, [-1, 1])
        self.p_and_n = tf.concat([self.score_i, self.score_j], axis=-1)

        # Step variable
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        self.global_epoch_step = \
            tf.Variable(0, trainable=False, name='global_epoch_step')
        self.global_epoch_step_op = \
            tf.assign(self.global_epoch_step, self.global_epoch_step + 1)

        # loss and train
        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,
                                                    labels=self.y))

        trainable_params = tf.trainable_variables()
        self.train_op = tf.train.GradientDescentOptimizer(
            learning_rate=self.lr).minimize(self.loss)
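
The docstring at the top of this __init__ maps a batch tuple uij = (u, i, y, hist_i, sl) onto the placeholders. A minimal feed sketch under that assumption follows; Model, sess, uij and the count arguments are illustrative placeholders and are not part of the snippet.

# Hypothetical training-step sketch (TF 1.x): `Model` is assumed to wrap the
# __init__ above, and `uij` is a batch tuple (u, i, y, hist_i, sl) as in the docstring.
import tensorflow as tf

model = Model(user_count, item_count, cate_count, cate_list)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss, _ = sess.run(
        [model.loss, model.train_op],
        feed_dict={
            model.i: uij[1],       # target item ids
            model.y: uij[2],       # click labels
            model.hist_i: uij[3],  # user behavior history (item ids)
            model.s1: uij[4],      # history sequence lengths
            model.lr: 1.0,         # learning rate
        })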
Example #9
def build_train(make_obs_ph,
                q_func,
                num_actions,
                optimizer,
                grad_norm_clipping=None,
                gamma=1.0,
                double_q=True,
                scope="deepq",
                reuse=None,
                param_noise=False,
                param_noise_filter_func=None):
    """Creates the train function:

    Parameters
    ----------
    make_obs_ph: str -> tf.placeholder or TfInput
        a function that takes a name and creates a placeholder of input with that name
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    num_actions: int
        number of actions
    reuse: bool
        whether or not to reuse the graph variables
    optimizer: tf.train.Optimizer
        optimizer to use for the Q-learning objective.
    grad_norm_clipping: float or None
        clip gradient norms to this value. If None no clipping is performed.
    gamma: float
        discount rate.
    double_q: bool
        if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
        In general it is a good idea to keep it enabled.
    scope: str or VariableScope
        optional scope for variable_scope.
    reuse: bool or None
        whether or not the variables should be reused. To be able to reuse the scope must be given.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    param_noise_filter_func: tf.Variable -> bool
        function that decides whether or not a variable should be perturbed. Only applicable
        if param_noise is True. If set to None, default_param_noise_filter is used by default.

    Returns
    -------
    act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
    train: (object, np.array, np.array, object, np.array, np.array) -> np.array
        optimize the error in Bellman's equation.
        See the top of the file for details.
    update_target: () -> ()
        copy the parameters from optimized Q function to the target Q function.
        See the top of the file for details.
    debug: {str: function}
        a bunch of functions to print debug data like q_values.
    """
    if param_noise:
        act_f = build_act_with_param_noise(
            make_obs_ph,
            q_func,
            num_actions,
            scope=scope,
            reuse=reuse,
            param_noise_filter_func=param_noise_filter_func)
    else:
        act_f = build_act(make_obs_ph,
                          q_func,
                          num_actions,
                          scope=scope,
                          reuse=reuse)

    with tf.variable_scope(scope, reuse=reuse):
        # set up placeholders
        obs_t_input = make_obs_ph("obs_t")
        act_t_ph = tf.placeholder(tf.int32, [None], name="action")
        rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
        obs_tp1_input = make_obs_ph("obs_tp1")
        done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
        importance_weights_ph = tf.placeholder(tf.float32, [None],
                                               name="weight")

        # q network evaluation
        q_t, act_idxs_t = q_func(obs_t_input.get(), scope="q_func",
                                 reuse=True)  # reuse parameters from act
        q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        scope=tf.get_variable_scope().name +
                                        "/q_func")

        # target q network evaluation
        q_tp1, act_idxs_tp1 = q_func(obs_tp1_input.get(),
                                     scope="target_q_func")
        target_q_func_vars = tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES,
            scope=tf.get_variable_scope().name + "/target_q_func")

        # q scores for actions which we know were selected in the given state.
        q_t_selected = []
        #act_t_ph_list = tf.unstack(act_t_ph, 0)
        for (idx, value) in enumerate(q_t):
            q_t_selected.append(
                tf.gather(
                    value,
                    tf.where(
                        tf.equal(act_idxs_t[idx], tf.gather(act_t_ph,
                                                            idx)))[0]))
        #q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)
        q_t_selected = tf.concat(q_t_selected, axis=0)
        # compute estimate of best possible value starting from state at t + 1
        if double_q:
            q_tp1_using_online_net, act_idxs_tp1_using_online_net = q_func(
                obs_tp1_input.get(), scope="q_func", reuse=True)
            q_tp1_best = []
            for (idx, value) in enumerate(q_tp1_using_online_net):
                q_tp1_best.append(
                    tf.gather(q_tp1[idx], tf.argmax(value, axis=0)))
            #q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
            q_tp1_best = tf.stack(q_tp1_best, axis=0)
        else:
            q_tp1_best = []
            for value in q_tp1:
                q_tp1_best.append(tf.reduce_max(value))
            q_tp1_best = tf.stack(q_tp1_best, axis=0)
        q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best

        # compute RHS of bellman equation
        q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked

        # compute the error (potentially clipped)
        td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
        errors = U.huber_loss(td_error)
        weighted_error = tf.reduce_mean(importance_weights_ph * errors)

        # compute optimization op (potentially with gradient clipping)
        if grad_norm_clipping is not None:
            gradients = optimizer.compute_gradients(weighted_error,
                                                    var_list=q_func_vars)
            for i, (grad, var) in enumerate(gradients):
                if grad is not None:
                    gradients[i] = (tf.clip_by_norm(grad,
                                                    grad_norm_clipping), var)
            optimize_expr = optimizer.apply_gradients(gradients)
        else:
            optimize_expr = optimizer.minimize(weighted_error,
                                               var_list=q_func_vars)

        # update_target_fn will be called periodically to copy Q network to target Q network
        update_target_expr = []
        for var, var_target in zip(
                sorted(q_func_vars, key=lambda v: v.name),
                sorted(target_q_func_vars, key=lambda v: v.name)):
            update_target_expr.append(var_target.assign(var))
        update_target_expr = tf.group(*update_target_expr)

        # Create callable functions
        train = U.function(inputs=[
            obs_t_input, act_t_ph, rew_t_ph, obs_tp1_input, done_mask_ph,
            importance_weights_ph
        ],
                           outputs=td_error,
                           updates=[optimize_expr])
        update_target = U.function([], [], updates=[update_target_expr])

        q_values = U.function([obs_t_input], q_t)

        return act_f, train, update_target, {'q_values': q_values}
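
Because the docstring only describes act, train and update_target abstractly, here is a minimal wiring sketch. It assumes a gym-style env, a model compatible with the modified q_func call sites above (returning both q-values and action indices), a replay_buffer with a sample method, and ObservationInput from baselines.deepq.utils; all of these are assumptions and none are defined in this snippet.

# Hypothetical usage of the returned callables (sketch only).
import numpy as np
import tensorflow as tf

act, train, update_target, debug = build_train(
    make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
    q_func=model,
    num_actions=env.action_space.n,
    optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
    gamma=0.99,
    grad_norm_clipping=10)

# one TD-learning step on a sampled batch (inside the training loop)
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32)
weights = np.ones_like(rewards, dtype=np.float32)  # uniform importance weights
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)

if step % 500 == 0:  # periodically copy the online network into the target network
    update_target()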
Example #10
def system_mat_1st_order_elec( fwd_model, img):
    pp = fwd_model_parameters(fwd_model)
    FC, FF1, FF2, CC1, CC2 = system_mat_fields(fwd_model, elec_imped=False)
    FC_shape =FC.shape.as_list()
    lFC = FC_shape[0]
    n_dim = pp['n_dims']
    dim_n_elem = n_dim*pp['n_elem']

    elem_data = img['elem_data'].reshape(pp['n_elem'], 1)


    if len(elem_data.shape)<3:
        zc_ = np.zeros([pp['n_elec'], 1])
        zc = np.zeros([0, 1])
        for i in range(pp['n_elec']):
            eleci = fwd_model['electrode'][i]
            zc_[i] = eleci.z_contact
            bdy_idx, bdy_area = find_electrode_bdy(pp['boundary'], pp['nodes'], eleci.nodes)
            zc = np.vstack((zc, np.ones([bdy_idx[0].shape[0]*n_dim, 1], dtype=np.float32)*(1.0/zc_[i])))

        # zc = np.tile(zc, [n_dim, 1])
        elem_sigma = tf.contrib.kfac.utils.kronecker_product(tf.constant(elem_data, dtype=tf.float32), tf.ones([n_dim, 1], dtype=tf.float32))
        #elem_sigma = tf.concat([elem_sigma, tf.ones([lFC-dim_n_elem, 1], dtype=tf.float32)], axis=0)
        elem_sigma = tf.concat([elem_sigma, zc], axis=0)


        es_indices = tf.cast(tf.matmul(tf.ones([2,1], dtype=tf.int32), tf.reshape(tf.range(lFC), [1, lFC])), tf.int64)
        es_shape = [lFC, lFC]

        ES = tf.SparseTensor(tf.transpose(es_indices), tf.squeeze(elem_sigma), es_shape)
    else:
        # ToDo: need to test for complex conductivity
        if n_dim==2:
            idx = np.arange(1, dim_n_elem+1, 2)
            # [idx,idx+1,idx,idx+1]'  [idx,idx,idx+1,idx+1]'
            # build the index/data arrays with numpy, mirroring the 3-D branch below
            es_indices = np.vstack(
                (np.hstack((idx, idx+1, idx, idx+1, np.arange(dim_n_elem+1, lFC))).T,
                 np.hstack((idx, idx, idx+1, idx+1, np.arange(dim_n_elem+1, lFC))).T)
            )

            es_data = np.vstack((elem_data.flatten('F').reshape(dim_n_elem, 1), np.ones([lFC - dim_n_elem, 1]))).astype(np.complex64)

            es_shape = [lFC, lFC]

            ES = tf.SparseTensor(es_indices.T,
                                 es_data.flatten('F'),
                                 dense_shape=es_shape)

        if n_dim==3:
            idx = np.arange(1, dim_n_elem+1, 3)

            es_indices = np.vstack(
                (np.hstack((idx, idx+1, idx+2, idx, idx+1, idx+2, idx, idx+1, idx+2, np.arange(dim_n_elem+1, lFC))).T,
                 np.hstack((idx, idx, idx, idx+1, idx+1, idx+1, idx+2, idx+2, idx+2, np.arange(dim_n_elem+1, lFC))).T)
            )

            es_data = np.vstack((elem_data.flatten('F').reshape(dim_n_elem, 1), np.ones([lFC - dim_n_elem, 1]))).astype(np.complex64)

            es_shape = [lFC, lFC]

            ES = tf.SparseTensor(es_indices.T,
                                 es_data.flatten('F'),
                                 dense_shape=es_shape)

    FC_t = tf.transpose(FC)
    E = tf.matmul(FC_t, tf.sparse_tensor_dense_matmul(ES, FC))
    E = 0.5*tf.add(tf.transpose(E), E)

    return E
Example #11
def system_mat_1st_order( fwd_model, img=None):
    pp = fwd_model_parameters(fwd_model)
    FC, _,_,_,_  = system_mat_fields(fwd_model, elec_imped=True)
    FC_shape =FC.shape.as_list()
    lFC = FC_shape[0]
    n_dim = pp['n_dims']
    dim_n_elem = n_dim*pp['n_elem']

    try:
        elem_data = img['elem_data'].reshape(pp['n_elem'], 1)
    except KeyError:
        elem_data = np.ones([pp['n_elem'], 1], dtype=np.float32)

    if len(elem_data.shape)<3:

        elem_sigma = tf.contrib.kfac.utils.kronecker_product(tf.constant(elem_data, dtype=tf.float32), tf.ones([n_dim, 1], dtype=tf.float32))
        elem_sigma = tf.concat([elem_sigma, tf.ones([lFC-dim_n_elem, 1], dtype=tf.float32)], axis=0)

        es_indices = tf.cast(tf.matmul(tf.ones([2,1], dtype=tf.int32), tf.reshape(tf.range(lFC), [1, lFC])), tf.int64)
        es_shape = [lFC, lFC]

        ES = tf.SparseTensor(tf.transpose(es_indices), tf.squeeze(elem_sigma), es_shape)
    else:
        # ToDo: need to test for future use
        if n_dim==2:
            idx = np.arange(1, dim_n_elem+1, 2)
            # [idx,idx+1,idx,idx+1]'  [idx,idx,idx+1,idx+1]'
            # build the index/data arrays with numpy, mirroring the 3-D branch below
            es_indices = np.vstack(
                (np.hstack((idx, idx+1, idx, idx+1, np.arange(dim_n_elem+1, lFC))).T,
                 np.hstack((idx, idx, idx+1, idx+1, np.arange(dim_n_elem+1, lFC))).T)
            )

            es_data = np.vstack((elem_data.flatten('F').reshape(dim_n_elem, 1), np.ones([lFC - dim_n_elem, 1]))).astype(np.complex64)

            es_shape = [lFC, lFC]

            ES = tf.SparseTensor(es_indices.T,
                                 es_data.flatten('F'),
                                 dense_shape=es_shape)

        if n_dim==3:
            idx = np.arange(1, dim_n_elem+1, 3)

            es_indices = np.vstack(
                (np.hstack((idx, idx+1, idx+2, idx, idx+1, idx+2, idx, idx+1, idx+2, np.arange(dim_n_elem+1, lFC))).T,
                 np.hstack((idx, idx, idx, idx+1, idx+1, idx+1, idx+2, idx+2, idx+2, np.arange(dim_n_elem+1, lFC))).T)
            )

            es_data = np.vstack((elem_data.flatten('F').reshape(dim_n_elem, 1), np.ones([lFC - dim_n_elem, 1]))).astype(np.complex64)

            es_shape = [lFC, lFC]

            ES = tf.SparseTensor(es_indices.T,
                                 es_data.flatten('F'),
                                 dense_shape=es_shape)

    FC_t = tf.transpose(FC)
    E = tf.matmul(FC_t, tf.sparse_tensor_dense_matmul(ES, FC))
    E = 0.5*tf.add(tf.transpose(E), E)

    return E
Example #12
class TextCNN():
    """
    title: inputs->textcnn->output_title
    content: inputs->textcnn->output_content
    concat[output_title, output_content] -> fc+bn+relu -> sigmoid_entropy.
    """
    def __init__(self, W_embedding, settings):
        self.model_name = settings.model_name
        self.title_len = settings.title_len
        self.content_len = settings.content_len
        self.filter_sizes = settings.filter_sizes
        self.n_filter = settings.n_filter
        self.n_filter_total = self.n_filter * len(self.filter_sizes)#256*5
        self.n_class = settings.n_class
        self.fc_hidden_size = settings.fc_hidden_size
        self._global_step=tf.Variable(0,trainable=False,name='Global_Step')
        self.update_emas=list()
        #placeholders
        self._tst=tf.placeholder(tf.bool)
        self._keep_prob=tf.placeholder(tf.float32,[])
        self._batch_size=tf.placeholder(tf.int32,[])
        
        
        with tf.name_scope('Inputs'):
            self._X1_inputs=tf.placeholder(tf.int64,[None,self.title_len],name='X1_inputs')
            self._X2_inputs=tf.placeholder(tf.int64,[None,self.content_len],name='X2_inputs')
            self._y_inputs=tf.placeholder(tf.float32,[None,self.n_class],name='y_inputs')
        with tf.variable_scope('embedding'):
            self.embedding=tf.get_variable(name='embedding',shape=W_embedding.shape,
                                           initializer=tf.constant_initializer(W_embedding),trainable=True)
        self.embedding_size=W_embedding.shape[1]#1024
        
        with tf.variable_scope('cnn_text'):
            output_title=self.cnn_inference(self._X1_inputs, self.title_len)
        with tf.variable_scope('cnn_content'):
            output_content=self.cnn_inference(self._X2_inputs, self.content_len)
        with tf.variable_scope('fc_bn_layer'):
            output=tf.concat([output_title,output_content],axis=1)#batch_size*2560
            W_fc=self.weight_variable([2*self.n_filter_total,self.fc_hidden_size],name='Weight_fc')
            tf.summary.histogram('Weight_fc',W_fc)
            
            h_fc=tf.matmul(output,W_fc,name='h_fc')#batch_size*fc_hidden_size
            beta_fc=tf.Variable(tf.constant(0.1,tf.float32,shape=[self.fc_hidden_size],name='beta_fc'))
            tf.summary.histogram('beta_fc',beta_fc)
            fc_bn,update_ema_fc=self.batchnorm(h_fc,beta_fc,convolutional=False)
            self.update_emas.append(update_ema_fc)
            self.fc_bn_relu=tf.nn.relu(fc_bn,name='relu')
            fc_bn_drop=tf.nn.dropout(self.fc_bn_relu,self.keep_prob)
        with tf.variable_scope('out_layer'):
            W_out=self.weight_variable([self.fc_hidden_size,self.n_class],name='Weight_out')
            tf.summary.histogram('Weight_out',W_out)
            b_out=self.bias_variable([self.n_class],name='bias_out')
            tf.summary.histogram('bias_out',b_out)
            self._y_pred=tf.nn.xw_plus_b(fc_bn_drop,W_out,b_out,name='y_pred')
        with tf.name_scope('loss'):
            self._loss=tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(logits=self._y_pred,labels=self._y_inputs))
            tf.summary.scalar('loss',self._loss)
        self.saver=tf.train.Saver(max_to_keep=2)
    @property
    def tst(self):
        return self._tst
    
    @property
    def keep_prob(self):
        return self._keep_prob
    
    @property
    def batch_size(self):
        return self._batch_size
    
    @property
    def global_step(self):
        return self._global_step
    
    @property
    def X1_inputs(self):
        return self._X1_inputs
    
    @property
    def X2_inputs(self):
        return self._X2_inputs
    
    @property
    def y_inputs(self):
        return self._y_inputs
    
    @property
    def y_pred(self):
        return self._y_pred
    
    @property
    def loss(self):
        return self._loss
    
    
    def weight_variable(self,shape,name):    
        initial=tf.truncated_normal(shape,stddev=0.1)
        return tf.Variable(initial,name=name)
    def bias_variable(self,shape,name):    
        initial=tf.constant(0.1,shape=shape)
        return tf.Variable(initial,name=name)        
    def batchnorm(self, Ylogits, offset, convolutional=False):
        """Batch normalization.
        Args:
            Ylogits: a 1-D vector or a 3-D convolution output.
            num_updates: the global_step used for the moving averages.
            offset: beta, the learned shift; usually initialized around 0.1 for ReLU activations.
            scale: lambda, the learned scale; needed for sigmoid activations, less important for ReLU.
            m: the batch mean; v: the batch variance.
            bnepsilon: a small float that prevents division by zero.
        Returns:
            Ybn: same shape as Ylogits, the batch-normalized result.
            update_moving_averages: op that updates the moving mean and variance, mainly for test time.
        """
        exp_moving_avg=tf.train.ExponentialMovingAverage(0.999,self._global_step)
        bnepsilon=1e-5
        if convolutional:
            # compute mean/variance over the batch and spatial dimensions
            mean,variance=tf.nn.moments(Ylogits,[0,1,2])# only the filter dimension remains
        else:
            mean,variance=tf.nn.moments(Ylogits,[0])
        update_moving_averages=exp_moving_avg.apply([mean,variance])
        m=tf.cond(self.tst,lambda: exp_moving_avg.average(mean), lambda: mean)
        v=tf.cond(self.tst,lambda: exp_moving_avg.average(variance), lambda: variance)
        Ybn=tf.nn.batch_normalization(Ylogits,m,v,offset,None,bnepsilon)
        return Ybn,update_moving_averages
        
        
    def cnn_inference(self,X_inputs,n_step):
        """TextCNN model.
        Args:
            X_inputs: tensor.shape=(batch_size, n_step)
        Returns:
            title_outputs: tensor.shape=(batch_size, self.n_filter_total)
        """
        inputs=tf.nn.embedding_lookup(self.embedding, X_inputs)#batch_size, n_step,embedding_size
        inputs=tf.expand_dims(inputs,-1)#batch_size, n_step,embedding_size,1
        pooled_outputs=list()
        for i,filter_size in enumerate(self.filter_sizes):
            with tf.variable_scope('conv-maxpool-%s'%filter_size):  
                filter_shape=[filter_size,self.embedding_size,1,self.n_filter]
                W_filter=self.weight_variable(shape=filter_shape,name='W_filter')
                beta=self.bias_variable(shape=[self.n_filter],name='beta_filter')
                tf.summary.histogram('beta',beta)
                conv=tf.nn.conv2d(inputs,W_filter,strides=[1,1,1,1],padding='VALID',name='conv')
                # note: BN is applied before the activation
                conv_bn,update_emas=self.batchnorm(conv,beta,convolutional=True)
                # activation
                h=tf.nn.relu(conv_bn,name="relu")
                # max-pooling over time
                pooled=tf.nn.max_pool(h,ksize=[1,n_step-filter_size+1,1,1],strides=[1,1,1,1],padding='VALID',name='pool')
                pooled_outputs.append(pooled)#shape of pooled [1,1,1,X]
                self.update_emas.append(update_emas)
        h_pool=tf.concat(pooled_outputs,3)
        h_pool_flat=tf.reshape(h_pool,[-1,self.n_filter_total])
        return h_pool_flat#(batch_size, n_filter_total): one max-pooled value per filter for each example
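
The class docstring sketches the pipeline: two TextCNN towers for title and content, a concat, an fc + BN + ReLU block, and a sigmoid cross-entropy loss. A minimal training-step sketch under assumptions follows; settings, W_embedding and the title/content/label batches are illustrative placeholders, not part of the snippet.

# Hypothetical training-step sketch for the TextCNN class above (TF 1.x style).
import tensorflow as tf

model = TextCNN(W_embedding, settings)
train_op = tf.train.AdamOptimizer(1e-3).minimize(model.loss,
                                                 global_step=model.global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {
        model.X1_inputs: title_batch,    # (batch_size, title_len) word ids
        model.X2_inputs: content_batch,  # (batch_size, content_len) word ids
        model.y_inputs: label_batch,     # (batch_size, n_class) multi-label targets
        model.tst: False,                # use batch statistics during training
        model.keep_prob: 0.5,
        model.batch_size: len(title_batch),
    }
    _, loss = sess.run([train_op, model.loss], feed_dict=feed)
    sess.run(model.update_emas, feed_dict=feed)  # update the BN moving averages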