Code example #1
    def create_critic_network(self, Scope):
        inputs = tf.placeholder(shape=[1, self.max_lenth], dtype=tf.int32, name="inputs")
        action = tf.placeholder(shape=[1, self.max_lenth], dtype=tf.int32, name="action")
        action_pos = tf.placeholder(shape=[1, None], dtype=tf.int32, name="action_pos")
        lenth = tf.placeholder(shape=[1], dtype=tf.int32, name="lenth")
        lenth_up = tf.placeholder(shape=[1], dtype=tf.int32, name="lenth_up")
       
        # Lower network
        if Scope[-1] == 'e':  # scope names ending in 'e' belong to the active network
            vec = tf.nn.embedding_lookup(self.wordvector, inputs)
            print("active")
        else:
            vec = tf.nn.embedding_lookup(self.target_wordvector, inputs)
            print("target")
        cell = LSTMCell(self.dim, initializer=self.init, state_is_tuple=False)
        self.state_size = cell.state_size
        actions = tf.to_float(action)
        h = cell.zero_state(1, tf.float32)
        embedding = []
        for step in range(self.max_lenth):
            with tf.variable_scope("Lower/"+Scope, reuse=True):
                o, h = cell(vec[:, step, :], h)
            embedding.append(o[0])
            h = h * (1.0 - actions[0, step])  # zero the LSTM state wherever action == 1

        #Upper network
        embedding = tf.stack(embedding)
        embedding = tf.gather(embedding, action_pos, name="Upper_input")
        with tf.variable_scope("Upper", reuse=True):
            out, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell, embedding, lenth_up, dtype=tf.float32, scope=Scope)

        if self.isAttention:
            out = tf.concat(out, 2)
            out = out[0,:,:]
            tmp = tflearn.fully_connected(out, self.dim, scope=Scope, name="att")
            tmp = tflearn.tanh(tmp)
            with tf.variable_scope(Scope):
                v_T = tf.get_variable("v_T", dtype=tf.float32, shape=[self.dim, 1], trainable=True)
            a = tflearn.softmax(tf.matmul(tmp,v_T))
            out = tf.reduce_sum(out * a, 0)
            out = tf.expand_dims(out, 0)
        else:
            #out = embedding[:, -1, :]
            out = tf.concat((out[0][:,-1,:], out[1][:,0,:]), 1)

        out = tflearn.dropout(out, self.keep_prob)
        out = tflearn.fully_connected(out, self.grained, scope=Scope+"/pred", name="get_pred")
        return inputs, action, action_pos, lenth, lenth_up, out
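The loop above advances a single LSTMCell one step at a time and multiplies the carried state by (1 - action), so the state is wiped wherever the action sequence marks a phrase boundary. A minimal self-contained sketch of that reset pattern under TF 1.x (the shapes and scope name are illustrative, not from the original class):

import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell

max_len, dim = 4, 8  # toy sizes; the original uses self.max_lenth and self.dim
vec = tf.placeholder(tf.float32, [1, max_len, dim], name="embedded_inputs")
actions = tf.placeholder(tf.float32, [1, max_len], name="boundary_actions")

cell = LSTMCell(dim, state_is_tuple=False)
h = cell.zero_state(1, tf.float32)
outputs = []
for step in range(max_len):
    with tf.variable_scope("demo", reuse=tf.AUTO_REUSE):
        o, h = cell(vec[:, step, :], h)
    outputs.append(o[0])
    h = h * (1.0 - actions[0, step])  # zero the state where action == 1
outputs = tf.stack(outputs)  # [max_len, dim], one output vector per step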
Code example #2
  def create_model(self, l, tN, N=100000, d=10, K=5, H=1000, m=0.05, reuse=False):
    '''
    N = 1000000 (Paper)
    d = Unknown
    '''
    with tf.variable_scope('TagSpace', reuse=reuse):
      lr = tf.placeholder('float32', shape=[1], name='lr')
      doc = tf.placeholder('float32', shape=[None, l], name='doc')
      tag_flag = tf.placeholder('float32', shape=[None, tN], name='tag_flag')

      doc_embed = tflearn.embedding(doc, input_dim=N, output_dim=d)
      self.lt_embed = lt_embed = tf.Variable(tf.random_normal([tN, d], stddev=0.1))  # tag embedding table

      net = tflearn.conv_1d(doc_embed, H, K, activation='tanh')  # H feature maps with convolution window K
      net = tflearn.max_pool_1d(net, l)  # max-pool over the whole document
      net = tflearn.tanh(net)
      self.logit = logit = tflearn.fully_connected(net, d, activation=None)  # doc representation in the d-dim tag space

      zero_vector = tf.zeros(shape=(1,1), dtype=tf.float32)

      logit = tf.expand_dims(logit, 1)
      logit_set = tf.concat([logit for i in range(tN)], axis=1)  # [batch, tN, d]: doc vector repeated per tag

      tag_flag_ex = tf.expand_dims(tag_flag, 2)
      tg = tf.concat([tag_flag_ex for i in range(d)], axis=2)  # [batch, tN, d]: positive-tag mask

      self.tag_logit = tf.reduce_sum(tf.multiply(logit_set, tf.multiply(tf.ones_like(tg), lt_embed)), axis=2)  # score of every tag for each doc

      self.positive_logit = positive_logit = tf.reduce_sum(tf.multiply(logit_set, tf.multiply(tg, lt_embed)), axis=2)
      with tf.device('/cpu:0'):
        self.f_positive = f_positive = tf.map_fn(lambda x: (tf.boolean_mask(x[0], x[1]), True), (positive_logit, tf.not_equal(positive_logit, zero_vector)))
      positive = tf.reduce_min(f_positive[0], axis=1)  # weakest positive-tag score
      self.positive = positive

      tag_flag_ex = tf.expand_dims(1-tag_flag, 2)
      tg = tf.concat([tag_flag_ex for i in range(d)], axis=2)  # [batch, tN, d]: negative-tag mask
      negative_logit = tf.reduce_sum(tf.multiply(logit_set, tf.multiply(tg, lt_embed)), axis=2)

      with tf.device('/cpu:0'):
        self.f_negative = f_negative = tf.map_fn(lambda x: (tf.boolean_mask(x[0], x[1]), True), (negative_logit, tf.not_equal(negative_logit, zero_vector)))
      self.negative = negative = tf.reduce_max(f_negative[0], axis=1)  # strongest negative-tag score

      # hinge loss: mean(max(0, m - positive + negative)), clipped from above at 10e7
      self.f_loss = f_loss = tf.reduce_mean(
          tf.reduce_max([
              tf.reduce_min([tf.expand_dims(m - positive + negative, 1),
                             tf.expand_dims(tf.fill([tf.shape(doc)[0]], 10e7), 1)], axis=0),
              tf.zeros([tf.shape(doc)[0], 1])
          ], axis=0))

      opt = tf.train.AdamOptimizer(learning_rate=lr[0])
      self.op = opt.minimize(f_loss)
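The f_loss above is a margin hinge: m minus the weakest positive-tag score plus the strongest negative-tag score, clipped to [0, 10e7] and averaged over the batch. Running a training step then just feeds the three placeholders; a hedged usage sketch, assuming the class was instantiated as `model` after create_model, and `docs`/`tags` are numpy arrays of shape [batch, l] and [batch, tN] (those names are illustrative):

import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    g = tf.get_default_graph()
    feed = {
        g.get_tensor_by_name('TagSpace/lr:0'): np.array([0.001], dtype=np.float32),
        g.get_tensor_by_name('TagSpace/doc:0'): docs,        # docs/tags assumed from the
        g.get_tensor_by_name('TagSpace/tag_flag:0'): tags,   # surrounding training script
    }
    _, loss = sess.run([model.op, model.f_loss], feed_dict=feed)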
Code example #3
import tflearn
from tflearn.helpers.evaluator import Evaluator
from tflearn.helpers.summarizer import summarize
from tflearn.helpers.regularizer import add_weights_regularizer
from tensorflow.contrib.slim import dataset

tflearn.input_data()
tflearn.variable()
tflearn.conv_2d()
tflearn.single_unit()
tflearn.lstm()
tflearn.embedding()
tflearn.batch_normalization()
tflearn.merge()
tflearn.regression()
tflearn.tanh()
tflearn.softmax_categorical_crossentropy()
tflearn.SGD()
tflearn.initializations.uniform()
tflearn.losses.L1()
tflearn.add_weights_regularizer()
tflearn.metrics.Accuracy()
tflearn.summaries()
tflearn.ImagePreprocessing()
tflearn.ImageAugmentation()
tflearn.init_graph()
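These helpers compose into tflearn's usual layer-chain pipeline. A minimal sketch wiring a few of the calls listed above into a trainable network (the input shape is an illustrative toy, not from this project):

import tflearn

tflearn.init_graph()
net = tflearn.input_data(shape=[None, 28, 28, 1])  # toy image input
net = tflearn.conv_2d(net, 32, 3, activation='relu')
net = tflearn.batch_normalization(net)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer=tflearn.SGD(learning_rate=0.01),
                         loss='categorical_crossentropy',
                         metric=tflearn.metrics.Accuracy())
model = tflearn.DNN(net)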

Code example #4
File: mytrain.py Project: bowendoctor/TagSpace
import tensorflow as tf
import tflearn

l = word_pad_length  # document length after padding
tN = tag_size        # number of distinct tags
N = 100000           # vocabulary size
d = 10               # embedding dimension
K = 5                # convolution window size
H = 1000             # number of convolution filters
m = 0.05             # hinge-loss margin
lr = tf.placeholder('float32', shape=[1], name='lr')
doc = tf.placeholder('float32', shape=[None, l], name='doc')
tag_flag = tf.placeholder('float32', shape=[None, tN], name='tag_flag')

doc_embed = tflearn.embedding(doc, input_dim=N, output_dim=d)
lt_embed = tf.Variable(tf.random_normal([tN, d], stddev=0.1))
net = tflearn.conv_1d(doc_embed, H, K, activation='tanh')
net = tflearn.max_pool_1d(net, K)
net = tflearn.tanh(net)
logit = tflearn.fully_connected(net, d, activation=None)

zero_vector = tf.zeros(shape=(1,1), dtype=tf.float32)

logit = tf.expand_dims(logit, 1)
logit_set = tf.concat([logit for i in range(tN)], axis=1)

tag_flag_ex = tf.expand_dims(tag_flag, 2)
tg = tf.concat([tag_flag_ex for i in range(d)], axis=2)

tag_logit = tf.reduce_sum(tf.multiply(logit_set, tf.multiply(tf.ones_like(tg), lt_embed)), axis=2)

positive_logit = tf.reduce_sum(tf.multiply(logit_set, tf.multiply(tg, lt_embed)), axis=2)
random_sample = tf.random_uniform([batch_size], minval=0, maxval=1, dtype=tf.float32)
#f_test = tf.not_equal(positive_logit, zero_vector)
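The listing breaks off here; the commented-out mask and random_sample suggest negative handling along the lines of code example #2. A hedged sketch of how the hinge loss could be assembled from the tensors already defined, mirroring example #2 rather than reconstructing the missing remainder of mytrain.py:

# mirrors code example #2; not the actual continuation of mytrain.py
f_positive = tf.map_fn(lambda x: (tf.boolean_mask(x[0], x[1]), True),
                       (positive_logit, tf.not_equal(positive_logit, zero_vector)))
positive = tf.reduce_min(f_positive[0], axis=1)  # weakest positive-tag score

tg_neg = tf.concat([tf.expand_dims(1 - tag_flag, 2) for _ in range(d)], axis=2)
negative_logit = tf.reduce_sum(tf.multiply(logit_set, tf.multiply(tg_neg, lt_embed)), axis=2)
f_negative = tf.map_fn(lambda x: (tf.boolean_mask(x[0], x[1]), True),
                       (negative_logit, tf.not_equal(negative_logit, zero_vector)))
negative = tf.reduce_max(f_negative[0], axis=1)  # strongest negative-tag score

loss = tf.reduce_mean(tf.maximum(m - positive + negative, 0.0))  # margin hinge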