Code example #1
File: item2vec.py  Project: xxqcheers/board-yet
    def build_training_graph(self, batch, labels):
        """Takes in the graph nodes representing a training batch and
        associated labels, and builds the forward training graph, 
        including the embedding itself. Returns nodes representing
        the logits for the positive examples, as well as the logits
        for associated negatives for negative sampling."""
        # We do this because word2vec initializes the weights this way
        init_width = 0.5 / self.embed_dim
        # The actual embedding:
        # The shape of the tensor is weird because we are going to use the
        # "embedding_lookup" function instead of just multipling with a 1-hot.
        emb = tf.Variable(tf.random_uniform([self.vocab_size, self.embed_dim],
                                            -init_width, init_width),
                          name="embedding")
        self.emb = emb
        
        # For training, we actually need to train a softmax classifier.
        # This tensor can be thought of as a complete softmax unit for
        # every possible game. (Each row is a set of weights)
        softmax_w = tf.Variable(tf.zeros([self.vocab_size, self.embed_dim]),
                                name="softmax_weights")
        softmax_b = tf.Variable(tf.zeros([self.vocab_size]),
                                name="softmax_bias")

        # Negative sampling for SGNS. We make the assumption of sparsity.
        # On average, randomly sampled games will be negatives.
        labels_reformat = tf.reshape(tf.cast(labels, dtype=tf.int64), [-1, 1])
        # The sampler also returns expected counts, which we don't need here.
        sampled_ids, _, _ = tf.nn.fixed_unigram_candidate_sampler(
                true_classes=labels_reformat,
                num_true=1,
                num_sampled=self.num_negatives,
                unique=True,
                range_max=self.vocab_size,
                distortion=0.75,
                unigrams=self.total_item_counts)

        batch_embeds = tf.nn.embedding_lookup(emb, batch)
        # Lookup the softmax classifiers for the training batch.
        # I don't particularly like the use of "embedding_lookup",
        # because softmax_w etc. aren't technically embedding
        # matrices. This is apparently the canonical way to do it in TF though.
        batch_sm_w = tf.nn.embedding_lookup(softmax_w, labels)
        batch_sm_b = tf.nn.embedding_lookup(softmax_b, labels)

        # Lookup the softmax classifiers for the negative samples.
        neg_sm_w = tf.nn.embedding_lookup(softmax_w, sampled_ids)
        neg_sm_b = tf.nn.embedding_lookup(softmax_b, sampled_ids)

        # Produces a tensor that represents the logits (the arg of the
        # exponential numerator in softmax) for each of the examples
        # in the training batch.
        batch_logits = (tf.reduce_sum(tf.multiply(batch_embeds, batch_sm_w), 1)
                        + batch_sm_b)
        # For the negatives, every example in the batch is scored against every
        # sampled id, hence the matmul instead of an elementwise product.
        neg_logits = (tf.matmul(batch_embeds, neg_sm_w, transpose_b=True) +
                      tf.reshape(neg_sm_b, [self.num_negatives]))

        return batch_logits, neg_logits
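The shape comment above points at why embedding_lookup is used at all: it is just a row gather, equivalent to multiplying a one-hot matrix by the embedding table but without materializing the one-hot. A minimal standalone sketch of that equivalence (the toy sizes below are made up for illustration):

import numpy as np
import tensorflow as tf

# Toy embedding table: vocab_size=4, embed_dim=3.
emb = tf.constant(np.arange(12, dtype=np.float32).reshape(4, 3))
ids = tf.constant([2, 0])

# Row gather, as used in build_training_graph above.
looked_up = tf.nn.embedding_lookup(emb, ids)

# The same values written as a one-hot matrix multiplication.
one_hot = tf.one_hot(ids, depth=4)
multiplied = tf.matmul(one_hot, emb)
# Both tensors contain rows 2 and 0 of emb.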
Code example #2
 def build_graph(self):
     user_embedding, item_embedding = self.gcn()
     uemb = tf.nn.embedding_lookup(user_embedding, self.user)
     vemb = tf.nn.embedding_lookup(item_embedding, self.item)
     emb = tf.concat([uemb, vemb], axis=1)
     x = emb
     for s in self.fc_layers:
         x = tf.layers.dense(x, s, activation=tf.nn.relu)
     self.v = tf.reduce_sum(x, axis=1)
     self.loss = tf.reduce_mean(tf.square(tf.subtract(self.value, self.v)))
     self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
     init = tf.global_variables_initializer()
     self.sess.run(init)
Code example #3
File: seq2seq.py  Project: zerkh/seq2seq_copy
 def extract_argmax_and_embed(prev, _):
   """Loop_function that extracts the symbol from prev and embeds it."""
   if output_projection is not None:
     prev = tf.nn.xw_plus_b(
         prev, output_projection[0], output_projection[1])
   prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
   emb_prev = tf.nn.embedding_lookup(embedding, prev_symbol)
   return emb_prev
Code example #4
 def extract_argmax_and_embed(prev, _):
     """Loop_function that extracts the symbol from prev and embeds it."""
     if output_projection is not None:
         prev = tf.nn.xw_plus_b(prev, output_projection[0],
                                output_projection[1])
     prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
     emb_prev = tf.nn.embedding_lookup(embedding, prev_symbol)
     return emb_prev
Code example #5
File: estimator.py  Project: SongFGH/elephant
 def show(self):
     # Note: this embedding_lookup takes four arguments
     # (id, vocabulary_size, layer_size, name), so it appears to be the
     # project's own helper rather than tf.nn.embedding_lookup.
     node_vectors = [
         tensorflow.embedding_lookup(node_id, self.vocabulary_sizes[1],
                                     self.layer_size, str(1))
         for node_id in range(self.vocabulary_sizes[1])
     ]
     print(list(zip(
         range(self.vocabulary_sizes[1]),
         node_vectors,
     )))
Code example #6
 def inference(inputs):
     """inputs: a list containing a sequence of word ids."""
     outputs = []
     state = cell.zero_state(1, tf.float32)  # 1 means only one sequence
     embed = tf.nn.embedding_lookup(embedding, inputs)
     sequence_length = len(inputs)
     for i in range(sequence_length):
         cell_output, state = cell(embed[:, i, :], state)
         logits = tf.nn.xw_plus_b(cell_output, softmax_w, softmax_b)
         probability = tf.nn.softmax(logits)
         outputs.append(probability)
     return outputs
Code example #7
def get_user_embedding(user_id, user_gender, user_age, user_job):
    with tf.name_scope("user_embedding"):
        user_id_embed_matrix = tf.Variable(tf.random_uniform([userid_max, embed_dim], -1, 1),
                                           name="user_id_embed_matrix")
        user_id_embed_layer = tf.nn.embedding_lookup(user_id_embed_matrix, user_id,
                                                     name="user_id_embed_layer")

        gender_embed_matrix = tf.Variable(tf.random_uniform([gender_max, embed_dim // 2], -1, 1),
                                          name="gender_embed_matrix")
        gender_embed_layer = tf.nn.embedding_lookup(gender_embed_matrix, user_gender,
                                                    name="gender_embed_layer")

        age_embed_matrix = tf.Variable(tf.random_uniform([age_max, embed_dim // 2], -1, 1),
                                       name="age_embed_matrix")
        age_embed_layer = tf.nn.embedding_lookup(age_embed_matrix, user_age,
                                                 name="age_embed_layer")

        job_embed_matrix = tf.Variable(tf.random_uniform([job_max, embed_dim // 2], -1, 1),
                                       name="job_embed_matrix")
        job_embed_layer = tf.nn.embedding_lookup(job_embed_matrix, user_job,
                                                 name="job_embed_layer")

    return user_id_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer
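For context, a hedged sketch of how get_user_embedding might be called and its outputs combined; the placeholder names and category sizes below are hypothetical, chosen only to make the shapes concrete (the function reads userid_max, gender_max, age_max, job_max and embed_dim as module-level globals):

import tensorflow as tf

# Hypothetical category sizes and embedding width.
userid_max, gender_max, age_max, job_max, embed_dim = 1000, 2, 8, 20, 32

user_id = tf.placeholder(tf.int32, [None, 1], name="user_id")
user_gender = tf.placeholder(tf.int32, [None, 1], name="user_gender")
user_age = tf.placeholder(tf.int32, [None, 1], name="user_age")
user_job = tf.placeholder(tf.int32, [None, 1], name="user_job")

id_emb, gender_emb, age_emb, job_emb = get_user_embedding(
    user_id, user_gender, user_age, user_job)
# Each lookup returns [batch, 1, dim]; a common next step is to concatenate the
# per-feature embeddings into a single user feature tensor along the last axis.
user_features = tf.concat([id_emb, gender_emb, age_emb, job_emb], axis=2)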
Code example #8
File: rnn_cell.py  Project: chaoshunh/optimization
	def __call__(self, inputs, state, scope=None):
		"""Run the cell on embedded inputs."""
		with tf.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
			with tf.device("/cpu:0"):
				if self._initializer:
					initializer = self._initializer
				elif tf.get_variable_scope().initializer:
					initializer = tf.get_variable_scope().initializer
				else:
					# Default initializer for embeddings should have variance=1.
					sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
					initializer = tf.random_uniform_initializer(-sqrt3, sqrt3)
				embedding = tf.get_variable(
						"embedding", [self._embedding_classes, self._embedding_size],
						initializer=initializer)
				embedded = tf.nn.embedding_lookup(embedding, tf.reshape(inputs, [-1]))
		return self._cell(embedded, state)
Code example #9
File: bayes_tf.py  Project: eazlong/tensorflow_learn
def neural_network():
    with tf.device('/cpu:0'), tf.name_scope("embedding"):
        embedding_size = 128
        W = tf.Variable(
            tf.random_uniform([input_size, embedding_size], -1.0, 1.0))
        embedding_chars = tf.nn.embedding_lookup(W, X)
        embedded_chars_expanded = tf.expand_dims(embedding_chars, -1)
    num_filter = 128
    filter_sizes = [3, 4, 5]
    pooled_output = []
    for i, filter_size in enumerate(filter_sizes):
        with tf.name_scope("conv_maxpool_%s" % filter_size):
            filter_shape = [filter_size, embedding_size, 1, num_filter]
            W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1))
            b = tf.Variable(tf.constant(0.1, shape=[num_filter]))
            conv = tf.nn.conv2d(embedded_chars_expanded,
                                W,
                                strides=[1, 1, 1, 1],
                                padding="VALID")
            h = tf.nn.relu(tf.nn.bias_add(conv, b))
            # VALID convolution leaves input_size - filter_size + 1 positions,
            # so max-pooling over that height reduces each filter to one value.
            pooled = tf.nn.max_pool(h,
                                    ksize=[1, input_size - filter_size + 1, 1, 1],
                                    strides=[1, 1, 1, 1],
                                    padding="VALID")
            pooled_output.append(pooled)

    num_filters_total = num_filter * len(filter_sizes)
    h_pool = tf.concat(pooled_output, 3)
    h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
    # dropout
    with tf.name_scope("dropout"):
        h_drop = tf.nn.dropout(h_pool_flat, dropout_keep_prob)
    # output
    with tf.name_scope("output"):
        W = tf.get_variable("W",
                            shape=[num_filters_total, num_classes],
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.Variable(tf.constant(0.1, shape=[num_classes]))
        output = tf.nn.xw_plus_b(h_drop, W, b)

    return output
Code example #10
    def _create_loss(self):
        with tf.device('/cpu:0'):
            with tf.name_scope('loss'):
                embed = tf.nn.embedding_lookup(self.embed_matrix,
                                               self.center_words,
                                               name='embed')

                nce_weight = tf.Variable(tf.truncated_normal(
                    [self.vocab_size, self.embed_size],
                    stddev=1.0 / (self.embed_size ** 0.5)),
                    name='nce_weight')

                nce_bias = tf.Variable(tf.zeros([self.vocab_size]),
                                       name='nce_bias')

                self.loss = tf.reduce_mean(tf.nn.nce_loss(
                    weights=nce_weight,
                    biases=nce_bias,
                    labels=self.target_words,
                    inputs=embed,
                    num_sampled=self.num_sampled,
                    num_classes=self.vocab_size),
                    name='loss')
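Since the tensors used above (self.center_words, self.target_words, self.embed_matrix) are defined elsewhere in the class, here is a minimal standalone sketch of the same tf.nn.nce_loss call with its expected shapes made explicit; all sizes and names below are assumptions for illustration:

import tensorflow as tf

# labels must be int64 of shape [batch, 1]; inputs float of shape [batch, embed_size].
vocab_size, embed_size, batch_size, num_sampled = 1000, 64, 32, 16
center_words = tf.placeholder(tf.int32, [batch_size])
target_words = tf.placeholder(tf.int64, [batch_size, 1])

embed_matrix = tf.Variable(tf.random_uniform([vocab_size, embed_size], -1.0, 1.0))
nce_weight = tf.Variable(tf.truncated_normal([vocab_size, embed_size],
                                             stddev=1.0 / (embed_size ** 0.5)))
nce_bias = tf.Variable(tf.zeros([vocab_size]))

embed = tf.nn.embedding_lookup(embed_matrix, center_words)
loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weight, biases=nce_bias,
                                     labels=target_words, inputs=embed,
                                     num_sampled=num_sampled,
                                     num_classes=vocab_size))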
Code example #11
        def loop(prev, _):
            """Function to be performed at each recurrent layer.

            This function is applied to the i-th output in order to generate the
            (i+1)-st input; decoder_inputs are ignored, except for the first
            element (the "GO" symbol). It can be used for decoding, but also
            during training to emulate http://arxiv.org/abs/1506.03099.

            Signature -- loop_function(prev, i) = next
                    * prev is a 2D Tensor of shape [batch_size x output_size],
                    * i is an integer, the step number (when advanced control is needed),
                    * next is a 2D Tensor of shape [batch_size x input_size].

            Arguments:
                prev {tf.Tensor} -- 2D Tensor of shape [batch_size x output_size].
                _ {int} -- the step number (unused here).

            Returns:
                {tf.Tensor} -- A 2D Tensor of shape [batch_size, input_size]: the
                embedding of the predicted next character.
            """
            prev = tf.matmul(prev, softmax_W) + softmax_b
            prev_symbol = tf.stop_gradient(tf.argmax(prev, axis=1))
            return tf.nn.embedding_lookup(embedding, prev_symbol)
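A hedged sketch of how a loop_function like this is typically handed to the TF 1.x decoder; the sizes and variable names below are hypothetical, and the decoder referred to is tf.contrib.legacy_seq2seq.rnn_decoder:

import tensorflow as tf

# Hypothetical sizes; softmax_W/softmax_b project cell outputs onto the vocabulary.
vocab_size, embed_dim, hidden_size, num_steps, batch_size = 100, 32, 64, 5, 8
embedding = tf.get_variable("embedding", [vocab_size, embed_dim])
softmax_W = tf.get_variable("softmax_W", [hidden_size, vocab_size])
softmax_b = tf.get_variable("softmax_b", [vocab_size])
cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size)

def loop(prev, _):
    prev = tf.matmul(prev, softmax_W) + softmax_b
    prev_symbol = tf.stop_gradient(tf.argmax(prev, axis=1))
    return tf.nn.embedding_lookup(embedding, prev_symbol)

# With a loop_function, only the first decoder input is actually consumed; the
# rest are replaced by the embedded argmax of the previous output.
decoder_inputs = [tf.zeros([batch_size, embed_dim]) for _ in range(num_steps)]
initial_state = cell.zero_state(batch_size, tf.float32)
outputs, state = tf.contrib.legacy_seq2seq.rnn_decoder(
    decoder_inputs, initial_state, cell, loop_function=loop)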