def _create_variables(self):
    """Create weight/bias variables for the inference (q) and generative (p) MLPs."""
    with tf.name_scope("embedding"):  # The embedding initialization is unknown now
        w_init = tool.get_initializer(self.weight_init_method, self.stddev)
        b_init = tool.get_initializer(self.bias_init_method, self.stddev)

        # Inference-network (q) parameters.
        self.weights_q, self.biases_q = [], []
        q_pairs = list(zip(self.q_dims[:-1], self.q_dims[1:]))
        for i, (d_in, d_out) in enumerate(q_pairs):
            if i == len(q_pairs) - 1:
                # The last q-layer needs two sets of parameters, for mean and
                # variance respectively, hence twice as many output units.
                d_out *= 2
            self.weights_q.append(
                tf.Variable(w_init([d_in, d_out]),
                            name="weight_q_{}to{}".format(i, i + 1),
                            dtype=tf.float32))
            self.biases_q.append(
                tf.Variable(b_init([d_out]),
                            name="bias_q_{}".format(i + 1),
                            dtype=tf.float32))

        # Generative-network (p) parameters.
        self.weights_p, self.biases_p = [], []
        for i, (d_in, d_out) in enumerate(zip(self.p_dims[:-1], self.p_dims[1:])):
            self.weights_p.append(
                tf.Variable(w_init([d_in, d_out]),
                            name="weight_p_{}to{}".format(i, i + 1),
                            dtype=tf.float32))
            self.biases_p.append(
                tf.Variable(b_init([d_out]),
                            name="bias_p_{}".format(i + 1),
                            dtype=tf.float32))
def _create_variables(self, params=None):
    """Create item embedding tables and attention-MLP parameters.

    Args:
        params: optional pretrained values [c1, embedding_Q, bias]; when None,
            variables are freshly initialized from the configured initializer.
    """
    with tf.name_scope("embedding"):  # The embedding initialization is unknown now
        if params is None:
            embed_init = tool.get_initializer(self.embed_init_method, self.stddev)
            self.c1 = tf.Variable(embed_init([self.num_items, self.embedding_size]),
                                  name='c1', dtype=tf.float32)
            self.embedding_Q = tf.Variable(
                embed_init([self.num_items, self.embedding_size]),
                name='embedding_Q', dtype=tf.float32)
            self.bias = tf.Variable(tf.zeros(self.num_items), name='bias')
        else:
            self.c1 = tf.Variable(params[0], name='c1', dtype=tf.float32)
            self.embedding_Q = tf.Variable(params[1], name='embedding_Q',
                                           dtype=tf.float32)
            self.bias = tf.Variable(params[2], name="bias", dtype=tf.float32)

        # One extra all-zero row is appended to c1 (presumably a padding slot
        # for variable-length item histories — confirm against the caller).
        self.c2 = tf.constant(0.0, tf.float32, [1, self.embedding_size], name='c2')
        self.embedding_Q_ = tf.concat([self.c1, self.c2], axis=0, name='embedding_Q_')

        # Variables for attention
        w_init = tool.get_initializer(self.weight_init_method, self.stddev)
        # algorithm 0 feeds a single embedding into the MLP; otherwise two
        # embeddings are concatenated, doubling the input width.
        attn_in = self.embedding_size if self.algorithm == 0 else 2 * self.embedding_size
        self.W = tf.Variable(w_init([attn_in, self.weight_size]),
                             name='Weights_for_MLP', dtype=tf.float32, trainable=True)
        self.b = tf.Variable(w_init([1, self.weight_size]),
                             name='Bias_for_MLP', dtype=tf.float32, trainable=True)
        self.h = tf.Variable(tf.ones([self.weight_size, 1]),
                             name='H_for_MLP', dtype=tf.float32)
def _init_weights(self):
    """Build and return a dict of all trainable parameters (embeddings + per-layer W/b)."""
    all_weights = dict()
    embed_init = tool.get_initializer(self.embed_init_method, self.stddev)
    w_init = tool.get_initializer(self.weight_init_method, self.stddev)

    if self.pre_train_data is None:
        all_weights['user_embedding'] = tf.Variable(
            embed_init([self.num_users, self.emb_dim]), name='user_embedding')
        all_weights['item_embedding'] = tf.Variable(
            embed_init([self.num_items, self.emb_dim]), name='item_embedding')
        self.logger.info('using xavier initialization')
    else:
        # Warm-start both tables from pretrained embeddings; they stay trainable.
        all_weights['user_embedding'] = tf.Variable(
            initial_value=self.pre_train_data['user_embed'], trainable=True,
            name='user_embedding', dtype=tf.float32)
        all_weights['item_embedding'] = tf.Variable(
            initial_value=self.pre_train_data['item_embed'], trainable=True,
            name='item_embedding', dtype=tf.float32)
        self.logger.info('using pretrained initialization')

    self.weight_size_list = [self.emb_dim] + self.weight_size
    for k in range(self.n_layers):
        d_in = self.weight_size_list[k]
        d_out = self.weight_size_list[k + 1]
        # One weight/bias pair per propagation component (gc, bi, mlp) per layer.
        for part in ('gc', 'bi', 'mlp'):
            all_weights['W_%s_%d' % (part, k)] = tf.Variable(
                w_init([d_in, d_out]), name='W_%s_%d' % (part, k))
            all_weights['b_%s_%d' % (part, k)] = tf.Variable(
                w_init([1, d_out]), name='b_%s_%d' % (part, k))
    return all_weights
def _create_variables(self, params=None):
    """Create user/item embedding tables and the convolutional tower parameters.

    Args:
        params: optional pretrained values [embedding_P, embedding_Q]; when
            None, embeddings are freshly initialized.
    """
    with tf.name_scope("embedding"):
        if params is None:
            init_fn = tool.get_initializer(self.embed_init_method, self.stddev)
            self.embedding_P = tf.Variable(
                init_fn([self.num_users, self.embedding_size]),
                name='embedding_P', dtype=tf.float32)  # (users, embedding_size)
            self.embedding_Q = tf.Variable(
                init_fn([self.num_items, self.embedding_size]),
                name='embedding_Q', dtype=tf.float32)  # (items, embedding_size)
        else:
            self.embedding_P = tf.Variable(
                params[0], name='embedding_P',
                dtype=tf.float32)  # (users, embedding_size)
            self.embedding_Q = tf.Variable(
                params[1], name='embedding_Q',
                dtype=tf.float32)  # (items, embedding_size)

        # here should have 6 iszs due to the size of outer products is 64x64
        in_channels = [1] + self.nc[:-1]
        out_channels = self.nc
        self.P = [self._conv_weight(isz, osz)
                  for isz, osz in zip(in_channels, out_channels)]
        self.W = self.weight_variable([self.nc[-1], 1])  # 32x1
        self.b = self.weight_variable([1])  # 1
def _create_variables(self):
    """Create item embeddings, per-user high-order weights (eta) and item biases."""
    with tf.name_scope("embedding"):  # The embedding initialization is unknown now
        init_fn = tool.get_initializer(self.init_method, self.stddev)
        self.c1 = tf.Variable(init_fn([self.num_items, self.embedding_size]),
                              name='c1', dtype=tf.float32)
        # An extra all-zero row is appended to c1 (presumably a padding slot).
        self.c2 = tf.constant(0.0, tf.float32, [1, self.embedding_size], name='c2')
        self.embedding_P = tf.concat([self.c1, self.c2], 0, name='embedding_P')
        self.embedding_Q = tf.Variable(init_fn([self.num_items, self.embedding_size]),
                                       name='embedding_Q', dtype=tf.float32)
        self.eta = tf.Variable(init_fn([self.num_users, self.high_order]),
                               name='eta')
        self.eta_bias = tf.Variable(init_fn([1, self.high_order]),
                                    name='eta_bias')
        self.bias = tf.Variable(tf.zeros(self.num_items), name='bias')
def _create_variables(self):
    """Create embeddings, load user/item review feature matrices from disk, and
    build the fusion / dimension-reduction dense layers.

    The two feature files share the same '::::'-separated format, so their
    parsing is factored into `_load_feature_matrix`.
    """
    with tf.name_scope("embedding"):
        init_fn = tool.get_initializer(self.init_method, self.stddev)
        self.user_embedding = tf.Variable(
            init_fn([self.num_users, self.embedding_size]), name='user_embedding')
        self.item_embedding = tf.Variable(
            init_fn([self.num_items, self.embedding_size]), name='item_embedding')

        self.user_review_vector_matrix = tf.constant(
            self._load_feature_matrix(self.user_feature_file, self.userids,
                                      self.num_users),
            dtype=tf.float32)
        self.item_review_vector_matrix = tf.constant(
            self._load_feature_matrix(self.item_feature_file, self.itemids,
                                      self.num_items),
            dtype=tf.float32)

        self.reduce_dimension_layer = tf.layers.Dense(
            self.embedding_size, activation=tf.nn.sigmoid,
            name='reduce_dimension_layer')
        self.item_fusion_layer = tf.layers.Dense(
            self.embedding_size, activation=tf.nn.sigmoid,
            name='item_fusion_layer')
        self.user_fusion_layer = tf.layers.Dense(
            self.embedding_size, activation=tf.nn.sigmoid,
            name='user_fusion_layer')

def _load_feature_matrix(self, feature_file, id_map, num_rows):
    """Parse a '::::'-separated feature file into a (num_rows, feature_dimension) array.

    Lines whose raw id is absent from `id_map` are skipped; unseen rows stay zero.
    """
    vectors = np.zeros((num_rows, self.feature_dimension))
    with open(feature_file, 'r') as f:
        # Iterate the file directly instead of materializing it via readlines().
        for line in f:
            raw_idx, data = line.strip().split("::::")
            if raw_idx in id_map:
                # NOTE(review): eval() on file content is unsafe if the feature
                # files are not fully trusted; consider ast.literal_eval if the
                # data is guaranteed to be Python literals.
                vectors[id_map[raw_idx]] = eval(data)
    return vectors
def _create_variables(self):
    """Create the two-layer user (u_*) and item (v_*) encoder parameters."""
    with tf.name_scope("embedding"):  # The embedding initialization is unknown now
        init_fn = tool.get_initializer(self.init_method, self.stddev)
        # NOTE(review): "fist_layer_size" looks like a typo of "first_layer_size",
        # but the attribute is defined elsewhere under that name, so it is kept.
        self.u_w1 = tf.Variable(init_fn([self.num_items, self.fist_layer_size]),
                                name="u_w1")
        self.u_b1 = tf.Variable(init_fn([self.fist_layer_size]), name="u_b1")
        self.u_w2 = tf.Variable(init_fn([self.fist_layer_size, self.last_layer_size]),
                                name="u_w2")
        self.u_b2 = tf.Variable(init_fn([self.last_layer_size]), name="u_b2")

        self.v_w1 = tf.Variable(init_fn([self.num_users, self.fist_layer_size]),
                                name="v_w1")
        self.v_b1 = tf.Variable(init_fn([self.fist_layer_size]), name="v_b1")
        self.v_w2 = tf.Variable(init_fn([self.fist_layer_size, self.last_layer_size]),
                                name="v_w2")
        self.v_b2 = tf.Variable(init_fn([self.last_layer_size]), name="v_b2")
def _create_variables(self):
    """Create the user and item embedding tables."""
    with tf.name_scope("embedding"):
        init_fn = tool.get_initializer(self.init_method, self.stddev)
        self.user_embeddings = tf.Variable(
            init_fn([self.num_users, self.embedding_size]),
            name='user_embeddings', dtype=tf.float32)  # (users, embedding_size)
        self.item_embeddings = tf.Variable(
            init_fn([self.num_items, self.embedding_size]),
            name='item_embeddings', dtype=tf.float32)  # (items, embedding_size)
def _create_variables(self):
    """Create the per-user latent matrix V and the autoencoder weight/bias dicts."""
    with tf.name_scope("embedding"):  # The embedding initialization is unknown now
        init_fn = tool.get_initializer(self.init_method, self.stddev)
        self.V = tf.Variable(init_fn([self.num_users, self.hidden_neuron]))
        self.weights = {
            'encoder': tf.Variable(init_fn([self.num_items, self.hidden_neuron])),
            'decoder': tf.Variable(init_fn([self.hidden_neuron, self.num_items])),
        }
        self.biases = {
            'encoder': tf.Variable(init_fn([self.hidden_neuron])),
            'decoder': tf.Variable(init_fn([self.num_items])),
        }
def _create_variables(self):
    """Create MLP user/item embeddings and the stack of ReLU dense layers."""
    with tf.name_scope("embedding"):  # The embedding initialization is unknown now
        init_fn = tool.get_initializer(self.init_method, self.stddev)
        # Each embedding contributes half of the first MLP layer's input.
        half_first = int(self.layers[0] / 2)
        self.mlp_embedding_user = tf.Variable(
            init_fn([self.num_users, half_first]),
            name="mlp_embedding_user", dtype=tf.float32)
        self.mlp_embedding_item = tf.Variable(
            init_fn([self.num_items, half_first]),
            name="mlp_embedding_item", dtype=tf.float32)
        self.dense_layer = [
            tf.layers.Dense(units=n_units, activation=tf.nn.relu,
                            name="layer%d" % idx)
            for idx, n_units in enumerate(self.layers)
        ]
def _create_variables(self):
    """Create user/item embeddings and one square filter matrix per layer."""
    with tf.name_scope("embedding"):  # The embedding initialization is unknown now
        embed_init = tool.get_initializer(self.embed_init_method, self.stddev)
        self.user_embeddings = tf.Variable(
            embed_init([self.num_users, self.embedding_size]),
            dtype=tf.float32, name='user_embeddings')
        self.item_embeddings = tf.Variable(
            embed_init([self.num_items, self.embedding_size]),
            dtype=tf.float32, name='item_embeddings')

        w_init = tool.get_initializer(self.weight_init_method, self.stddev)
        self.filters = [
            tf.Variable(w_init([self.embedding_size, self.embedding_size]),
                        dtype=tf.float32)
            for _ in range(self.num_layers)
        ]
def _create_variables(self):
    """Create a weight and bias variable for each consecutive pair in self.dims."""
    with tf.name_scope("embedding"):  # The embedding initialization is unknown now
        w_init = tool.get_initializer(self.weight_init_method, self.stddev)
        b_init = tool.get_initializer(self.bias_init_method, self.stddev)
        self.weights, self.biases = [], []
        # define weights
        for i, (d_in, d_out) in enumerate(zip(self.dims[:-1], self.dims[1:])):
            self.weights.append(
                tf.Variable(w_init([d_in, d_out]),
                            name="weight_{}to{}".format(i, i + 1),
                            dtype=tf.float32))
            self.biases.append(
                tf.Variable(b_init([d_out]),
                            name="bias_{}".format(i + 1),
                            dtype=tf.float32))
def _create_variables(self):
    """Create the four relation embedding tables and the attention-MLP parameters."""
    with tf.name_scope("embedding"):
        embed_init = tool.get_initializer(self.embed_init_method, self.stddev)
        w_init = tool.get_initializer(self.weight_init_method, self.stddev)

        self.embeddings_UI = tf.Variable(
            embed_init([self.num_users, self.embedding_size]),
            name='embeddings_UI', dtype=tf.float32)  # (users, embedding_size)
        self.embeddings_IU = tf.Variable(
            embed_init([self.num_items, self.embedding_size]),
            name='embeddings_IU', dtype=tf.float32)  # (items, embedding_size)
        self.embeddings_IL = tf.Variable(
            embed_init([self.num_items, self.embedding_size]),
            name='embeddings_IL', dtype=tf.float32)
        self.embeddings_LI = tf.Variable(
            embed_init([self.num_items, self.embedding_size]),
            name='embeddings_LI', dtype=tf.float32)  # (items, embedding_size)

        # Attention MLP consumes three concatenated embeddings.
        self.W = tf.Variable(w_init([3 * self.embedding_size, self.weight_size]),
                             name='Weights_for_MLP', dtype=tf.float32,
                             trainable=True)
        self.b = tf.Variable(w_init([1, self.weight_size]),
                             name='Bias_for_MLP', dtype=tf.float32,
                             trainable=True)
        self.h = tf.Variable(tf.ones([self.weight_size, 1]),
                             name='H_for_MLP', dtype=tf.float32)
def _create_variables(self):
    """Create two-layer autoencoder parameters for the user and item components."""
    with tf.name_scope("embedding"):  # The embedding initialization is unknown now
        init_fn = tool.get_initializer(self.init_method, self.stddev)

        # user component
        self.UV = tf.Variable(init_fn([self.num_items, self.hidden_neuron]),
                              name="UV", dtype=tf.float32)   # first layer weights
        self.UW = tf.Variable(init_fn([self.hidden_neuron, self.num_items]),
                              name="UW", dtype=tf.float32)   # second layer weights
        self.Ub1 = tf.Variable(init_fn([1, self.hidden_neuron]),
                               name="Ub1", dtype=tf.float32)  # first layer bias
        self.Ub2 = tf.Variable(init_fn([1, self.num_items]),
                               name="Ub2", dtype=tf.float32)  # second layer bias

        # item component
        self.IV = tf.Variable(init_fn([self.num_users, self.hidden_neuron]),
                              name="IV", dtype=tf.float32)   # first layer weights
        self.IW = tf.Variable(init_fn([self.hidden_neuron, self.num_users]),
                              name="IW", dtype=tf.float32)   # second layer weights
        self.Ib1 = tf.Variable(init_fn([1, self.hidden_neuron]),
                               name="Ib1", dtype=tf.float32)  # first layer bias
        self.Ib2 = tf.Variable(init_fn([1, self.num_users]),
                               name="Ib2", dtype=tf.float32)  # second layer bias

        self.I_factor_vector = tf.Variable(init_fn([1, self.num_items]),
                                           name="I_factor_vector",
                                           dtype=tf.float32)
def _create_variables(self):
    """Create user/item embeddings plus zero-initialized, non-trainable deltas."""
    with tf.name_scope("embedding"):
        init_fn = tool.get_initializer(self.init_method, self.stddev)
        self.embedding_P = tf.Variable(
            init_fn([self.num_users, self.embedding_size]),
            name='embedding_P', dtype=tf.float32)  # (users, embedding_size)
        self.embedding_Q = tf.Variable(
            init_fn([self.num_items, self.embedding_size]),
            name='embedding_Q', dtype=tf.float32)  # (items, embedding_size)
        # Perturbation buffers: excluded from the optimizer (trainable=False);
        # presumably assigned by an adversarial update elsewhere — confirm.
        self.delta_P = tf.Variable(
            tf.zeros(shape=[self.num_users, self.embedding_size]),
            name='delta_P', dtype=tf.float32,
            trainable=False)  # (users, embedding_size)
        self.delta_Q = tf.Variable(
            tf.zeros(shape=[self.num_items, self.embedding_size]),
            name='delta_Q', dtype=tf.float32,
            trainable=False)  # (items, embedding_size)
def _create_variables(self, params=None):
    """Create MF and MLP embedding tables, fresh or from pretrained `params`.

    Args:
        params: optional nested values; params[0] = (mf_user, mf_item) and
            params[1] = (mlp_user, mlp_item). When None, tables are
            freshly initialized.
    """
    with tf.name_scope("embedding"):  # The embedding initialization is unknown now
        if params is None:
            init_fn = tool.get_initializer(self.init_method, self.stddev)
            half_first = int(self.layers[0] / 2)
            self.mf_embedding_user = tf.Variable(
                init_fn([self.num_users, self.embedding_size]),
                name='mf_embedding_user', dtype=tf.float32)
            self.mf_embedding_item = tf.Variable(
                init_fn([self.num_items, self.embedding_size]),
                name='mf_embedding_item', dtype=tf.float32)
            self.mlp_embedding_user = tf.Variable(
                init_fn([self.num_users, half_first]),
                name="mlp_embedding_user", dtype=tf.float32)
            self.mlp_embedding_item = tf.Variable(
                init_fn([self.num_items, half_first]),
                name="mlp_embedding_item", dtype=tf.float32)
        else:
            self.mf_embedding_user = tf.Variable(
                params[0][0], name='mf_embedding_user', dtype=tf.float32)
            self.mf_embedding_item = tf.Variable(
                params[0][1], name='mf_embedding_item', dtype=tf.float32)
            self.mlp_embedding_user = tf.Variable(
                params[1][0], name="mlp_embedding_user", dtype=tf.float32)
            self.mlp_embedding_item = tf.Variable(
                params[1][1], name="mlp_embedding_item", dtype=tf.float32)
def _create_variables(self, params=None):
    """Create item embeddings, attention-MLP parameters, and the deep-tower
    (DeepICF+a) weights.

    Args:
        params: optional pretrained values [c1, embedding_Q, bias]; when None,
            variables are freshly initialized.
    """
    with tf.name_scope("embedding"):  # The embedding initialization is unknown now
        if params is None:
            embed_init = tool.get_initializer(self.embed_init_method, self.stddev)
            self.c1 = tf.Variable(embed_init([self.num_items, self.embedding_size]),
                                  name='c1', dtype=tf.float32)
            self.embedding_Q = tf.Variable(
                embed_init([self.num_items, self.embedding_size]),
                name='embedding_Q', dtype=tf.float32)
            self.bias = tf.Variable(tf.zeros(self.num_items), name='bias')
        else:
            self.c1 = tf.Variable(params[0], name='c1', dtype=tf.float32)
            self.embedding_Q = tf.Variable(params[1], name='embedding_Q',
                                           dtype=tf.float32)
            self.bias = tf.Variable(params[2], name="bias", dtype=tf.float32)

        # One extra all-zero row is appended to c1 (presumably a padding slot).
        self.c2 = tf.constant(0.0, tf.float32, [1, self.embedding_size], name='c2')
        self.embedding_Q_ = tf.concat([self.c1, self.c2], axis=0,
                                      name='embedding_Q_')

        # Variables for attention
        w_init = tool.get_initializer(self.weight_init_method, self.stddev)
        b_init = tool.get_initializer(self.bias_init_method, self.stddev)
        # algorithm 0 feeds a single embedding into the MLP; otherwise two
        # embeddings are concatenated, doubling the input width.
        attn_in = self.embedding_size if self.algorithm == 0 else 2 * self.embedding_size
        self.W = tf.Variable(w_init([attn_in, self.weight_size]),
                             name='Weights_for_MLP', dtype=tf.float32,
                             trainable=True)
        self.b = tf.Variable(b_init([1, self.weight_size]),
                             name='Bias_for_MLP', dtype=tf.float32,
                             trainable=True)
        self.h = tf.Variable(tf.ones([self.weight_size, 1]),
                             name='H_for_MLP', dtype=tf.float32)

        # Variables for DeepICF+a
        self.weights = {
            'out': tf.Variable(w_init([self.n_hidden[-1], 1]), name='weights_out')
        }
        self.biases = {
            'out': tf.Variable(tf.random_normal([1]), name='biases_out')
        }
        # Hidden layers chain from the embedding size through self.n_hidden.
        n_in = self.embedding_size
        for i, n_out in enumerate(self.n_hidden):
            self.weights['h%d' % i] = tf.Variable(
                w_init([n_in, n_out]), name='weights_h%d' % i)
            self.biases['b%d' % i] = tf.Variable(
                tf.random_normal([n_out]), name='biases_b%d' % i)
            n_in = n_out
#!/usr/local/bin/python
def weight_variable(self, shape):
    """Return a fresh tf.Variable of `shape` drawn from the configured weight initializer."""
    init_fn = tool.get_initializer(self.weight_init_method, self.stddev)
    return tf.Variable(init_fn(shape))