def _build_low_layers(self, args):
    """Build MKR's shared low layers: embedding tables, batch lookups, and
    L stacked (user MLP, cross&compress unit, tail MLP) layers."""
    # Embedding tables for users, items, KG entities and KG relations.
    self.user_emb_matrix = tf.get_variable('user_emb_matrix', [self.n_user, args.dim])
    self.item_emb_matrix = tf.get_variable('item_emb_matrix', [self.n_item, args.dim])
    self.entity_emb_matrix = tf.get_variable('entity_emb_matrix', [self.n_entity, args.dim])
    self.relation_emb_matrix = tf.get_variable('relation_emb_matrix', [self.n_relation, args.dim])

    # Per-batch lookups; each result is [batch_size, dim].
    lookup = tf.nn.embedding_lookup
    self.user_embeddings = lookup(self.user_emb_matrix, self.user_indices)
    self.item_embeddings = lookup(self.item_emb_matrix, self.item_indices)
    self.head_embeddings = lookup(self.entity_emb_matrix, self.head_indices)
    self.relation_embeddings = lookup(self.relation_emb_matrix, self.relation_indices)
    self.tail_embeddings = lookup(self.entity_emb_matrix, self.tail_indices)

    # Stack L low layers. The cross&compress unit couples the item (RS)
    # and head-entity (KGE) representations at every layer.
    for _ in range(args.L):
        user_layer = Dense(input_dim=args.dim, output_dim=args.dim)
        tail_layer = Dense(input_dim=args.dim, output_dim=args.dim)
        cross_unit = CrossCompressUnit(args.dim)

        self.user_embeddings = user_layer(self.user_embeddings)
        self.item_embeddings, self.head_embeddings = cross_unit(
            [self.item_embeddings, self.head_embeddings])
        self.tail_embeddings = tail_layer(self.tail_embeddings)

        # Cross&compress variables belong to both objectives, so they are
        # collected into both variable lists.
        self.vars_rs.extend(user_layer.vars)
        self.vars_rs.extend(cross_unit.vars)
        self.vars_kge.extend(tail_layer.vars)
        self.vars_kge.extend(cross_unit.vars)
def __init__(self, args, n_users, n_items, n_entities, n_relations):
    """Construct the MKR model: embedding tables, L low layers and H KGE layers.

    Args:
        args: config namespace providing ``L``, ``H`` and ``dim``.
        n_users, n_items, n_entities, n_relations: vocabulary sizes.
    """
    super(MKR, self).__init__()
    self.n_user = n_users
    self.n_item = n_items
    self.n_entity = n_entities
    self.n_relation = n_relations
    self.L = args.L
    self.H = args.H
    self.dim = args.dim

    # Define the embedding matrices.
    self.user_emb_matrix = nn.Embedding(n_users, args.dim)
    self.item_emb_matrix = nn.Embedding(n_items, args.dim)
    self.entity_emb_matrix = nn.Embedding(n_entities, args.dim)
    self.relation_emb_matrix = nn.Embedding(n_relations, args.dim)

    # Define the networks. FIX: use nn.ModuleList instead of plain Python
    # lists — sub-modules held in plain lists are NOT registered with the
    # parent nn.Module, so their parameters would be missing from
    # .parameters() (never optimized), .to(device) and state_dict().
    # ModuleList keeps the same indexing/iteration interface for callers.
    self.user_mlps = nn.ModuleList()
    self.tail_mlps = nn.ModuleList()
    self.cc_units = nn.ModuleList()
    self.kge_mlps = nn.ModuleList()
    for _ in range(args.L):
        self.user_mlps.append(Dense(args.dim, args.dim))
        self.tail_mlps.append(Dense(args.dim, args.dim))
        self.cc_units.append(CrossCompressUnit(args.dim))
    for _ in range(args.H):
        self.kge_mlps.append(Dense(args.dim * 2, args.dim * 2))
    self.kge_pred_mlp = Dense(args.dim * 2, args.dim)
    self.sigmoid = nn.Sigmoid()
def _build_low_layers(self, args):
    """Build MKR's shared low layers: embedding tables (fresh or restored),
    per-batch lookups, and L stacked (Dense, CrossCompressUnit, Dense) layers.
    """
    if self.restore_path is None:
        # Fresh run: create trainable embedding tables with TF's default
        # initializer under these exact variable names.
        self.user_emb_matrix = tf.get_variable('user_emb_matrix', [self.n_user, args.dim])
        self.item_emb_matrix = tf.get_variable('item_emb_matrix', [self.n_item, args.dim])
        self.entity_emb_matrix = tf.get_variable('entity_emb_matrix', [self.n_entity, args.dim])
        self.relation_emb_matrix = tf.get_variable('relation_emb_matrix', [self.n_relation, args.dim])
    else:
        # Restore path: pre-trained embeddings are fed in through these
        # placeholders and copied into the trainable variables by the
        # *_emb_init assign ops below (presumably run once by the caller
        # after session creation — TODO confirm against the training loop).
        self.user_emb = tf.placeholder(tf.float32, [None, args.dim], 'user_emb')
        self.item_emb = tf.placeholder(tf.float32, [None, args.dim], 'item_emb')
        self.entity_emb = tf.placeholder(tf.float32, [None, args.dim], 'entity_emb')
        self.relation_emb = tf.placeholder(tf.float32, [None, args.dim], 'relation_emb')
        # Variables start from truncated-normal noise; real values arrive
        # via the assign ops.
        self.user_emb_matrix = tf.Variable(tf.truncated_normal([self.n_user, args.dim]),
                                           name='user_emb_matrix', trainable=True)
        self.item_emb_matrix = tf.Variable(tf.truncated_normal([self.n_item, args.dim]),
                                           name='item_emb_matrix', trainable=True)
        self.entity_emb_matrix = tf.Variable(tf.truncated_normal([self.n_entity, args.dim]),
                                             name='entity_emb_matrix', trainable=True)
        self.relation_emb_matrix = tf.Variable(tf.truncated_normal([self.n_relation, args.dim]),
                                               name='relation_emb_matrix', trainable=True)
        self.user_emb_init = self.user_emb_matrix.assign(self.user_emb)
        self.item_emb_init = self.item_emb_matrix.assign(self.item_emb)
        self.entity_emb_init = self.entity_emb_matrix.assign(self.entity_emb)
        self.relation_emb_init = self.relation_emb_matrix.assign(self.relation_emb)

    # Per-batch lookups; each result is [batch_size, dim].
    self.user_embeddings = tf.nn.embedding_lookup(self.user_emb_matrix, self.user_indices)
    self.item_embeddings = tf.nn.embedding_lookup(self.item_emb_matrix, self.item_indices)
    self.head_embeddings = tf.nn.embedding_lookup(self.entity_emb_matrix, self.head_indices)
    self.relation_embeddings = tf.nn.embedding_lookup(self.relation_emb_matrix, self.relation_indices)
    self.tail_embeddings = tf.nn.embedding_lookup(self.entity_emb_matrix, self.tail_indices)

    # Stack L low layers shared between the RS and KGE sub-tasks.
    for _ in range(args.L):
        user_mlp = Dense(input_dim=args.dim, output_dim=args.dim, dropout=self.dropout_param)
        tail_mlp = Dense(input_dim=args.dim, output_dim=args.dim, dropout=self.dropout_param)
        cc_unit = CrossCompressUnit(args.dim)
        self.user_embeddings = user_mlp(self.user_embeddings)
        # The cross&compress unit couples the item (RS) and head-entity
        # (KGE) representations.
        self.item_embeddings, self.head_embeddings = cc_unit([self.item_embeddings, self.head_embeddings])
        self.tail_embeddings = tail_mlp(self.tail_embeddings)
        # cc_unit variables are shared by both objectives, so they are
        # collected into both variable lists.
        self.vars_rs.extend(user_mlp.vars)
        self.vars_rs.extend(cc_unit.vars)
        self.vars_kge.extend(tail_mlp.vars)
        self.vars_kge.extend(cc_unit.vars)
def _build_low_layers(self, args):
    """Build MKR's shared low layers: embedding tables, per-batch lookups,
    and L stacked (Dense, CrossCompressUnit, Dense) layers."""
    # Create the user embedding matrix as a TF variable of shape
    # [n_user, dim]; the other three tables are analogous.
    self.user_emb_matrix = tf.get_variable('user_emb_matrix', [self.n_user, args.dim])
    self.item_emb_matrix = tf.get_variable('item_emb_matrix', [self.n_item, args.dim])
    self.entity_emb_matrix = tf.get_variable('entity_emb_matrix', [self.n_entity, args.dim])
    self.relation_emb_matrix = tf.get_variable('relation_emb_matrix', [self.n_relation, args.dim])
    # [batch_size, dim]
    # Select the rows of user_emb_matrix indexed by user_indices
    # (same pattern for items, heads, relations and tails).
    self.user_embeddings = tf.nn.embedding_lookup(self.user_emb_matrix, self.user_indices)
    self.item_embeddings = tf.nn.embedding_lookup(self.item_emb_matrix, self.item_indices)
    self.head_embeddings = tf.nn.embedding_lookup(self.entity_emb_matrix, self.head_indices)
    self.relation_embeddings = tf.nn.embedding_lookup(
        self.relation_emb_matrix, self.relation_indices)
    self.tail_embeddings = tf.nn.embedding_lookup(self.entity_emb_matrix, self.tail_indices)
    for _ in range(args.L):
        # Dense: a single fully-connected (linear) transform layer.
        user_mlp = Dense(input_dim=args.dim, output_dim=args.dim)
        tail_mlp = Dense(input_dim=args.dim, output_dim=args.dim)
        # Cross&compress unit: couples the item (RS) and head-entity (KGE)
        # representations.
        cc_unit = CrossCompressUnit(args.dim)
        self.user_embeddings = user_mlp(self.user_embeddings)
        self.item_embeddings, self.head_embeddings = cc_unit(
            [self.item_embeddings, self.head_embeddings])
        self.tail_embeddings = tail_mlp(self.tail_embeddings)
        # cc_unit variables are shared by both objectives, hence collected
        # into both lists.
        self.vars_rs.extend(user_mlp.vars)
        self.vars_rs.extend(cc_unit.vars)
        self.vars_kge.extend(tail_mlp.vars)
        self.vars_kge.extend(cc_unit.vars)
def __init__(self, args, n_user, n_item, n_entity, n_relation, use_inner_product=True):
    """Construct the MKR model (lower + higher layers).

    Args:
        args: config namespace providing ``L``, ``H`` and ``dim``.
        n_user, n_item, n_entity, n_relation: vocabulary sizes.
        use_inner_product: when True the RS score is presumably an inner
            product of user/item embeddings (computed elsewhere); when False
            an MLP prediction head is built here instead.
    """
    super(MKR_model, self).__init__()
    # <Lower Model>
    self.args = args
    self.n_user = n_user
    self.n_item = n_item
    self.n_entity = n_entity
    self.n_relation = n_relation
    self.use_inner_product = use_inner_product

    # Init embeddings.
    self.user_embeddings_lookup = nn.Embedding(self.n_user, self.args.dim)
    self.item_embeddings_lookup = nn.Embedding(self.n_item, self.args.dim)
    self.entity_embeddings_lookup = nn.Embedding(self.n_entity, self.args.dim)
    self.relation_embeddings_lookup = nn.Embedding(self.n_relation, self.args.dim)

    # L stacked low layers, registered via named sub-modules.
    self.user_mlp = nn.Sequential()
    self.tail_mlp = nn.Sequential()
    self.cc_unit = nn.Sequential()
    for i_cnt in range(self.args.L):
        self.user_mlp.add_module('user_mlp{}'.format(i_cnt),
                                 Dense(self.args.dim, self.args.dim))
        self.tail_mlp.add_module('tail_mlp{}'.format(i_cnt),
                                 Dense(self.args.dim, self.args.dim))
        self.cc_unit.add_module('cc_unit{}'.format(i_cnt),
                                CrossCompressUnit(self.args.dim))

    # <Higher Model>
    self.kge_pred_mlp = Dense(self.args.dim * 2, self.args.dim)
    self.kge_mlp = nn.Sequential()
    for i_cnt in range(self.args.H - 1):
        self.kge_mlp.add_module('kge_mlp{}'.format(i_cnt),
                                Dense(self.args.dim * 2, self.args.dim * 2))
    # Idiom fix: was `if self.use_inner_product==False:` — compare truthiness,
    # not equality with a boolean literal.
    if not self.use_inner_product:
        self.rs_pred_mlp = Dense(self.args.dim * 2, 1)
        self.rs_mlp = nn.Sequential()
        for i_cnt in range(self.args.H - 1):
            self.rs_mlp.add_module('rs_mlp{}'.format(i_cnt),
                                   Dense(self.args.dim * 2, self.args.dim * 2))
def __init__(self, args, n_user, n_item, n_entity, n_relation):
    """Construct the Test_model: embeddings, HGN-style gating, horizontal and
    vertical CNN layers, and MKR low/high layers.

    Args:
        args: config namespace providing ``dim``, ``L_hgn``, ``L_mkr``,
            ``H_mkr``, ``n_filters`` and ``filter_height``.
        n_user, n_item, n_entity, n_relation: vocabulary sizes.
    """
    super(Test_model, self).__init__()
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.args = args
    self.n_user = n_user
    self.n_item = n_item
    self.n_entity = n_entity
    self.n_relation = n_relation

    # Embedding tables, placed on the chosen device.
    self.user_embeddings_lookup = nn.Embedding(self.n_user, self.args.dim).to(self.device)
    self.item_embeddings_lookup = nn.Embedding(self.n_item, self.args.dim).to(self.device)
    self.entity_embeddings_lookup = nn.Embedding(self.n_entity, self.args.dim).to(self.device)
    self.relation_embeddings_lookup = nn.Embedding(self.n_relation, self.args.dim).to(self.device)

    L_ = self.args.L_hgn
    d_ = self.args.dim

    # Feature / instance gates.
    self.feature_gate_item = nn.Linear(d_, d_).to(self.device)
    self.feature_gate_user = nn.Linear(d_, d_).to(self.device)
    # FIX: the original wrapped these weights in deprecated autograd.Variable
    # and called .to(device) on a requires_grad leaf, which yields a NON-leaf
    # tensor that is never registered with the module — model.parameters()
    # would silently skip it, so no optimizer would ever update these weights.
    # nn.Parameter registers the tensor and keeps it a leaf; xavier init is
    # applied before wrapping, exactly as before.
    self.instance_gate_item = nn.Parameter(
        torch.nn.init.xavier_uniform_(torch.zeros(d_, 1, device=self.device)))
    self.instance_gate_user = nn.Parameter(
        torch.nn.init.xavier_uniform_(torch.zeros(d_, L_, device=self.device)))

    # Embedding init: N(0, 1/dim).
    self.user_embeddings_lookup.weight.data.normal_(
        0, 1.0 / self.user_embeddings_lookup.embedding_dim)
    self.item_embeddings_lookup.weight.data.normal_(
        0, 1.0 / self.item_embeddings_lookup.embedding_dim)
    self.entity_embeddings_lookup.weight.data.normal_(
        0, 1.0 / self.entity_embeddings_lookup.embedding_dim)
    self.relation_embeddings_lookup.weight.data.normal_(
        0, 1.0 / self.relation_embeddings_lookup.embedding_dim)

    # Per-item output weights/bias; index 0 is the padding item.
    self.W2 = nn.Embedding(self.n_item, d_, padding_idx=0).to(self.device)
    self.b2 = nn.Embedding(self.n_item, 1, padding_idx=0).to(self.device)
    self.W2.weight.data.normal_(0, 1.0 / self.W2.embedding_dim)
    self.b2.weight.data.zero_()

    # CNN Layer: horizontal filters span the full embedding width, vertical
    # filters span the full sequence length.
    n_filters, filter_height = self.args.n_filters, self.args.filter_height
    self.horizontal_cnn_layer = nn.Conv2d(
        in_channels=1, out_channels=n_filters,
        kernel_size=(filter_height, d_), stride=1).to(self.device)
    self.vertical_cnn_layer = nn.Conv2d(
        in_channels=1, out_channels=n_filters,
        kernel_size=(L_, 1), stride=1).to(self.device)

    # Maxpooling Layer over the horizontal-conv output.
    self.maxpooling_layer = nn.MaxPool2d((1, L_ - 1), stride=1)

    # Fully Connected Layer weights (same nn.Parameter fix as above).
    self.W_FC_layer = nn.Parameter(torch.nn.init.xavier_uniform_(
        torch.zeros(n_filters + (n_filters * d_), d_, device=self.device)))
    self.W_plun_FC_layer = nn.Parameter(torch.nn.init.xavier_uniform_(
        torch.zeros(2 * d_, d_, device=self.device)))

    # MKR low layers (tail MLPs + cross&compress units).
    self.tail_mlp = nn.Sequential()
    self.cc_unit = nn.Sequential()
    for i_cnt in range(self.args.L_mkr):
        self.tail_mlp.add_module('tail_mlp{}'.format(i_cnt),
                                 Dense(self.args.dim, self.args.dim, self.device))
        self.cc_unit.add_module('cc_unit{}'.format(i_cnt),
                                CrossCompressUnit(self.args.dim, self.device))

    # MKR high (KGE) layers.
    self.kge_pred_mlp = Dense(self.args.dim * 2, self.args.dim, self.device)
    self.kge_mlp = nn.Sequential()
    for i_cnt in range(self.args.H_mkr - 1):
        self.kge_mlp.add_module('kge_mlp{}'.format(i_cnt),
                                Dense(self.args.dim * 2, self.args.dim * 2, self.device))

    # Relation transform matrix (nn.Parameter fix, as above).
    self.W_r = nn.Parameter(
        torch.nn.init.xavier_uniform_(torch.zeros(d_, d_, device=self.device)))