def __init__(self, emb_size, max_his):
    super().__init__()
    self.max_his = max_his
    # learnable position embeddings for histories of up to max_his items
    self.p_embeddings = nn.Embedding(max_his + 1, emb_size)
    # single-head transformer layer applied over the history sequence
    self.transformer = layers.TransformerLayer(d_model=emb_size, d_ff=emb_size,
                                               n_heads=1, kq_same=False)
def __init__(self, emb_size, max_his, num_layers=2, num_heads=2):
    super().__init__()
    self.p_embeddings = nn.Embedding(max_his + 1, emb_size)
    # stack of multi-head transformer layers
    self.transformer_block = nn.ModuleList([
        layers.TransformerLayer(d_model=emb_size, d_ff=emb_size, n_heads=num_heads)
        for _ in range(num_layers)
    ])
def _define_params(self):
    self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)        # item embeddings
    self.p_embeddings = nn.Embedding(self.max_his + 1, self.emb_size)     # position embeddings
    # stack of multi-head transformer layers with dropout
    self.transformer_block = nn.ModuleList([
        layers.TransformerLayer(d_model=self.emb_size, d_ff=self.emb_size,
                                n_heads=self.num_heads, dropout=self.dropout, kq_same=False)
        for _ in range(self.num_layers)
    ])
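# A minimal sketch (an assumption, not taken from the source) of how the layers defined
# above could encode a history: item and position embeddings are summed, then the stacked
# transformer blocks are applied under a causal (lower-triangular) attention mask.
# `history` ([batch, seq_len] item ids, 0 = padding) and `lengths` are assumed input names,
# `import torch` is assumed, and TransformerLayer is assumed to take (sequence, mask).
def encode_history_sketch(self, history, lengths):
    batch_size, seq_len = history.shape
    positions = torch.arange(seq_len, device=history.device)                  # [seq_len]
    his_vectors = self.i_embeddings(history) + self.p_embeddings(positions)   # [batch, seq_len, emb]
    causal_mask = torch.tril(torch.ones(seq_len, seq_len, device=history.device)).bool()
    for block in self.transformer_block:
        his_vectors = block(his_vectors, causal_mask)          # each step attends only to its prefix
    his_vectors = his_vectors * (history > 0).unsqueeze(-1).float()           # zero out padded positions
    # use the hidden state at the last valid position as the sequence representation
    return his_vectors[torch.arange(batch_size, device=history.device), lengths - 1]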
def __init__(self, k, item_num, emb_size, attn_size, max_his, add_pos):
    super(MultiInterestExtractor, self).__init__()
    self.max_his = max_his
    self.add_pos = add_pos
    self.i_embeddings = nn.Embedding(item_num, emb_size)
    if self.add_pos:
        self.p_embeddings = nn.Embedding(max_his + 1, emb_size)
    # two-layer attention network that scores each history item for each of the k interests
    self.W1 = nn.Linear(emb_size, attn_size)
    self.W2 = nn.Linear(attn_size, k)
    self.transformer = layers.TransformerLayer(d_model=emb_size, d_ff=emb_size,
                                               n_heads=1, kq_same=False)
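# A minimal sketch (an assumption, not taken from the source) of how the modules defined
# above could produce k interest vectors: W1/W2 implement self-attentive pooling over the
# item-history embeddings. `history` ([batch, seq_len] item ids, 0 = padding) is an assumed
# input name and `import torch` is assumed; the transformer layer is omitted here.
def extract_interests_sketch(self, history):
    his_vectors = self.i_embeddings(history)                      # [batch, seq_len, emb_size]
    if self.add_pos:
        positions = torch.arange(history.size(1), device=history.device)
        his_vectors = his_vectors + self.p_embeddings(positions)  # add learned position embeddings
    attn = self.W2(torch.tanh(self.W1(his_vectors)))              # [batch, seq_len, k]
    pad_mask = (history == 0).unsqueeze(-1)                       # hide padded steps from the softmax
    attn = attn.masked_fill(pad_mask, -1e9).softmax(dim=1)        # normalize over the sequence dim
    interests = attn.transpose(1, 2) @ his_vectors                # [batch, k, emb_size]
    return interests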