Example no. 1
0
    def __init__(self,
                 num_users,
                 num_items,
                 embedding_dim=32,
                 user_embedding_layer=None,
                 item_embedding_layer=None,
                 sparse=False):
        """Build user/item latent factors and per-id scalar biases.

        Pre-built embedding layers, when supplied, are used as-is;
        otherwise fresh ScaledEmbedding layers are created.
        """
        super(BilinearNet, self).__init__()

        self.embedding_dim = embedding_dim

        # Fall back to freshly created embeddings unless the caller
        # injected pre-built ones.
        if user_embedding_layer is None:
            user_embedding_layer = ScaledEmbedding(num_users,
                                                   embedding_dim,
                                                   sparse=sparse)
        self.user_embeddings = user_embedding_layer

        if item_embedding_layer is None:
            item_embedding_layer = ScaledEmbedding(num_items,
                                                   embedding_dim,
                                                   sparse=sparse)
        self.item_embeddings = item_embedding_layer

        # Dimension-1 bias embeddings, zero-initialised.
        self.user_biases = ZeroEmbedding(num_users, 1, sparse=sparse)
        self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse)
Example no. 2
0
    def __init__(self, num_users, num_items, embedding_dim=32, sparse=False):
        """Set up user/item latent factors plus scalar bias embeddings."""
        super().__init__()

        self.embedding_dim = embedding_dim

        # Latent factor matrices for both sides of the interaction.
        self.user_embeddings = ScaledEmbedding(
            num_users, embedding_dim, sparse=sparse)
        self.item_embeddings = ScaledEmbedding(
            num_items, embedding_dim, sparse=sparse)

        # Per-id scalar offsets, zero-initialised.
        self.user_biases = ZeroEmbedding(num_users, 1, sparse=sparse)
        self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse)
    def __init__(self,
                 num_users,
                 num_items,
                 embedding_dim=32,
                 user_embedding_layer=None,
                 item_embedding_layer=None,
                 sparse=False):
        """Initialise latent factors, bias terms and the scoring MLP.

        Parameters
        ----------
        num_users, num_items : int
            Sizes of the user and item id spaces.
        embedding_dim : int
            Dimensionality of the latent factors.
        user_embedding_layer, item_embedding_layer : optional
            Pre-built embedding layers; fresh ScaledEmbedding layers
            are created when these are None.
        sparse : bool
            Whether the embedding layers use sparse gradients.
        """
        super(RankingNet, self).__init__()

        self.embedding_dim = embedding_dim

        # MLP layer sizes.  Input width is 2 * embedding_dim —
        # presumably the user and item embeddings concatenated; confirm
        # against the forward pass.  Output is a single score.
        self.inputDim = embedding_dim * 2
        self.hiddenDim = self.inputDim
        self.hiddenDim2 = embedding_dim * 2
        self.l2Output = 1

        if user_embedding_layer is not None:
            self.user_embeddings = user_embedding_layer
        else:
            self.user_embeddings = ScaledEmbedding(num_users,
                                                   embedding_dim,
                                                   sparse=sparse)

        if item_embedding_layer is not None:
            self.item_embeddings = item_embedding_layer
        else:
            self.item_embeddings = ScaledEmbedding(num_items,
                                                   embedding_dim,
                                                   sparse=sparse)

        self.user_biases = ZeroEmbedding(num_users, 1, sparse=sparse)
        self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse)

        # Three-layer MLP with batch norm, plus dropout (p=0.8) and a
        # Sigmoid module for the output.
        self.linear1 = nn.Linear(self.inputDim, self.hiddenDim)
        self.bn1 = nn.BatchNorm1d(self.hiddenDim)
        self.linear2 = nn.Linear(self.hiddenDim, self.hiddenDim2)
        self.bn2 = nn.BatchNorm1d(self.hiddenDim2)
        self.linear3 = nn.Linear(self.hiddenDim2, self.l2Output)
        self.dropout = nn.Dropout(p=0.8)
        self.output = nn.Sigmoid()
    def __init__(self, num_users, num_items, embedding_dim=32,
                 num_components=4):
        """Per-user taste/attention mixtures plus per-item embeddings."""
        super(EmbeddingMixtureNet, self).__init__()

        self.embedding_dim = embedding_dim
        self.num_components = num_components

        # Each user row stores num_components taste vectors and matching
        # attention vectors, flattened into one wide embedding.
        mixture_dim = embedding_dim * num_components
        self.taste_embeddings = ScaledEmbedding(num_users, mixture_dim)
        self.attention_embeddings = ScaledEmbedding(num_users, mixture_dim)
        self.item_embeddings = ScaledEmbedding(num_items, embedding_dim)

        # Scalar bias tables, zero-initialised.
        self.user_biases = ZeroEmbedding(num_users, 1)
        self.item_biases = ZeroEmbedding(num_items, 1)
    def __init__(self, num_users, num_items, embedding_dim=32,
                 num_components=4):
        """Plain embeddings, scalar biases and a mixture head."""
        super(NonlinearMixtureNet, self).__init__()

        self.embedding_dim = embedding_dim
        self.num_components = num_components

        # Plain nn.Embedding here (not the ScaledEmbedding used by
        # sibling models).
        self.user_embeddings = nn.Embedding(num_users, embedding_dim)
        self.item_embeddings = nn.Embedding(num_items, embedding_dim)

        self.user_biases = ZeroEmbedding(num_users, 1)
        self.item_biases = ZeroEmbedding(num_items, 1)

        # Mixture head module.
        self.mixture = MixtureComponent(embedding_dim, num_components)
Example no. 6
0
    def __init__(self, num_items, embedding_dim=32,
                 num_components=4,
                 diversity_penalty=1.0,
                 item_embedding_layer=None, sparse=False):
        """Item embeddings, an LSTM encoder and a mixture projection."""
        super(DiversifiedMixtureLSTMNet, self).__init__()

        self.embedding_dim = embedding_dim
        self.num_components = num_components
        self._diversity_penalty = diversity_penalty

        # Use the injected embedding layer when given, otherwise build one.
        if item_embedding_layer is None:
            item_embedding_layer = ScaledEmbedding(num_items, embedding_dim,
                                                   padding_idx=PADDING_IDX,
                                                   sparse=sparse)
        self.item_embeddings = item_embedding_layer

        self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse,
                                         padding_idx=PADDING_IDX)

        # Sequence encoder over item embeddings.
        self.lstm = nn.LSTM(batch_first=True,
                            input_size=embedding_dim,
                            hidden_size=embedding_dim)
        # Kernel-size-1 convolution projecting each LSTM state to
        # num_components pairs of embedding-sized vectors.
        self.projection = nn.Conv1d(embedding_dim,
                                    embedding_dim * self.num_components * 2,
                                    kernel_size=1)
Example no. 7
0
    def __init__(self,
                 num_items,
                 embedding_dim=32,
                 item_embedding_layer=None,
                 sparse=False):
        """Item embeddings, biases and an LSTM sequence encoder."""
        super(LSTMNet, self).__init__()

        self.embedding_dim = embedding_dim

        # Use the injected embedding layer when given, otherwise build one.
        if item_embedding_layer is None:
            item_embedding_layer = ScaledEmbedding(num_items,
                                                   embedding_dim,
                                                   padding_idx=PADDING_IDX,
                                                   sparse=sparse)
        self.item_embeddings = item_embedding_layer

        self.item_biases = ZeroEmbedding(num_items,
                                         1,
                                         sparse=sparse,
                                         padding_idx=PADDING_IDX)

        # Sequence encoder; batch dimension first.
        self.lstm = nn.LSTM(input_size=embedding_dim,
                            hidden_size=embedding_dim,
                            batch_first=True)
Example no. 8
0
    def __init__(self,
                 num_items,
                 embedding_dim=32,
                 kernel_width=5,
                 dilation=1,
                 num_layers=1,
                 sparse=False):
        """Item embeddings plus a stack of identical Conv2d layers."""
        super().__init__()

        self.embedding_dim = embedding_dim
        self.kernel_width = kernel_width
        self.dilation = dilation

        self.item_embeddings = ScaledEmbedding(
            num_items, embedding_dim,
            sparse=sparse, padding_idx=PADDING_IDX)
        self.item_biases = ZeroEmbedding(
            num_items, 1,
            sparse=sparse, padding_idx=PADDING_IDX)

        # Build num_layers identical convolutions, registering each one
        # explicitly so its parameters are tracked by this module.
        self.cnn_layers = []
        for index in range(num_layers):
            conv = nn.Conv2d(embedding_dim,
                             embedding_dim,
                             (kernel_width, 1),
                             dilation=(dilation, 1))
            self.add_module('cnn_{}'.format(index), conv)
            self.cnn_layers.append(conv)
Example no. 9
0
    def __init__(self, num_items, embedding_dim=32, sparse=False):
        """Item embedding and bias tables for the pooling model."""
        super(PoolNet, self).__init__()

        self.embedding_dim = embedding_dim

        # padding_idx marks the padding-token row in both tables.
        self.item_embeddings = ScaledEmbedding(
            num_items, embedding_dim,
            sparse=sparse, padding_idx=PADDING_IDX)
        self.item_biases = ZeroEmbedding(
            num_items, 1,
            sparse=sparse, padding_idx=PADDING_IDX)
Example no. 10
0
    def __init__(self,
                 num_items,
                 embedding_dim=32,
                 kernel_width=3,
                 dilation=1,
                 num_layers=1,
                 nonlinearity='tanh',
                 residual_connections=True,
                 sparse=False,
                 benchmark=True,
                 item_embedding_layer=None):
        """Set up item embeddings, biases and the convolution stack.

        Parameters
        ----------
        num_items : int
            Size of the item id space.
        embedding_dim : int
            Dimensionality of the item embeddings.
        kernel_width, dilation : int or iterable
            Per-layer convolution parameters; scalars are expanded to
            num_layers entries via _to_iterable.
        num_layers : int
            Number of convolutional layers.
        nonlinearity : {'tanh', 'relu'}
            Activation function stored on the instance.
        residual_connections : bool
            Flag stored for use elsewhere in the model.
        sparse : bool
            Use sparse gradients for the embedding layers.
        benchmark : bool
            Value assigned to cudnn.benchmark.
        item_embedding_layer : optional
            Pre-built embedding layer; created fresh when None.

        Raises
        ------
        ValueError
            If nonlinearity is not 'tanh' or 'relu'.
        """
        super(CNNNet, self).__init__()

        cudnn.benchmark = benchmark

        self.embedding_dim = embedding_dim
        self.kernel_width = _to_iterable(kernel_width, num_layers)
        self.dilation = _to_iterable(dilation, num_layers)
        # torch.tanh / torch.relu replace F.tanh / F.relu, which are
        # deprecated in modern PyTorch; behaviour is identical.
        if nonlinearity == 'tanh':
            self.nonlinearity = torch.tanh
        elif nonlinearity == 'relu':
            self.nonlinearity = torch.relu
        else:
            raise ValueError('Nonlinearity must be one of (tanh, relu)')
        self.residual_connections = residual_connections

        if item_embedding_layer is not None:
            self.item_embeddings = item_embedding_layer
        else:
            self.item_embeddings = ScaledEmbedding(num_items,
                                                   embedding_dim,
                                                   padding_idx=PADDING_IDX,
                                                   sparse=sparse)

        self.item_biases = ZeroEmbedding(num_items,
                                         1,
                                         sparse=sparse,
                                         padding_idx=PADDING_IDX)

        # One Conv2d per (kernel_width, dilation) pair; second kernel
        # dimension is fixed at 1.
        self.cnn_layers = [
            nn.Conv2d(embedding_dim,
                      embedding_dim, (_kernel_width, 1),
                      dilation=(_dilation, 1))
            for (_kernel_width,
                 _dilation) in zip(self.kernel_width, self.dilation)
        ]

        # Register each conv so its parameters are tracked by the module
        # (the plain list above would otherwise hide them).
        for i, layer in enumerate(self.cnn_layers):
            self.add_module('cnn_{}'.format(i), layer)
    def __init__(self, num_users, num_items, embedding_dim=32,
                 projection_scale=1.0,
                 num_components=4):
        """Build embeddings, biases and the taste/attention projections.

        Parameters
        ----------
        num_users, num_items : int
            Sizes of the user and item id spaces.
        embedding_dim : int
            Dimensionality of the latent factors.
        projection_scale : float
            Gain used for Xavier initialisation of the projections.
        num_components : int
            Number of mixture components.
        """
        super(MixtureNet, self).__init__()

        self.embedding_dim = embedding_dim
        self.num_components = num_components
        self.projection_scale = projection_scale

        self.user_embeddings = ScaledEmbedding(num_users, embedding_dim)
        self.item_embeddings = ScaledEmbedding(num_items, embedding_dim)

        self.user_biases = ZeroEmbedding(num_users, 1)
        self.item_biases = ZeroEmbedding(num_items, 1)

        # Project a user embedding to num_components embedding-sized
        # taste vectors and matching attention vectors.
        self.taste_projection = nn.Linear(embedding_dim,
                                          embedding_dim * self.num_components, bias=False)
        self.attention_projection = nn.Linear(embedding_dim,
                                              embedding_dim * self.num_components, bias=False)

        # xavier_normal_ is the in-place replacement for the deprecated
        # (and since removed) torch.nn.init.xavier_normal.
        for layer in (self.taste_projection, self.attention_projection):
            torch.nn.init.xavier_normal_(layer.weight,
                                         gain=self.projection_scale)
Example no. 12
0
    def __init__(self,
                 num_users,
                 num_items,
                 embedding_dim=32,
                 n_layers=2,
                 sparse=None):
        """Create user/item embeddings, biases and the MLP tower.

        Parameters
        ----------
        num_users, num_items : int
            Sizes of the user and item id spaces.
        embedding_dim : int
            Dimensionality of the latent factors.
        n_layers : int
            Unused in this constructor; kept for interface
            compatibility.
        sparse : bool or None
            Passed through to the embedding layers.
        """
        super(DeepNet, self).__init__()
        self.emb_dim = embedding_dim
        self.user_emb = ScaledEmbedding(num_users,
                                        embedding_dim,
                                        sparse=sparse)
        self.item_emb = ScaledEmbedding(num_items,
                                        embedding_dim,
                                        sparse=sparse)

        # Two hidden layers with ReLU activations and heavy (p=0.8)
        # dropout.
        self.mlp = nn.Sequential(nn.Linear(self.emb_dim, 16), nn.ReLU(),
                                 nn.Dropout(p=0.8), nn.Linear(16, 32),
                                 nn.ReLU(), nn.Dropout(0.8))
        self.user_biases = ZeroEmbedding(num_users, 1)
        self.item_biases = ZeroEmbedding(num_items, 1)
        self.out = nn.ReLU()
Example no. 13
0
    def __init__(self, num_items, embedding_dim=32, item_embedding_layer=None):
        """Item embeddings, biases and an LSTM over item sequences."""
        super().__init__()
        self.embedding_dim = embedding_dim
        self.item_biases = ZeroEmbedding(num_items, 1, padding_idx=PADDING_IDX)

        # Use the injected embedding layer when given, otherwise build one.
        if item_embedding_layer is None:
            item_embedding_layer = ScaledEmbedding(num_items,
                                                   embedding_dim,
                                                   padding_idx=PADDING_IDX)
        self.item_embedding = item_embedding_layer

        # NOTE(review): hidden_size == embedding_dim presumably so LSTM
        # states live in the same space as the item embeddings — confirm
        # against the forward pass.
        self.lstm = nn.LSTM(input_size=embedding_dim,
                            hidden_size=embedding_dim,
                            batch_first=True)
Example no. 14
0
    def __init__(self, num_items, embedding_dim=32,
                 item_embedding_layer=None, sparse=False):
        """Item embedding and bias tables; accepts a pre-built embedding."""
        super(PoolNet, self).__init__()

        self.embedding_dim = embedding_dim

        # Use the injected embedding layer when given, otherwise build one.
        if item_embedding_layer is None:
            item_embedding_layer = ScaledEmbedding(num_items, embedding_dim,
                                                   padding_idx=PADDING_IDX,
                                                   sparse=sparse)
        self.item_embeddings = item_embedding_layer

        self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse,
                                         padding_idx=PADDING_IDX)
Example no. 15
0
    def __init__(self, num_items, embedding_dim=32, sparse=False):
        """Item embeddings, biases and an LSTM sequence encoder."""
        super().__init__()

        self.embedding_dim = embedding_dim

        # padding_idx marks the padding-token row in both tables.
        self.item_embeddings = ScaledEmbedding(
            num_items, embedding_dim,
            sparse=sparse, padding_idx=PADDING_IDX)
        self.item_biases = ZeroEmbedding(
            num_items, 1,
            sparse=sparse, padding_idx=PADDING_IDX)

        # Sequence encoder; batch dimension first.
        self.lstm = nn.LSTM(input_size=embedding_dim,
                            hidden_size=embedding_dim,
                            batch_first=True)
Example no. 16
0
    def __init__(self,
                 num_items,
                 embedding_dim=32,
                 item_embedding_layer=None,
                 sparse=False,
                 layers=None,
                 nonlinearity='tanh'):
        """Item embeddings, biases and optional per-step linear layers.

        Parameters
        ----------
        num_items : int
            Size of the item id space.
        embedding_dim : int
            Dimensionality of the item embeddings.
        item_embedding_layer : optional
            Pre-built embedding layer; created fresh when None.
        sparse : bool
            Use sparse gradients for the embedding layers.
        layers : list of int or None
            Widths of optional fully-connected layers wrapped in
            TimeDistributed; None or an empty list disables them.
            Defaults to None instead of a mutable [] to avoid the
            shared-default-argument pitfall.
        nonlinearity : {'tanh', 'relu'}
            Activation function stored on the instance.

        Raises
        ------
        ValueError
            If nonlinearity is not 'tanh' or 'relu'.
        """
        super(PoolNet, self).__init__()

        self.embedding_dim = embedding_dim

        # torch.tanh / torch.relu replace the deprecated F.tanh / F.relu;
        # behaviour is identical.
        if nonlinearity == 'tanh':
            self.nonlinearity = torch.tanh
        elif nonlinearity == 'relu':
            self.nonlinearity = torch.relu
        else:
            raise ValueError('Nonlinearity must be one of (tanh, relu)')

        if item_embedding_layer is not None:
            self.item_embeddings = item_embedding_layer
        else:
            self.item_embeddings = ScaledEmbedding(num_items,
                                                   embedding_dim,
                                                   padding_idx=PADDING_IDX,
                                                   sparse=sparse)

        self.item_biases = ZeroEmbedding(num_items,
                                         1,
                                         sparse=sparse,
                                         padding_idx=PADDING_IDX)
        self.fc_layers = None
        if layers:
            self.fc_layers = torch.nn.ModuleList()
            # Consecutive widths define each linear layer's in/out sizes.
            for in_size, out_size in zip(layers[:-1], layers[1:]):
                self.fc_layers.append(
                    TimeDistributed(torch.nn.Linear(in_size, out_size)))
Example no. 17
0
    def __init__(self, num_items, embedding_dim=32,
                 item_embedding_layer=None, sparse=False):
        """Item embedding and bias tables for the custom pooling model."""
        super(CustomPoolNet, self).__init__()

        self.embedding_dim = embedding_dim

        if item_embedding_layer is None:
            # Maps item ids to dense vectors: e.g. embedding([1, 2, 4, 5])
            # yields one embedding_dim-sized row per id.
            item_embedding_layer = ScaledEmbedding(num_items, embedding_dim,
                                                   padding_idx=PADDING_IDX,
                                                   sparse=sparse)
        self.item_embeddings = item_embedding_layer

        self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse,
                                         padding_idx=PADDING_IDX)
Example no. 18
0
    def __init__(self, num_items, embedding_dim=32,
                 item_embedding_layer=None, sparse=False):
        """Item embeddings, biases, a learnable initial state and an mLSTM."""
        super(mLSTMNet, self).__init__()

        self.embedding_dim = embedding_dim

        # Use the injected embedding layer when given, otherwise build one.
        if item_embedding_layer is None:
            item_embedding_layer = ScaledEmbedding(num_items, embedding_dim,
                                                   padding_idx=PADDING_IDX,
                                                   sparse=sparse)
        self.item_embeddings = item_embedding_layer

        self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse,
                                         padding_idx=PADDING_IDX)

        # Learnable initial hidden state, sampled with std 1/embedding_dim.
        initial_state = torch.zeros(embedding_dim)
        initial_state.normal_(0, 1.0 / self.embedding_dim)
        self.h_init = nn.Parameter(initial_state, requires_grad=True)

        self.mlstm = mLSTM(input_size=embedding_dim,
                           hidden_size=embedding_dim)