    def __init__(self,
                 n_items,
                 h_dim_size,
                 gru_hidden_size,
                 n_layers=3,
                 use_cuda=False,
                 batch_size=32,
                 use_logit=False):
        super(SRNN, self).__init__()
        self.batch_size = batch_size
        self.n_items = n_items
        self.h_dim_size = h_dim_size
        self.gru_hidden_size = gru_hidden_size
        self.n_layers = n_layers
        self.device = torch.device('cuda' if use_cuda else 'cpu')
        self.use_cuda = use_cuda
        self.gru = nn.GRU(input_size=self.h_dim_size,
                          hidden_size=self.h_dim_size,
                          num_layers=self.n_layers)
        self.activation = nn.Tanh()
        self.out = nn.Linear(h_dim_size, 1)
        self.embedding = EmbeddingGrad(n_items, h_dim_size, use_cuda=use_cuda)
        self.use_logit = use_logit
        self.logistic = torch.nn.Sigmoid()
        if use_cuda:
            self = self.cuda()
Example 2
    def __init__(self, n_items, h_dim_size, wide, wide_dim, fc1=64, fc2=32):
        super(WideAndDeepPretrained, self).__init__()
        self.n_items = n_items
        self.h_dim_size = h_dim_size

        self.embedding = EmbeddingGrad(n_items, h_dim_size)
        self.fc_1 = nn.Linear(h_dim_size, fc1)
        self.fc_2 = nn.Linear(fc1, fc2)

        self.wide = EmbeddingGrad(n_items, wide_dim, init_embed=wide)

        self.output_layer = nn.Linear(wide_dim + fc2, 1)
Example 3
    def __init__(self, config):
        super(MLP, self).__init__()
        self.config = config
        self.num_users = config['num_users']
        self.num_items = config['num_items']
        self.latent_dim = config['latent_dim']
        self.use_cuda = config['use_cuda']
        self.use_logit = config["use_logit"]
        self.device = torch.device('cuda' if self.use_cuda else 'cpu')

        self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users,
                                                 embedding_dim=self.latent_dim)
        self.embedding = EmbeddingGrad(num_embedding=self.num_items,
                                       embedding_dim=self.latent_dim)

        self.fc_layers = torch.nn.ModuleList()
        for idx, (in_size, out_size) in enumerate(
                zip(config['layers'][:-1], config['layers'][1:])):
            self.fc_layers.append(torch.nn.Linear(in_size, out_size))

        self.affine_output = torch.nn.Linear(in_features=config['layers'][-1],
                                             out_features=1)
        self.logistic = torch.nn.Sigmoid()

        if self.use_cuda:
            self = self.cuda()
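A hypothetical config for the MLP above (the values are placeholders, not from the source); the keys are exactly the ones __init__ reads, and layers[0] is assumed to match however the forward pass combines the user and item embeddings:

config = {
    'num_users': 1000,
    'num_items': 5000,
    'latent_dim': 8,
    'layers': [16, 64, 32, 16],   # sizes of consecutive fully connected layers
    'use_cuda': False,
    'use_logit': True,
}
model = MLP(config)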
Example 4
    def __init__(self,
                 n_users,
                 n_items,
                 n_factors=40,
                 dropout_p=0,
                 sparse=False,
                 use_logit=False,
                 use_cuda=False):
        """
        Parameters
        ----------
        n_users : int
            Number of users
        n_items : int
            Number of items
        n_factors : int
            Number of latent factors (or embeddings or whatever you want to
            call it).
        dropout_p : float
            p in nn.Dropout module. Probability of dropout.
        sparse : bool
            Whether or not to treat embeddings as sparse. NOTE: cannot use
            weight decay on the optimizer if sparse=True. Also, can only use
            Adagrad.
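        use_logit : bool
            If True, pass the raw score through a sigmoid.
        use_cuda : bool
            If True, move the model to the GPU.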
        """
        super(MatrixFactorization, self).__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.n_factors = n_factors
        self.user_biases = nn.Embedding(n_users, 1, sparse=sparse)
        self.item_biases = EmbeddingGrad(n_items, 1)
        self.user_embeddings = nn.Embedding(n_users, n_factors, sparse=sparse)
        self.item_embeddings = EmbeddingGrad(num_embedding=self.n_items, embedding_dim=self.n_factors)

        self.dropout_p = dropout_p
        self.dropout = nn.Dropout(p=self.dropout_p)
        self.logistic = torch.nn.Sigmoid()
        self.sparse = sparse
        self.use_logit = use_logit
        self.use_cuda = use_cuda
        self.device = torch.device('cuda' if use_cuda else 'cpu')

        if use_cuda:
            self = self.cuda()
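The forward pass of MatrixFactorization is not part of this snippet; below is a minimal sketch of the usual matrix factorization score (bias terms plus a dot product of the two embeddings), assuming EmbeddingGrad can be called like nn.Embedding. It is an illustration, not the repository's implementation.

    def forward(self, users, items):
        # Sketch only: per-pair dot product of user/item factors plus biases.
        u = self.dropout(self.user_embeddings(users))        # (n, n_factors)
        v = self.dropout(self.item_embeddings(items))        # (n, n_factors)
        score = (u * v).sum(dim=-1, keepdim=True)            # (n, 1)
        score = score + self.user_biases(users) + self.item_biases(items)
        return self.logistic(score) if self.use_logit else score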
    def __init__(self, n_items, h_dim_size, use_cuda=False):
        super(UtilityEncoder, self).__init__()
        self.n_items = n_items
        self.h_dim_size = h_dim_size
        self.device = torch.device('cuda' if use_cuda else 'cpu')
        self.use_cuda = use_cuda

        self.embedding = EmbeddingGrad(n_items, h_dim_size)
        self.weights = nn.Linear(h_dim_size, 1)

        if use_cuda:
            self = self.cuda()
class NeuralUtility(nn.Module):
    def __init__(self, backbone, n_items, h_dim_size, use_embedding=True):
        super(NeuralUtility, self).__init__()

        self.use_embedding = use_embedding
        self.embedding = EmbeddingGrad(n_items, h_dim_size)
        self.backbone = backbone

    def get_input_grad(self, indices):
        """
        Get gradients with respect to inputs
        :param indices: (ndarray) array of item indices
        :return: (tensor) tensor of gradients with respect to inputs
        """
        if indices.ndim == 1:
            indices = indices.reshape(-1, 1)

        dims = [d for d in indices.shape] + [1]
        idx_tensor = torch.LongTensor(indices).reshape(dims)

        if self.use_embedding:
            grad = self.embedding.get_grad(indices)
        else:
            grad = self.backbone.embedding.get_grad(indices)

        grad_at_idx = torch.gather(grad, -1, idx_tensor)
        return torch.squeeze(grad_at_idx)

    def forward(self, users, items):

        if self.use_embedding:
            e_i = self.embedding(users)
            y_hat = self.backbone(e_i)
        else:
            y_hat = self.backbone(users, items)

        return y_hat

    def predict(self, users, items):
        return self.forward(users, items)
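get_input_grad above picks, for each row of item indices, the gradient entry at that row's own index. A self-contained illustration of the same gather pattern, with a random tensor standing in for EmbeddingGrad.get_grad (whose implementation is not shown in these snippets):

import numpy as np
import torch

batch, n_items = 3, 5
grad = torch.randn(batch, 1, n_items)                 # assumed shape of get_grad(indices)

indices = np.array([1, 4, 2]).reshape(-1, 1)          # (batch, 1): item index per row
dims = list(indices.shape) + [1]
idx_tensor = torch.LongTensor(indices).reshape(dims)  # (batch, 1, 1)

grad_at_idx = torch.gather(grad, -1, idx_tensor)      # picks grad[row, 0, index[row]]
print(torch.squeeze(grad_at_idx))                     # (batch,): one value per row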
    def __init__(self, n_users, n_items, h_dim_size, use_cuda, use_logit=False):
        super(GMF, self).__init__()
        self.num_users = n_users
        self.num_items = n_items
        self.latent_dim = h_dim_size
        self.device = torch.device('cuda' if use_cuda else 'cpu')
        self.use_cuda = use_cuda
        self.use_logit = use_logit
        self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim)
        self.embedding = EmbeddingGrad(num_embedding=self.num_items, embedding_dim=self.latent_dim)
        self.affine_output = torch.nn.Linear(in_features=self.latent_dim, out_features=1)
        self.logistic = torch.nn.Sigmoid()

        if use_cuda:
            self = self.cuda()
Example 8
    def __init__(self, n_items, h_dim_size, fc1=64, fc2=32, use_cuda=False, use_embedding=True, use_logit=False):
        super(WideAndDeep, self).__init__()
        self.n_items = n_items
        self.h_dim_size = h_dim_size
        self.device = torch.device('cuda' if use_cuda else 'cpu')
        self.use_cuda = use_cuda
        self.use_logit = use_logit
        self.use_embedding = use_embedding
        self.embedding = EmbeddingGrad(n_items, h_dim_size, use_cuda=use_cuda)
        self.fc_1 = nn.Linear(h_dim_size, fc1)
        self.fc_2 = nn.Linear(fc1, fc2)
        self.output_layer = nn.Linear(n_items + fc2, 1)
        self.logistic = torch.nn.Sigmoid()

        if use_cuda:
            self = self.cuda()
Example 9
    def __init__(self, field_dims, output_dim=1):
        super().__init__()
        self.fc = EmbeddingGrad(sum(field_dims), output_dim)
        self.bias = torch.nn.Parameter(torch.zeros((output_dim, )))
        self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)
Example 10
    def __init__(self, field_dims, embed_dim):
        super().__init__()
        self.embedding = EmbeddingGrad(sum(field_dims), embed_dim)
        self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)
        torch.nn.init.xavier_uniform_(self.embedding.weights.weight.data)
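The offsets array above packs several categorical fields into one flat embedding table: each field's raw index is shifted by the total size of the fields before it. A small worked example, using nn.Embedding as a stand-in for EmbeddingGrad:

import numpy as np
import torch
import torch.nn as nn

field_dims = (3, 5, 2)
offsets = np.array((0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)   # [0 3 8]

embedding = nn.Embedding(sum(field_dims), 4)      # one table with 3 + 5 + 2 = 10 rows
x = torch.tensor([[2, 0, 1],                      # per-field indices for two samples
                  [1, 4, 0]])
x = x + torch.as_tensor(offsets)                  # flat rows [2, 3, 9] and [1, 7, 8]
emb = embedding(x)                                # (2, 3, 4): one vector per field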
class SRNN(nn.Module):
    def __init__(self,
                 n_items,
                 h_dim_size,
                 gru_hidden_size,
                 n_layers=3,
                 use_cuda=False,
                 batch_size=32,
                 use_logit=False):
        super(SRNN, self).__init__()
        self.batch_size = batch_size
        self.n_items = n_items
        self.h_dim_size = h_dim_size
        self.gru_hidden_size = gru_hidden_size
        self.n_layers = n_layers
        self.device = torch.device('cuda' if use_cuda else 'cpu')
        self.use_cuda = use_cuda
        self.gru = nn.GRU(input_size=self.h_dim_size,
                          hidden_size=self.h_dim_size,
                          num_layers=self.n_layers)
        self.activation = nn.Tanh()
        self.out = nn.Linear(h_dim_size, 1)
        self.embedding = EmbeddingGrad(n_items, h_dim_size, use_cuda=use_cuda)
        self.use_logit = use_logit
        self.logistic = torch.nn.Sigmoid()
        if use_cuda:
            self = self.cuda()

    def forward(self, users, items, hidden=None):
        embedded = self.embedding(items)
        #embedded = embedded.unsqueeze(0)
        o, h = self.gru(torch.transpose(torch.squeeze(embedded), 0, 1), hidden)
        # o = o.view(-1, o.size(-1))

        y_hat = torch.squeeze(self.activation(self.out(o)))

        if self.use_logit:
            y_hat = self.logistic(y_hat)
        return torch.transpose(y_hat, 0, 1)

    def one_hot(self, input):
        self.one_hot_embedding.zero_()
        index = input.view(-1, 1)
        one_hot = self.one_hot_embedding.scatter_(1, index, 1)
        return one_hot

    def init_onehot_embedding(self):
        onehot = torch.FloatTensor(self.batch_size, self.n_items)
        onehot = onehot.to(self.device)
        return onehot
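    # Illustration of one_hot(): with batch_size=4, n_items=6 and
    # input = tensor([5, 0, 2, 2]), scatter_(1, input.view(-1, 1), 1) writes a
    # single 1 per row of the zeroed buffer (columns 5, 0, 2 and 2 respectively).
    # Note that self.one_hot_embedding is not assigned in this snippet, so the
    # buffer returned by init_onehot_embedding() is presumably stored elsewhere.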

    def get_input_grad(self, indices):
        """
        Get gradients with respect to inputs
        :param indices: (ndarray) array of item indices
        :return: (tensor) tensor of gradients with respect to inputs
        """
        if indices.ndim == 1:
            indices = indices.reshape(-1, 1)

        dims = [d for d in indices.shape] + [1]
        idx_tensor = torch.LongTensor(indices).reshape(dims)

        grad = self.embedding.get_grad(indices)
        grad_at_idx = torch.gather(grad, -1, idx_tensor)
        return torch.squeeze(grad_at_idx)

    def predict(self, X_test):

        n_samples = X_test.shape[0]
        h = torch.zeros(self.n_layers, n_samples,
                        self.h_dim_size).to(self.device)

        return self.forward(None, X_test, hidden=h)
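A shape walkthrough for SRNN.forward, assuming items is a (batch, seq_len) batch of item ids and the embedding yields one h_dim_size vector per id (the EmbeddingGrad layout itself is not shown in these snippets):

# items                        (batch, seq_len)         integer item ids
# embedding(items), squeezed   (batch, seq_len, h_dim)
# transpose(0, 1)              (seq_len, batch, h_dim)  nn.GRU's default (L, N, H_in) layout
# GRU output o                 (seq_len, batch, h_dim)
# out(o), squeezed             (seq_len, batch)         one score per position
# returned y_hat               (batch, seq_len)         after the final transpose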
    def __init__(self, backbone, n_items, h_dim_size, use_embedding=True):
        super(NeuralUtility, self).__init__()

        self.use_embedding = use_embedding
        self.embedding = EmbeddingGrad(n_items, h_dim_size)
        self.backbone = backbone
Example 13
class WideAndDeepPretrained(nn.Module):

    def __init__(self, n_items, h_dim_size, wide, wide_dim, fc1=64, fc2=32):
        super(WideAndDeepPretrained, self).__init__()
        self.n_items = n_items
        self.h_dim_size = h_dim_size

        self.embedding = EmbeddingGrad(n_items, h_dim_size)
        self.fc_1 = nn.Linear(h_dim_size, fc1)
        self.fc_2 = nn.Linear(fc1, fc2)

        self.wide = EmbeddingGrad(n_items, wide_dim, init_embed=wide)

        self.output_layer = nn.Linear(wide_dim + fc2, 1)

    def get_input_grad(self, indices):
        """
        Get gradients with respect to inputs
        :param indices: (ndarray) array of item indices
        :return: (tensor) tensor of gradients with respect to inputs
        """
        if indices.ndim == 1:
            indices = indices.reshape(-1, 1)


        dims = [d for d in indices.shape] + [1]
        idx_tensor = torch.LongTensor(indices).reshape(dims)

        grad = self.embedding.get_grad(indices)
        grad_at_idx = torch.gather(grad, -1, idx_tensor)
        return torch.squeeze(grad_at_idx)


    def _forward_set(self, x):
        h = self.embedding(x)
        h = F.relu(self.fc_1(h))
        h = F.relu(self.fc_2(h))

        wide = self.wide(x)
        h = torch.cat([h, wide], dim=-1)

        y_hat = self.output_layer(h)
        return y_hat


    def forward(self, x, x_c=None, x_s=None):

        y_hat = self._forward_set(x)

        if x_c is not None and x_s is not None:

            y_hat_c = self._forward_set(x_c)
            y_hat_s = self._forward_set(x_s)

            return y_hat, torch.squeeze(y_hat_c), torch.squeeze(y_hat_s)

        else:

            return y_hat


    def fit(self, X_train, y_train, batch_size, k, lr, n_epochs, loss_step, eps):
        pass


    def predict(self, X_test):
        pass