import torch
import torch.nn as nn
import torch.nn.functional as F

# EmbeddingGrad is a project-specific embedding layer that exposes gradients
# with respect to its (one-hot) inputs via get_grad(); the import path below
# is assumed, not taken from the original source.
from embedding import EmbeddingGrad


class NeuralUtility(nn.Module):
    def __init__(self, backbone, n_items, h_dim_size, use_embedding=True):
        super(NeuralUtility, self).__init__()

        self.use_embedding = use_embedding
        self.embedding = EmbeddingGrad(n_items, h_dim_size)
        self.backbone = backbone

    def get_input_grad(self, indices):
        """
        Get gradients with respect to inputs
        :param indices: (ndarray) array of item indices
        :return: (tensor) tensor of gradients with respect to inputs
        """
        if indices.ndim == 1:
            indices = indices.reshape(-1, 1)

        dims = [d for d in indices.shape] + [1]
        idx_tensor = torch.LongTensor(indices).reshape(dims)

        if self.use_embedding:
            grad = self.embedding.get_grad(indices)
        else:
            grad = self.backbone.embedding.get_grad(indices)

        grad_at_idx = torch.gather(grad, -1, idx_tensor)
        return torch.squeeze(grad_at_idx)

    def forward(self, users, items):
        # When use_embedding is set, this wrapper owns the gradient-exposing
        # embedding and the backbone consumes embedded vectors; otherwise the
        # backbone embeds its own inputs.
        if self.use_embedding:
            e_i = self.embedding(users)
            y_hat = self.backbone(e_i)
        else:
            y_hat = self.backbone(users, items)

        return y_hat

    def predict(self, users, items):
        return self.forward(users, items)
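
# Illustrative usage sketch (not part of the original source). It assumes
# EmbeddingGrad(idx) returns item embeddings whose last dimension is
# h_dim_size and records input gradients during backward(); the toy backbone
# and all sizes below are made up for demonstration.
def _neural_utility_example():
    import numpy as np

    backbone = nn.Sequential(nn.Linear(32, 16), nn.ReLU(), nn.Linear(16, 1))
    model = NeuralUtility(backbone=backbone, n_items=100, h_dim_size=32)

    item_idx = np.random.randint(0, 100, size=8)        # batch of item ids
    scores = model(torch.LongTensor(item_idx), None)     # use_embedding path
    scores.sum().backward()                              # populate input grads
    return model.get_input_grad(item_idx)                # gradient per item

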
class SRNN(nn.Module):
    def __init__(self,
                 n_items,
                 h_dim_size,
                 gru_hidden_size,
                 n_layers=3,
                 use_cuda=False,
                 batch_size=32,
                 use_logit=False):
        super(SRNN, self).__init__()
        self.batch_size = batch_size
        self.n_items = n_items
        self.h_dim_size = h_dim_size
        self.gru_hidden_size = gru_hidden_size
        self.n_layers = n_layers
        self.device = torch.device('cuda' if use_cuda else 'cpu')
        self.use_cuda = use_cuda
        # Note: the GRU hidden size is tied to h_dim_size (gru_hidden_size is
        # stored but not used here), so hidden states feed `out` directly.
        self.gru = nn.GRU(input_size=self.h_dim_size,
                          hidden_size=self.h_dim_size,
                          num_layers=self.n_layers)
        self.activation = nn.Tanh()
        self.out = nn.Linear(h_dim_size, 1)
        self.embedding = EmbeddingGrad(n_items, h_dim_size, use_cuda=use_cuda)
        self.use_logit = use_logit
        self.logistic = torch.nn.Sigmoid()
        # Buffer reused by one_hot(); it must exist before one_hot() is called.
        self.one_hot_embedding = self.init_onehot_embedding()
        if use_cuda:
            self.cuda()

    def forward(self, users, items, hidden=None):
        # `users` is accepted for interface compatibility; the session RNN
        # only consumes the item sequences.
        embedded = self.embedding(items)
        # The GRU expects (seq_len, batch, h_dim_size), so move the sequence
        # dimension to the front.
        o, h = self.gru(torch.transpose(torch.squeeze(embedded), 0, 1), hidden)

        y_hat = torch.squeeze(self.activation(self.out(o)))

        if self.use_logit:
            y_hat = self.logistic(y_hat)
        # Return predictions as (batch, seq_len).
        return torch.transpose(y_hat, 0, 1)

    def one_hot(self, input):
        # Scatter 1s into the reusable (batch_size, n_items) buffer to build
        # one-hot rows for the given item indices.
        self.one_hot_embedding.zero_()
        index = input.view(-1, 1)
        one_hot = self.one_hot_embedding.scatter_(1, index, 1)
        return one_hot

    def init_onehot_embedding(self):
        # Preallocate the buffer used by one_hot() on the model's device.
        onehot = torch.FloatTensor(self.batch_size, self.n_items)
        onehot = onehot.to(self.device)
        return onehot

    def get_input_grad(self, indices):
        """
        Get gradients with respect to inputs
        :param indices: (ndarray) array of item indices
        :return: (tensor) tensor of gradients with respect to inputs
        """
        if indices.ndim == 1:
            indices = indices.reshape(-1, 1)

        dims = [d for d in indices.shape] + [1]
        idx_tensor = torch.LongTensor(indices).reshape(dims)

        grad = self.embedding.get_grad(indices)
        grad_at_idx = torch.gather(grad, -1, idx_tensor)
        return torch.squeeze(grad_at_idx)

    def predict(self, X_test):
        # Initialise the GRU hidden state for the batch and score the sessions.
        n_samples = X_test.shape[0]
        h = torch.zeros(self.n_layers, n_samples,
                        self.h_dim_size).to(self.device)

        return self.forward(None, X_test, hidden=h)
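
# Illustrative usage sketch (not part of the original source). It assumes the
# squeezed EmbeddingGrad output has shape (batch, seq_len, h_dim_size), so
# forward() feeds the GRU a (seq_len, batch, h_dim_size) tensor; all sizes
# below are made up for demonstration.
def _srnn_example():
    model = SRNN(n_items=100, h_dim_size=32, gru_hidden_size=32,
                 n_layers=2, use_cuda=False, batch_size=4)

    sessions = torch.randint(0, 100, (4, 10))   # 4 sessions of 10 item ids
    scores = model.predict(sessions)            # (batch, seq_len) utilities
    return scores

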
class WideAndDeepPretrained(nn.Module):

    def __init__(self, n_items, h_dim_size, wide, wide_dim, fc1=64, fc2=32):
        super(WideAndDeepPretrained, self).__init__()
        self.n_items = n_items
        self.h_dim_size = h_dim_size

        # Deep side: learned item embedding followed by two fully connected layers.
        self.embedding = EmbeddingGrad(n_items, h_dim_size)
        self.fc_1 = nn.Linear(h_dim_size, fc1)
        self.fc_2 = nn.Linear(fc1, fc2)

        # Wide side: embedding initialised from the pretrained `wide` weights.
        self.wide = EmbeddingGrad(n_items, wide_dim, init_embed=wide)

        # The concatenated wide + deep representation is projected to a scalar score.
        self.output_layer = nn.Linear(wide_dim + fc2, 1)

    def get_input_grad(self, indices):
        """
        Get gradients with respect to inputs
        :param indices: (ndarray) array of item indices
        :return: (tensor) tensor of gradients with respect to inputs
        """
        if indices.ndim == 1:
            indices = indices.reshape(-1, 1)

        dims = [d for d in indices.shape] + [1]
        idx_tensor = torch.LongTensor(indices).reshape(dims)

        grad = self.embedding.get_grad(indices)
        grad_at_idx = torch.gather(grad, -1, idx_tensor)
        return torch.squeeze(grad_at_idx)


    def _forward_set(self, x):
        # Deep tower: embed the items, then apply two ReLU layers.
        h = self.embedding(x)
        h = F.relu(self.fc_1(h))
        h = F.relu(self.fc_2(h))

        # Concatenate the wide (pretrained) representation before the output layer.
        wide = self.wide(x)
        h = torch.cat([h, wide], dim=-1)

        y_hat = self.output_layer(h)
        return y_hat


    def forward(self, x, x_c=None, x_s=None):
        y_hat = self._forward_set(x)

        # Optional auxiliary item sets are scored with the same network and
        # returned alongside the primary scores (e.g. for pairwise loss terms).
        if x_c is not None and x_s is not None:
            y_hat_c = self._forward_set(x_c)
            y_hat_s = self._forward_set(x_s)
            return y_hat, torch.squeeze(y_hat_c), torch.squeeze(y_hat_s)

        return y_hat


    def fit(self, X_train, y_train, batch_size, k, lr, n_epochs, loss_step, eps):
        # Placeholder: training loop not implemented here.
        pass

    def predict(self, X_test):
        # Placeholder: inference not implemented here.
        pass
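
# Illustrative usage sketch (not part of the original source). It assumes
# `wide` is a pretrained (n_items, wide_dim) weight matrix used to initialise
# the wide-side EmbeddingGrad via init_embed; sizes and tensors below are
# made up for demonstration.
def _wide_and_deep_example():
    pretrained_wide = torch.randn(100, 8)                 # pretrained weights
    model = WideAndDeepPretrained(n_items=100, h_dim_size=32,
                                  wide=pretrained_wide, wide_dim=8)

    x = torch.randint(0, 100, (16, 1))                    # target items
    x_c = torch.randint(0, 100, (16, 5))                  # auxiliary item set
    x_s = torch.randint(0, 100, (16, 5))                  # auxiliary item set
    y_hat, y_hat_c, y_hat_s = model(x, x_c, x_s)
    return y_hat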