Example #1
    def _get_negative_prediction(self, user_ids):
        negative_items = sample_items(self._num_items,
                                      len(user_ids),
                                      random_state=self._random_state)
        negative_var = gpu(torch.from_numpy(negative_items), self._use_cuda)
        negative_prediction = self._net(user_ids, negative_var)
        return negative_prediction
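
This and the following examples rely on a sample_items helper to draw random negative items. A minimal sketch of such a helper, assuming uniform sampling over the item catalogue (the library's actual implementation may differ in details):

import numpy as np

def sample_items(num_items, shape, random_state=None):
    # Draw item ids uniformly at random; callers use them as negative
    # examples that the observed (positive) items should be ranked above.
    if random_state is None:
        random_state = np.random.RandomState()
    return random_state.randint(0, num_items, shape, dtype=np.int64)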
Example #2
    def _get_negative_prediction(self, shape, user_representation):

        negative_items = sample_items(self._num_items, shape, random_state=self._random_state)
        negative_var = gpu(torch.from_numpy(negative_items), self._use_cuda)

        negative_prediction = self._net(user_representation, negative_var)

        return negative_prediction
Example #3
    def _get_negative_items(self, user_ids):

        negative_items = sample_items(
            self._num_items,
            len(user_ids),
            random_state=self._random_state)
        negative_var = Variable(
            gpu(torch.from_numpy(negative_items), self._use_cuda)
        )

        return negative_var
Example #4
    def _get_negative_prediction(self, shape, user_representation):

        negative_items = sample_items(
            self._num_items,
            shape,
            random_state=self._random_state)
        negative_var = Variable(
            gpu(torch.from_numpy(negative_items), self._use_cuda)
        )
        negative_prediction = self._net(user_representation, negative_var)

        return negative_prediction
Example #5
    def _get_negative_prediction(self, user_ids):

        negative_items = sample_items(
            self._num_items,
            len(user_ids),
            random_state=self._random_state)
        negative_var = Variable(
            gpu(torch.from_numpy(negative_items), self._use_cuda)
        )
        negative_prediction = self._net(user_ids, negative_var)

        return negative_prediction
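
The negative predictions returned by these helpers are scored against the positive predictions inside a ranking loss (see the fit methods below, which pick pointwise_loss, bpr_loss, or hinge_loss). As a point of reference, a BPR-style pairwise loss could look roughly like the sketch below; the library's own bpr_loss may differ in details, and the optional mask argument mirrors the padding mask used in Example #7.

import torch

def bpr_loss(positive_predictions, negative_predictions, mask=None):
    # Penalise cases where a sampled negative item scores close to or
    # above the observed positive item.
    loss = 1.0 - torch.sigmoid(positive_predictions - negative_predictions)
    if mask is not None:
        # Ignore padding positions (used by the sequence models below).
        mask = mask.float()
        return (loss * mask).sum() / mask.sum()
    return loss.mean()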
Example #6
    def fit(self, interactions, verbose=False):
        """
        Fit the model.

        Parameters
        ----------

        interactions: :class:`spotlight.interactions.Interactions`
            The input dataset.
        """

        user_ids = interactions.user_ids.astype(np.int64)
        item_ids = interactions.item_ids.astype(np.int64)

        (self._num_users, self._num_items) = (interactions.num_users,
                                              interactions.num_items)

        self._net = gpu(
            BilinearNet(self._num_users,
                        self._num_items,
                        self._embedding_dim,
                        sparse=self._sparse), self._use_cuda)

        if self._optimizer is None:
            self._optimizer = optim.Adam(self._net.parameters(),
                                         weight_decay=self._l2,
                                         lr=self._learning_rate)

        if self._loss == 'pointwise':
            loss_fnc = pointwise_loss
        elif self._loss == 'bpr':
            loss_fnc = bpr_loss
        else:
            loss_fnc = hinge_loss

        for epoch_num in range(self._n_iter):

            users, items = shuffle(user_ids,
                                   item_ids,
                                   random_state=self._random_state)

            user_ids_tensor = gpu(torch.from_numpy(users), self._use_cuda)
            item_ids_tensor = gpu(torch.from_numpy(items), self._use_cuda)

            epoch_loss = 0.0

            for (batch_user,
                 batch_item) in minibatch(user_ids_tensor,
                                          item_ids_tensor,
                                          batch_size=self._batch_size):

                user_var = Variable(batch_user)
                item_var = Variable(batch_item)
                positive_prediction = self._net(user_var, item_var)

                if self._loss == 'adaptive_hinge':
                    negative_prediction = self._get_adaptive_negatives(
                        user_var)
                else:
                    negative_items = sample_items(
                        self._num_items,
                        len(batch_user),
                        random_state=self._random_state)
                    negative_var = Variable(
                        gpu(torch.from_numpy(negative_items),
                            self._use_cuda))
                    negative_prediction = self._net(user_var, negative_var)

                self._optimizer.zero_grad()

                loss = loss_fnc(positive_prediction, negative_prediction)
                epoch_loss += loss.data[0]

                loss.backward()
                self._optimizer.step()

            if verbose:
                print('Epoch {}: loss {}'.format(epoch_num, epoch_loss))
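
A minimal usage sketch for a fit method like the one above, assuming the spotlight library's Interactions dataset and ImplicitFactorizationModel wrapper (the toy data and constructor arguments are illustrative assumptions):

import numpy as np
from spotlight.interactions import Interactions
from spotlight.factorization.implicit import ImplicitFactorizationModel

# Toy implicit-feedback data: each (user_id, item_id) pair is one observed
# interaction.
user_ids = np.array([0, 0, 1, 2, 2, 3], dtype=np.int32)
item_ids = np.array([1, 3, 2, 4, 3, 1], dtype=np.int32)

dataset = Interactions(user_ids, item_ids)

# loss='bpr' selects the bpr_loss branch above; 'pointwise' and
# 'adaptive_hinge' select the other branches.
model = ImplicitFactorizationModel(loss='bpr',
                                   embedding_dim=32,
                                   n_iter=3,
                                   use_cuda=False)
model.fit(dataset, verbose=True)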
Example #7
    def fit(self, interactions, verbose=False):
        """
        Fit the model.

        Parameters
        ----------

        interactions: :class:`spotlight.interactions.SequenceInteractions`
            The input sequence dataset.
        """

        sequences = interactions.sequences.astype(np.int64)

        self._num_items = interactions.num_items

        if self._representation == 'pooling':
            self._net = PoolNet(self._num_items,
                                self._embedding_dim,
                                sparse=self._sparse)
        elif self._representation == 'cnn':
            self._net = CNNNet(self._num_items,
                               self._embedding_dim,
                               sparse=self._sparse)
        elif self._representation == 'lstm':
            self._net = LSTMNet(self._num_items,
                                self._embedding_dim,
                                sparse=self._sparse)
        else:
            self._net = self._representation

        if self._optimizer is None:
            self._optimizer = optim.Adam(self._net.parameters(),
                                         weight_decay=self._l2,
                                         lr=self._learning_rate)

        if self._loss == 'pointwise':
            loss_fnc = pointwise_loss
        elif self._loss == 'bpr':
            loss_fnc = bpr_loss
        else:
            loss_fnc = hinge_loss

        for epoch_num in range(self._n_iter):

            sequences = shuffle(sequences, random_state=self._random_state)

            sequences_tensor = gpu(torch.from_numpy(sequences), self._use_cuda)

            epoch_loss = 0.0

            for batch_sequence in minibatch(sequences_tensor,
                                            batch_size=self._batch_size):

                sequence_var = Variable(batch_sequence)

                user_representation, _ = self._net.user_representation(
                    sequence_var)

                positive_prediction = self._net(user_representation,
                                                sequence_var)

                if self._loss == 'adaptive_hinge':
                    raise NotImplementedError
                else:
                    negative_items = sample_items(
                        self._num_items,
                        batch_sequence.size(),
                        random_state=self._random_state)
                    negative_var = Variable(
                        gpu(torch.from_numpy(negative_items),
                            self._use_cuda))
                    negative_prediction = self._net(user_representation,
                                                    negative_var)

                self._optimizer.zero_grad()

                loss = loss_fnc(positive_prediction,
                                negative_prediction,
                                mask=(sequence_var != PADDING_IDX))
                epoch_loss += loss.data[0]

                loss.backward()
                self._optimizer.step()

            if verbose:
                print('Epoch {}: loss {}'.format(epoch_num, epoch_loss))
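
A corresponding usage sketch for the sequence variant, assuming spotlight's ImplicitSequenceModel and the Interactions.to_sequence conversion (again, the toy data and arguments are illustrative assumptions):

import numpy as np
from spotlight.interactions import Interactions
from spotlight.sequence.implicit import ImplicitSequenceModel

# Toy data with timestamps so the interactions can be grouped into
# per-user sequences; item id 0 is reserved for padding.
user_ids = np.array([0, 0, 0, 1, 1, 1], dtype=np.int32)
item_ids = np.array([1, 2, 3, 3, 2, 1], dtype=np.int32)
timestamps = np.arange(6, dtype=np.int32)

sequences = Interactions(user_ids,
                         item_ids,
                         timestamps=timestamps).to_sequence(max_sequence_length=5)

# representation='lstm' picks the LSTMNet branch above; 'pooling' and
# 'cnn' pick the other branches.
model = ImplicitSequenceModel(representation='lstm', loss='bpr', n_iter=3)
model.fit(sequences, verbose=True)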