Example 1: _initialize with regression, poisson, and logistic loss options
    def _initialize(self, interactions):

        (self._num_users, self._num_items) = (interactions.num_users,
                                              interactions.num_items)

        if self._representation is not None:
            self._net = gpu(self._representation, self._use_cuda)
        else:
            self._net = gpu(
                BilinearNet(self._num_users,
                            self._num_items,
                            self._embedding_dim,
                            sparse=self._sparse), self._use_cuda)

        if self._optimizer_func is None:
            self._optimizer = optim.Adam(self._net.parameters(),
                                         weight_decay=self._l2,
                                         lr=self._learning_rate)
        else:
            self._optimizer = self._optimizer_func(self._net.parameters())

        if self._loss == 'regression':
            self._loss_func = regression_loss
        elif self._loss == 'poisson':
            self._loss_func = poisson_loss
        elif self._loss == 'logistic':
            self._loss_func = logistic_loss
        else:
            raise ValueError('Unknown loss: {}'.format(self._loss))
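The three loss functions dispatched here (regression_loss, poisson_loss, logistic_loss) are referenced but never defined in these excerpts. Judging from how the selected function is applied in Example 4, self._loss_func(batch_ratings, predictions), plausible implementations might look like the sketch below; the source library's own definitions may differ in detail (for instance in how logistic labels are encoded).

    import torch
    import torch.nn.functional as F

    def regression_loss(observed_ratings, predicted_ratings):
        # Mean squared error between observed and predicted ratings.
        return ((observed_ratings - predicted_ratings) ** 2).mean()

    def poisson_loss(observed_ratings, predicted_ratings):
        # Poisson negative log-likelihood; predictions are assumed to be
        # positive rates (see the torch.exp transform in Example 4).
        return (predicted_ratings - observed_ratings * torch.log(predicted_ratings)).mean()

    def logistic_loss(observed_ratings, predicted_ratings):
        # Binary cross-entropy on raw scores; ratings assumed to be 0/1 labels.
        return F.binary_cross_entropy_with_logits(predicted_ratings, observed_ratings)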
Example 2: _initialize with pointwise, bpr, hinge, and adaptive hinge loss options
    def _initialize(self, interactions):

        (self._num_users, self._num_items) = (interactions.num_users,
                                              interactions.num_items)

        if self._representation is not None:
            self._net = gpu(self._representation, self._use_cuda)
        else:
            self._net = gpu(
                BilinearNet(self._num_users,
                            self._num_items,
                            self._embedding_dim,
                            sparse=self._sparse), self._use_cuda)

        if self._optimizer_func is None:
            self._optimizer = optim.Adam(self._net.parameters(),
                                         weight_decay=self._l2,
                                         lr=self._learning_rate)
        else:
            self._optimizer = self._optimizer_func(self._net.parameters())

        if self._loss == 'pointwise':
            self._loss_func = pointwise_loss
        elif self._loss == 'bpr':
            self._loss_func = bpr_loss
        elif self._loss == 'hinge':
            self._loss_func = hinge_loss
        else:
            self._loss_func = adaptive_hinge_loss
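Example 2 wires up ranking losses that compare the score of an observed (positive) item with the score of a sampled negative one, and Example 5 additionally passes a mask keyword. Under those assumptions, minimal sketches of pointwise_loss, bpr_loss, and hinge_loss might look as follows; adaptive_hinge_loss, used in the fallback branch, would apply the same hinge idea to the hardest of several sampled negatives (hence _get_multiple_negative_predictions in Example 3). The _masked_mean helper is introduced only for this sketch.

    import torch

    def _masked_mean(loss, mask):
        # Average the per-element loss, optionally ignoring masked-out positions.
        if mask is None:
            return loss.mean()
        mask = mask.float()
        return (loss * mask).sum() / mask.sum()

    def pointwise_loss(positive_predictions, negative_predictions, mask=None):
        # Push positive scores towards 1 and sampled-negative scores towards 0.
        loss = (1.0 - torch.sigmoid(positive_predictions)
                + torch.sigmoid(negative_predictions))
        return _masked_mean(loss, mask)

    def bpr_loss(positive_predictions, negative_predictions, mask=None):
        # Bayesian Personalised Ranking: maximise the gap between positive
        # and sampled-negative scores.
        loss = 1.0 - torch.sigmoid(positive_predictions - negative_predictions)
        return _masked_mean(loss, mask)

    def hinge_loss(positive_predictions, negative_predictions, mask=None):
        # Margin-based ranking loss with a margin of 1.
        loss = torch.clamp(negative_predictions - positive_predictions + 1.0, min=0.0)
        return _masked_mean(loss, mask)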
Example 3: fit over (user, item) pairs with sampled negatives
    def fit(self, interactions, verbose=False):
        user_ids = interactions.user_ids.astype(np.int64)
        item_ids = interactions.item_ids.astype(np.int64)

        if not self._initialized:
            self._initialize(interactions)

        self._check_input(user_ids, item_ids)

        for epoch_num in range(self._n_iter):

            users, items = shuffle(user_ids,
                                   item_ids,
                                   random_state=self._random_state)

            user_ids_tensor = gpu(torch.from_numpy(users), self._use_cuda)
            item_ids_tensor = gpu(torch.from_numpy(items), self._use_cuda)

            epoch_loss = 0.0

            for (minibatch_num, (batch_user, batch_item)) in enumerate(
                    minibatch(user_ids_tensor,
                              item_ids_tensor,
                              batch_size=self._batch_size)):

                positive_prediction = self._net(batch_user, batch_item)

                if self._loss == 'adaptive_hinge':
                    # Adaptive hinge needs several sampled negatives per positive.
                    negative_prediction = self._get_multiple_negative_predictions(
                        batch_user, n=self._num_negative_samples)
                else:
                    negative_prediction = self._get_negative_prediction(
                        batch_user)

                self._optimizer.zero_grad()

                loss = self._loss_func(positive_prediction,
                                       negative_prediction)
                epoch_loss += loss.item()

                loss.backward()
                self._optimizer.step()

            epoch_loss /= minibatch_num + 1

            if verbose:
                print('Epoch {}: loss {}'.format(epoch_num, epoch_loss))

            if np.isnan(epoch_loss) or epoch_loss == 0.0:
                raise ValueError(
                    'Degenerate epoch loss: {}'.format(epoch_loss))
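The shuffle and minibatch utilities used by this fit loop are not shown in these excerpts. Implementations consistent with the call sites (parallel shuffling of several equal-length arrays under a shared random_state, and batched slicing of one or more tensors) might look like the following sketch.

    import numpy as np

    def shuffle(*arrays, **kwargs):
        # Apply one shared random permutation to every array.
        random_state = kwargs.get('random_state') or np.random.RandomState()
        indices = np.arange(len(arrays[0]))
        random_state.shuffle(indices)
        if len(arrays) == 1:
            return arrays[0][indices]
        return tuple(array[indices] for array in arrays)

    def minibatch(*tensors, **kwargs):
        # Yield consecutive batch_size slices of one or more tensors.
        batch_size = kwargs.get('batch_size', 128)
        for i in range(0, len(tensors[0]), batch_size):
            if len(tensors) == 1:
                yield tensors[0][i:i + batch_size]
            else:
                yield tuple(tensor[i:i + batch_size] for tensor in tensors)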
Example 4: fit over (user, item, rating) triples
    def fit(self, interactions, verbose=False):
        user_ids = interactions.user_ids.astype(np.int64)
        item_ids = interactions.item_ids.astype(np.int64)

        if not self._initialized:
            self._initialize(interactions)

        self._check_input(user_ids, item_ids)

        for epoch_num in range(self._n_iter):

            users, items, ratings = shuffle(user_ids,
                                            item_ids,
                                            interactions.ratings,
                                            random_state=self._random_state)

            user_ids_tensor = gpu(torch.from_numpy(users), self._use_cuda)
            item_ids_tensor = gpu(torch.from_numpy(items), self._use_cuda)
            ratings_tensor = gpu(torch.from_numpy(ratings), self._use_cuda)

            epoch_loss = 0.0

            for (minibatch_num, (batch_user, batch_item,
                                 batch_ratings)) in enumerate(
                                     minibatch(user_ids_tensor,
                                               item_ids_tensor,
                                               ratings_tensor,
                                               batch_size=self._batch_size)):

                predictions = self._net(batch_user, batch_item)

                if self._loss == 'poisson':
                    # Exponential link: turn unconstrained scores into positive rates.
                    predictions = torch.exp(predictions)

                self._optimizer.zero_grad()

                loss = self._loss_func(batch_ratings, predictions.double())
                epoch_loss += loss.item()

                loss.backward()
                self._optimizer.step()

            epoch_loss /= minibatch_num + 1

            if verbose:
                print('Epoch {}: loss {}'.format(epoch_num, epoch_loss))

            if np.isnan(epoch_loss) or epoch_loss == 0.0:
                raise ValueError(
                    'Degenerate epoch loss: {}'.format(epoch_loss))
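To make the training pattern of Example 4 easy to run outside the surrounding class, the self-contained sketch below reproduces the same loop with a hypothetical dot-product model (ToyBilinear) standing in for self._net, synthetic data, and a plain mean-squared-error loss; all names and numbers here are illustrative rather than taken from the original code.

    import numpy as np
    import torch
    import torch.nn as nn
    import torch.optim as optim

    class ToyBilinear(nn.Module):
        # Hypothetical stand-in for self._net: dot product of user and item embeddings.
        def __init__(self, num_users, num_items, embedding_dim=16):
            super().__init__()
            self.user_embeddings = nn.Embedding(num_users, embedding_dim)
            self.item_embeddings = nn.Embedding(num_items, embedding_dim)

        def forward(self, user_ids, item_ids):
            return (self.user_embeddings(user_ids)
                    * self.item_embeddings(item_ids)).sum(1)

    num_users, num_items, n_obs = 100, 200, 5000
    rng = np.random.RandomState(42)
    user_ids = rng.randint(0, num_users, n_obs).astype(np.int64)
    item_ids = rng.randint(0, num_items, n_obs).astype(np.int64)
    ratings = rng.uniform(1.0, 5.0, n_obs).astype(np.float32)

    net = ToyBilinear(num_users, num_items)
    optimizer = optim.Adam(net.parameters(), lr=1e-2)
    batch_size = 256

    for epoch in range(5):
        # Shuffle, then iterate in minibatches, mirroring the fit loop above.
        perm = rng.permutation(n_obs)
        users = torch.from_numpy(user_ids[perm])
        items = torch.from_numpy(item_ids[perm])
        targets = torch.from_numpy(ratings[perm])

        epoch_loss, num_batches = 0.0, 0

        for start in range(0, n_obs, batch_size):
            batch_user = users[start:start + batch_size]
            batch_item = items[start:start + batch_size]
            batch_ratings = targets[start:start + batch_size]

            predictions = net(batch_user, batch_item)

            optimizer.zero_grad()
            loss = ((batch_ratings - predictions) ** 2).mean()
            epoch_loss += loss.item()
            num_batches += 1

            loss.backward()
            optimizer.step()

        print('Epoch {}: loss {}'.format(epoch, epoch_loss / num_batches))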
Example 5: fit over padded item sequences
    def fit(self, interactions, verbose=False):
        sequences = interactions.sequences.astype(np.int64)

        if not self._initialized:
            self._initialize(interactions)

        self._check_input(sequences)

        for epoch_num in range(self._n_iter):

            sequences = shuffle(sequences,
                                random_state=self._random_state)

            sequences_tensor = gpu(torch.from_numpy(sequences),
                                   self._use_cuda)

            epoch_loss = 0.0

            for minibatch_num, batch_sequence in enumerate(minibatch(sequences_tensor,
                                                                     batch_size=self._batch_size)):

                sequence_var = batch_sequence

                user_representation, _ = self._net.user_representation(
                    sequence_var
                )

                positive_prediction = self._net(user_representation,
                                                sequence_var)

                if self._loss == 'adaptive_hinge':
                    negative_prediction = self._get_multiple_negative_predictions(
                        sequence_var.size(),
                        user_representation,
                        n=self._num_negative_samples)
                else:
                    negative_prediction = self._get_negative_prediction(sequence_var.size(),
                                                                        user_representation)

                self._optimizer.zero_grad()

                # The mask excludes padded positions from the loss.
                loss = self._loss_func(positive_prediction,
                                       negative_prediction,
                                       mask=(sequence_var != PADDING_IDX))
                epoch_loss += loss.item()

                loss.backward()

                self._optimizer.step()

            epoch_loss /= minibatch_num + 1

            if verbose:
                print('Epoch {}: loss {}'.format(epoch_num, epoch_loss))

            if np.isnan(epoch_loss) or epoch_loss == 0.0:
                raise ValueError('Degenerate epoch loss: {}'
                                 .format(epoch_loss))
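The mask in Example 5 drops positions equal to PADDING_IDX, which implies that shorter interaction histories are padded up to a fixed length before training. A minimal sketch of how such padded sequences might be produced, assuming PADDING_IDX = 0 and left-padding (the actual preprocessing in the source library may differ), is shown below.

    import numpy as np

    PADDING_IDX = 0  # assumed padding value; real item ids would then start at 1

    def pad_sequences(histories, max_length):
        # Left-pad each history to max_length with PADDING_IDX, keeping only
        # the most recent max_length items when a history is longer.
        out = np.full((len(histories), max_length), PADDING_IDX, dtype=np.int64)
        for row, history in enumerate(histories):
            history = history[-max_length:]
            out[row, max_length - len(history):] = history
        return out

    sequences = pad_sequences([[3, 7, 2], [5, 1, 4, 9, 8]], max_length=4)
    # array([[0, 3, 7, 2],
    #        [1, 4, 9, 8]])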
Example 6: _get_negative_prediction for a batch of user ids
    def _get_negative_prediction(self, user_ids):

        negative_items = sample_items(self._num_items,
                                      len(user_ids),
                                      random_state=self._random_state)
        negative_var = gpu(torch.from_numpy(negative_items), self._use_cuda)

        negative_prediction = self._net(user_ids, negative_var)

        return negative_prediction
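sample_items is used in Examples 6 and 7 but not defined in these excerpts. From the call sites it draws item ids uniformly at random, accepting either a count (Example 6) or a full shape tuple (Example 7); a plausible implementation is sketched below.

    import numpy as np

    def sample_items(num_items, shape, random_state=None):
        # Draw item ids uniformly at random; shape may be an int or a tuple.
        if random_state is None:
            random_state = np.random.RandomState()
        return random_state.randint(0, num_items, shape, dtype=np.int64)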
Example 7: _get_negative_prediction from a precomputed user representation
    def _get_negative_prediction(self, shape, user_representation):

        negative_items = sample_items(
            self._num_items,
            shape,
            random_state=self._random_state)
        negative_var = gpu(torch.from_numpy(negative_items), self._use_cuda)

        negative_prediction = self._net(user_representation, negative_var)

        return negative_prediction
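The adaptive hinge branch in Example 3 relies on _get_multiple_negative_predictions, which is not included in these excerpts. One plausible implementation for the factorization case, reusing _get_negative_prediction from Example 6 and returning an (n, batch_size) tensor of negative scores, is sketched below; the sequence-model variant called in Example 5 would follow the same idea with an additional sequence dimension.

    def _get_multiple_negative_predictions(self, user_ids, n=5):
        # Score n sampled negatives per user by tiling the user ids, then
        # reshape to (n, batch_size) so the loss can reduce over negatives.
        batch_size = user_ids.size(0)
        negative_prediction = self._get_negative_prediction(
            user_ids.view(batch_size, 1)
                    .expand(batch_size, n)
                    .reshape(batch_size * n))
        return negative_prediction.view(n, batch_size)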
Example 8: predict scores for a sequence against candidate items
    def predict(self, sequences, item_ids=None):
        self._net.train(False)

        sequences = np.atleast_2d(sequences)

        if item_ids is None:
            item_ids = np.arange(self._num_items).reshape(-1, 1)

        self._check_input(item_ids)
        self._check_input(sequences)

        sequences = torch.from_numpy(sequences.astype(np.int64).reshape(1, -1))
        item_ids = torch.from_numpy(item_ids.astype(np.int64))

        sequence_var = gpu(sequences, self._use_cuda)
        item_var = gpu(item_ids, self._use_cuda)

        _, sequence_representations = self._net.user_representation(sequence_var)
        # Repeat the single sequence representation once per candidate item
        # so that every item can be scored against it.
        size = (len(item_var),) + sequence_representations.size()[1:]
        out = self._net(sequence_representations.expand(*size),
                        item_var)

        return cpu(out).detach().numpy().flatten()
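Every example moves tensors (and networks) around with gpu(...), and Example 8 brings results back to host memory with cpu(...). Neither helper is defined here; implementations consistent with the calls, a conditional .cuda() move and its inverse, might look like the sketch below.

    def gpu(tensor, use_cuda=False):
        # Move a tensor or module to the GPU only when CUDA is requested.
        return tensor.cuda() if use_cuda else tensor

    def cpu(tensor):
        # Bring a possibly GPU-resident tensor back to host memory.
        return tensor.cpu() if tensor.is_cuda else tensor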
Example 9: _initialize selecting a sequence representation (pooling, cnn, lstm, mixture)
    def _initialize(self, interactions):

        self._num_items = interactions.num_items

        if self._representation == 'pooling':
            self._net = PoolNet(self._num_items,
                                self._embedding_dim,
                                sparse=self._sparse)
        elif self._representation == 'cnn':
            self._net = CNNNet(self._num_items,
                               self._embedding_dim,
                               sparse=self._sparse)
        elif self._representation == 'lstm':
            self._net = LSTMNet(self._num_items,
                                self._embedding_dim,
                                sparse=self._sparse)
        elif self._representation == 'mixture':
            self._net = MixtureLSTMNet(self._num_items,
                                       self._embedding_dim,
                                       sparse=self._sparse)
        else:
            self._net = self._representation

        self._net = gpu(self._net, self._use_cuda)

        if self._optimizer_func is None:
            self._optimizer = optim.Adam(
                self._net.parameters(),
                weight_decay=self._l2,
                lr=self._learning_rate
            )
        else:
            self._optimizer = self._optimizer_func(self._net.parameters())

        if self._loss == 'pointwise':
            self._loss_func = pointwise_loss
        elif self._loss == 'bpr':
            self._loss_func = bpr_loss
        elif self._loss == 'hinge':
            self._loss_func = hinge_loss
        else:
            self._loss_func = adaptive_hinge_loss