def recommend(self, data, top_k, return_preds=False, allow_repeat=False):
        # Set model to eval mode
        model = self.net.to(self.device)
        model.eval()

        n_rows = data.shape[0]
        idx_list = np.arange(n_rows)
        recommendations = np.empty([n_rows, top_k], dtype=np.int64)
        all_preds = list()
        with torch.no_grad():
            for batch_idx in minibatch(idx_list,
                                       batch_size=self.args.valid_batch_size):
                batch_data = data[batch_idx]
                batch_tensor = sparse2tensor(batch_data).to(self.device)

                preds = model(batch_tensor, batch_user=batch_idx, predict=True)
                if return_preds:
                    all_preds.append(preds)
                if not allow_repeat:
                    # Mask items each user has already interacted with.
                    # Note: this in-place write also mutates the tensor just
                    # appended to all_preds.
                    preds[batch_data.nonzero()] = -np.inf
                if top_k > 0:
                    _, recs = preds.topk(k=top_k, dim=1)
                    recommendations[batch_idx] = recs.cpu().numpy()

        if return_preds:
            return recommendations, torch.cat(all_preds, dim=0).cpu()
        else:
            return recommendations
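All of the snippets on this page lean on two small utilities, minibatch and sparse2tensor, that are defined elsewhere in the codebase. A minimal sketch of plausible implementations, assuming scipy.sparse inputs (the originals may differ):

import numpy as np
import torch

def minibatch(idx_list, batch_size):
    """Yield successive index batches from idx_list (assumed behavior)."""
    for start in range(0, len(idx_list), batch_size):
        yield idx_list[start:start + batch_size]

def sparse2tensor(sparse_data):
    """Densify a scipy.sparse matrix into a float tensor (assumed behavior)."""
    return torch.FloatTensor(sparse_data.toarray())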
        def compute_adv_grads():
            """Helper function for computing the adversarial gradient."""
            # Reset the surrogate: rebuild it from its config and clear any
            # checkpoint path so it trains from scratch on the poisoned data.
            sur_args = Bunch(self.args.surrogate)
            sur_args.model_load_path = ""
            sur_trainer_class = sur_args.model["trainer_class"]
            sur_trainer_ = sur_trainer_class(
                n_users=self.n_users + self.n_fakes,
                n_items=self.n_items,
                args=sur_args)

            data_tensor = torch.cat(
                [sparse2tensor(train_data).to(self.device),
                 self.fake_tensor.detach().clone().to(self.device)], dim=0)
            data_tensor.requires_grad_()

            # Train surrogate model.
            adv_loss_, adv_grads_ = sur_trainer_.fit_adv(
                data_tensor=data_tensor,
                epoch_num=sur_args["epochs"],
                unroll_steps=self.args.unroll_steps,
                n_fakes=self.n_fakes,
                target_items=self.target_items
            )
            return sur_trainer_, adv_loss_, adv_grads_
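fit_adv trains the surrogate for epochs steps and differentiates the attacker's loss on target_items back through the last unroll_steps optimizer steps. In the usual bilevel notation, with X the real interactions and \hat{X} the fake profiles, this estimates the gradient of:

\min_{\hat{X}} \; \mathcal{L}_{\mathrm{adv}}\bigl(\theta^{*}(\hat{X})\bigr)
\quad \text{s.t.} \quad
\theta^{*}(\hat{X}) = \arg\min_{\theta} \; \mathcal{L}_{\mathrm{train}}\bigl([X;\hat{X}];\, \theta\bigr)

so adv_grads_ is the unrolled approximation of \nabla_{\hat{X}} \mathcal{L}_{\mathrm{adv}}. The concrete losses are whatever the surrogate trainer defines; this is only the general shape of the objective.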
Example #3
    def train_epoch(self, data):
        # Transpose the data first for ItemVAE.
        data = data.transpose()

        n_rows = data.shape[0]
        idx_list = np.arange(n_rows)

        # Set model to training mode.
        model = self.net.to(self.device)
        model.train()
        np.random.shuffle(idx_list)

        epoch_loss = 0.0
        batch_size = (self.args.batch_size
                      if self.args.batch_size > 0 else len(idx_list))
        for batch_idx in minibatch(idx_list, batch_size=batch_size):
            batch_tensor = sparse2tensor(data[batch_idx]).to(self.device)

            # Compute loss
            outputs = model(batch_tensor)
            loss = model.loss(data=batch_tensor, outputs=outputs).sum()
            epoch_loss += loss.item()

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

        return epoch_loss
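A hedged driver loop for this trainer (hypothetical usage; trainer and train_data are placeholders for an instance of this class and a users-by-items scipy.sparse matrix):

n_epochs = 50  # placeholder
for epoch in range(1, n_epochs + 1):
    # train_epoch shuffles rows internally and returns the summed loss.
    loss = trainer.train_epoch(train_data)
    print("epoch %3d  loss %.4f" % (epoch, loss))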
    def train_epoch(self, data):
        n_rows = data.shape[0]
        idx_list = np.arange(n_rows)

        # Set model to training mode.
        model = self.net.to(self.device)
        model.train()
        np.random.shuffle(idx_list)

        epoch_loss = 0.0
        counter = 0
        for batch_idx in minibatch(idx_list, batch_size=self.args.batch_size):
            batch_tensor = sparse2tensor(data[batch_idx]).to(self.device)

            # Compute loss
            outputs = model(batch_tensor, batch_user=batch_idx)
            loss = model.loss(data=batch_tensor, outputs=outputs).mean()
            epoch_loss += loss.item()

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            counter += 1

        return epoch_loss / counter
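Note the normalization difference between the two train_epoch variants: this one accumulates per-batch mean losses and divides by the batch count, while the ItemVAE variant above sums the loss over all interactions, so the two returned values are not on the same scale.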
    def recommend(self, data, top_k, return_preds=False, allow_repeat=False):
        model = self.net

        n_rows = data.shape[0]
        n_cols = data.shape[1]
        idx_list = np.arange(n_rows)

        # Densify each item's top-k neighbor similarities into a full
        # item-item matrix so that batch predictions reduce to one matmul.
        nns_sims = torch.zeros([n_cols, n_cols])
        for item in range(n_cols):
            topk_sims, topk_nns = model(item_id=item)
            nns_sims[item].put_(topk_nns, topk_sims)

        recommendations = np.empty([n_rows, top_k], dtype=np.int64)
        all_preds = list()
        with torch.no_grad():
            for batch_idx in minibatch(idx_list,
                                       batch_size=self.args.valid_batch_size):
                batch_tensor = sparse2tensor(data[batch_idx])

                preds = torch.mm(batch_tensor, nns_sims)
                if return_preds:
                    all_preds.append(preds)
                if not allow_repeat:
                    preds[data[batch_idx].nonzero()] = -np.inf
                if top_k > 0:
                    _, recs = preds.topk(k=top_k, dim=1)
                    recommendations[batch_idx] = recs.cpu().numpy()

        if return_preds:
            return recommendations, torch.cat(all_preds, dim=0).cpu()
        else:
            return recommendations
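nns_sims above is a dense n_items x n_items matrix, which gets memory-hungry for large catalogs. Under the same assumed model interface (per-item top-k neighbor ids and similarities as tensors), the matrix can be kept sparse; a sketch:

import scipy.sparse as sp

def build_sparse_sim_matrix(model, n_items):
    """Collect each item's top-k neighbor similarities into one CSR matrix
    (a sparse alternative to the dense nns_sims above)."""
    rows, cols, vals = [], [], []
    for item in range(n_items):
        topk_sims, topk_nns = model(item_id=item)
        rows.extend([item] * len(topk_nns))
        cols.extend(topk_nns.cpu().numpy().tolist())
        vals.extend(topk_sims.cpu().numpy().tolist())
    return sp.csr_matrix((vals, (rows, cols)), shape=(n_items, n_items))

Batch scores then come from a sparse-sparse product, densified only per batch for the top-k step.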
Example #6
    def train_sgd(self, data):
        n_rows = data.shape[0]
        idx_list = np.arange(n_rows)

        # Set model to training mode.
        model = self.net.to(self.device)
        model.train()
        np.random.shuffle(idx_list)

        epoch_loss = 0.0
        batch_size = (self.args.batch_size
                      if self.args.batch_size > 0 else len(idx_list))
        for batch_idx in minibatch(idx_list, batch_size=batch_size):
            batch_tensor = sparse2tensor(data[batch_idx]).to(self.device)

            # Compute loss
            outputs = model(user_id=batch_idx)
            loss = mse_loss(data=batch_tensor,
                            logits=outputs,
                            weight=self.weight_alpha).sum()
            epoch_loss += loss.item()

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

        return epoch_loss
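mse_loss is not shown on this page. Given the call signature and the C = alpha * rating + 1 weighting used by the ALS variant below, a plausible sketch (an assumption, not the source's definition):

import torch

def mse_loss(data, logits, weight):
    """Elementwise square error with observed entries up-weighted:
    C = 1 + weight * x, as in weighted matrix factorization (assumed)."""
    confidence = 1 + weight * data
    return confidence * (data - logits) ** 2

The caller above reduces this elementwise result with .sum().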
Example #7
    def train_als(self, data):
        model = self.net  # Calling .to() on this model raises a warning.
        P = model.P.detach()
        Q = model.Q.detach()

        weight_alpha = self.weight_alpha - 1
        # Using PyTorch for ALS optimization
        # Update P
        lambda_eye = torch.eye(self.dim).to(self.device) * self.args.l2
        # residual = Q^tQ + lambda*I
        residual = torch.mm(Q.t(), Q) + lambda_eye
        for user, batch_data in enumerate(data):
            # x_u: N x 1
            x_u = sparse2tensor(batch_data).to(self.device).t()
            # Cu = diagMat(alpha * rating + 1)
            cu = batch_data.toarray().squeeze() * weight_alpha + 1
            Cu = _array2sparsediag(cu).to(self.device)
            Cu_minusI = _array2sparsediag(cu - 1).to(self.device)
            # Q^tCuQ + lambda*I = Q^tQ + lambda*I + Q^t(Cu-I)Q
            # left hand side
            lhs = torch.mm(Q.t(), Cu_minusI.mm(Q)) + residual
            # right hand side
            rhs = torch.mm(Q.t(), Cu.mm(x_u))

            # Solve the normal equations via an explicit inverse
            # (torch.linalg.solve(lhs, rhs) would be the more stable choice).
            new_p_u = torch.mm(lhs.inverse(), rhs)
            model.P[user] = new_p_u.t()

        # Update Q
        data = data.transpose()
        # residual = P^tP + lambda*I
        residual = torch.mm(P.t(), P) + lambda_eye
        for item, batch_data in enumerate(data):
            # x_v: M x 1
            x_v = sparse2tensor(batch_data).to(self.device).t()
            # Cv = diagMat(alpha * rating + 1)
            cv = batch_data.toarray().squeeze() * weight_alpha + 1
            Cv = _array2sparsediag(cv).to(self.device)
            Cv_minusI = _array2sparsediag(cv - 1).to(self.device)
            # left hand side
            lhs = torch.mm(P.t(), Cv_minusI.mm(P)) + residual
            # right hand side
            rhs = torch.mm(P.t(), Cv.mm(x_v))

            new_q_v = torch.mm(lhs.inverse(), rhs)
            model.Q[item] = new_q_v.t()

        return 0  # ALS updates are closed-form; no scalar loss to report.
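Written out, each user update in the loop above solves the weighted-ALS normal equations from the comments (the item update is symmetric in P and Q):

p_u = (Q^\top C_u Q + \lambda I)^{-1} Q^\top C_u x_u,
\qquad C_u = \mathrm{diag}(\alpha\, x_u + 1)

and the one-time precomputation of residual uses the identity

Q^\top C_u Q + \lambda I = (Q^\top Q + \lambda I) + Q^\top (C_u - I)\, Q.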
    def _initialize(self, train_data):
        """Initialize fake data."""
        fake_data = self.init_fake_data(train_data=train_data)

        self.fake_tensor = sparse2tensor(fake_data)
        self.fake_tensor.requires_grad_()

        self.optimizer = optim.SGD([self.fake_tensor],
                                   lr=self.args.adv_lr,
                                   momentum=self.args.adv_momentum)
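init_fake_data is defined elsewhere; one plausible initialization (hypothetical, not the source's) gives each fake user a random profile sized like an average real user's history:

import numpy as np
import scipy.sparse as sp

def init_fake_data(train_data, n_fakes):
    """Hypothetical initializer: each fake user interacts with a random
    subset of items, sized like an average real user's history."""
    n_items = train_data.shape[1]
    avg_clicks = max(1, train_data.nnz // train_data.shape[0])
    fake = sp.lil_matrix((n_fakes, n_items), dtype=np.float64)
    for u in range(n_fakes):
        items = np.random.choice(n_items, size=avg_clicks, replace=False)
        fake[u, items] = 1.0
    return fake.tocsr()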
Example #9
    def recommend(self, data, top_k, return_preds=False, allow_repeat=False):
        # Set model to eval mode
        model = self.net.to(self.device)
        model.eval()

        # Transpose the data first for ItemVAE.
        data = data.transpose()

        n_rows = data.shape[0]
        n_cols = data.shape[1]

        idx_list = np.arange(n_rows)
        recommendations = np.empty([n_cols, top_k], dtype=np.int64)

        # Make predictions first, and then sort for top-k.
        all_preds = list()
        with torch.no_grad():
            for batch_idx in minibatch(idx_list,
                                       batch_size=self.args.valid_batch_size):
                data_tensor = sparse2tensor(data[batch_idx]).to(self.device)
                preds = model(data_tensor)
                all_preds.append(preds)

        # Stitch the item-wise batches together, then transpose back to
        # users-by-items before ranking.
        all_preds = torch.cat(all_preds, dim=0).t()
        data = data.transpose()
        idx_list = np.arange(n_cols)
        for batch_idx in minibatch(idx_list,
                                   batch_size=self.args.valid_batch_size):
            batch_data = data[batch_idx].toarray()
            preds = all_preds[batch_idx]
            if not allow_repeat:
                preds[batch_data.nonzero()] = -np.inf
            if top_k > 0:
                _, recs = preds.topk(k=top_k, dim=1)
                recommendations[batch_idx] = recs.cpu().numpy()

        if return_preds:
            return recommendations, all_preds.cpu()
        else:
            return recommendations
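Putting it together, hypothetical evaluation-time usage of recommend (trainer and test_data are placeholders):

top_k = 10
# Top-k unseen items per user; recs has shape [n_users, top_k].
recs = trainer.recommend(test_data, top_k=top_k, allow_repeat=False)
# Also return the dense score matrix for every user.
recs, preds = trainer.recommend(test_data, top_k=top_k, return_preds=True)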