Example #1
    def validation_step(self, batch, batch_idx):
        x, y = batch

        logits, _ = self(x)

        # Zero out items already present in the input so that previously
        # seen items are not recommended again during evaluation
        logits[x.nonzero(as_tuple=True)] = 0.

        logits = logits.cpu().numpy()
        y = y.cpu().numpy()

        val_metric = eval_metric(logits, y)
        # The full evaluation below is too slow to run at every validation step
        # val_metric, metrics, metrics_raw = eval_proced(logits, y, high_idxs, low_idxs, 'val')
        # return {"val_metric": val_metric, "metrics": metrics, 'metrics_raw': metrics_raw}

        return {'val_metric': val_metric}
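Neither snippet defines eval_metric; given the 'val/ndcg_50' tag logged in Example #2, it presumably computes NDCG@50 over the predicted scores. A minimal NDCG@k sketch of what such a metric could look like (the signature, the k default, and the aggregated flag are assumptions here, not the project's actual implementation):

import numpy as np

def eval_metric(logits, y_true, k=50, aggregated=True):
    # Hypothetical NDCG@k sketch; logits and y_true are (n_users, n_items) arrays,
    # with y_true holding binary relevance labels.
    top_k = np.argsort(-logits, axis=1)[:, :k]        # indices of the top-k items per user
    discounts = 1. / np.log2(np.arange(2, k + 2))     # DCG position discounts 1/log2(rank+1)

    dcg = np.take_along_axis(y_true, top_k, axis=1) @ discounts
    # Ideal DCG: all relevant items (capped at k) ranked first
    n_rel = np.minimum(y_true.sum(axis=1).astype(int), k)
    idcg = np.array([discounts[:n].sum() for n in n_rel])
    ndcg = np.where(idcg > 0, dcg / np.maximum(idcg, 1e-12), 0.)

    return ndcg.mean() if aggregated else ndcg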
Example #2
                # ... (snippet starts part-way through the training loop; the
                # preceding logging call is cut off) ...

                # --- Validation --- #
                if epoch % VAE_LOG_VAL_EVERY == 0:
                    model.eval()
                    val_metrics = []
                    for x, y in vd_loader:
                        x = x.to(device)
                        logits, _ = model(x)

                        # Zero out items already present in the input so that
                        # previously seen items are not recommended again
                        logits[x.nonzero(as_tuple=True)] = 0.

                        logits = logits.detach().cpu().numpy()
                        # aggregated=False is expected to return per-user scores,
                        # which are pooled and averaged over all batches below
                        val_metrics += list(
                            eval_metric(logits, y, aggregated=False))

                    curr_value = np.mean(val_metrics)
                    summ.add_scalar('val/ndcg_50', curr_value, epoch)
                    summ.flush()

                    if curr_value > best_value:
                        print('New best model found')
                        best_value = curr_value

                        pickle_dump(
                            config, os.path.join(log_val_str,
                                                 'best_config.pkl'))
                        torch.save(model.state_dict(),
                                   os.path.join(log_val_str, 'best_model.pth'))
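
                        # The best checkpoint can later be restored with, e.g.:
                        #   model.load_state_dict(torch.load(
                        #       os.path.join(log_val_str, 'best_model.pth')))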
            # Logging hyperparams and metrics
            # ... (the rest of the surrounding VAE loop is cut off here; the loop
            # below performs the BPR grid search) ...

        for config in tqdm(pg, desc='configs'):

            summ = SummaryWriter(os.path.join(log_val_str, str(config)))

            Atild = BPR(A, config['factors'], config['lr'], config['reg'],
                        config['iter'])

            # Keep only the rows belonging to the validation split
            Atild = Atild[:sp_vd_tr_data.shape[0]]
            # Zero out entries already present in the validation users' training
            # data so that seen items are not ranked again
            Atild[sp_vd_tr_data.nonzero()] = 0.

            preds = Atild
            true = sp_vd_te_data.toarray()

            curr_value = eval_metric(preds, true)

            # Logging hyperparams and metrics
            summ.add_hparams({
                **config, 'fold_n': fold_n
            }, {'val/ndcg_50': curr_value})
            summ.flush()

            if curr_value > best_value:
                print('New best model found')
                best_value = curr_value

                pickle_dump(config, os.path.join(log_val_str,
                                                 'best_config.pkl'))

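Both searches rely on helpers defined elsewhere in the project, notably pg (the hyperparameter grid iterated with tqdm) and pickle_dump (used to persist the best config). Assuming pg is a standard scikit-learn ParameterGrid and pickle_dump a thin pickle wrapper, the setup could look roughly like this (the grid values are placeholders, not the project's actual search space):

import pickle

from sklearn.model_selection import ParameterGrid

# Hypothetical search space; the values actually used by the project are not shown above.
pg = ParameterGrid({
    'factors': [50, 100, 200],
    'lr': [1e-3, 1e-2],
    'reg': [1e-4, 1e-2],
    'iter': [100],
})

def pickle_dump(obj, path):
    # Thin wrapper assumed by the snippets above
    with open(path, 'wb') as f:
        pickle.dump(obj, f)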