Example #1
                    val_metrics = []
                    for x, y in vd_loader:
                        x = x.to(device)
                        logits, _ = model(x)

                        # Mask out items already seen in the input (training
                        # interactions) so they cannot be recommended again
                        logits[x.nonzero(as_tuple=True)] = .0

                        logits = logits.detach().cpu().numpy()
                        val_metrics += list(
                            eval_metric(logits, y, aggregated=False))

                    curr_value = np.mean(val_metrics)
                    summ.add_scalar('val/ndcg_50', curr_value, epoch)
                    summ.flush()

                    if curr_value > best_value:
                        print('New best model found')
                        best_value = curr_value

                        pickle_dump(
                            config, os.path.join(log_val_str,
                                                 'best_config.pkl'))
                        torch.save(model.state_dict(),
                                   os.path.join(log_val_str, 'best_model.pth'))
            # Logging hyperparams and metrics
            summ.add_hparams({
                **config, 'fold_n': fold_n
            }, {'val/ndcg_50': curr_value})
            summ.flush()
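
The validation loop above assumes an eval_metric helper that, when called with aggregated=False, returns one NDCG@50 value per user. That helper is not part of the excerpt; the sketch below is only an assumed, minimal version, and it further assumes logits and y are dense (n_users, n_items) arrays with binary relevance in y.

    import numpy as np

    def eval_metric(logits, y, k=50, aggregated=True):
        """NDCG@k for binary relevance on dense (n_users, n_items) arrays."""
        logits = np.asarray(logits)
        y = np.asarray(y)
        k = min(k, logits.shape[1])

        # Unordered indices of the k highest-scored items per user ...
        top_k = np.argpartition(-logits, k - 1, axis=1)[:, :k]
        # ... re-sorted by score so that ranks are correct
        order = np.argsort(-np.take_along_axis(logits, top_k, axis=1), axis=1)
        top_k = np.take_along_axis(top_k, order, axis=1)

        discounts = 1.0 / np.log2(np.arange(2, k + 2))
        dcg = (np.take_along_axis(y, top_k, axis=1) * discounts).sum(axis=1)

        # Ideal DCG: all relevant items (capped at k) ranked at the top
        n_rel = np.minimum(y.sum(axis=1), k).astype(int)
        idcg = np.array([discounts[:n].sum() for n in n_rel])
        ndcg = np.where(idcg > 0, dcg / np.maximum(idcg, 1e-12), 0.0)
        return ndcg.mean() if aggregated else ndcg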
Example #2
            # Mask out items already seen in the input so they cannot be
            # recommended again
            logits[x.nonzero(as_tuple=True)] = .0

            # Accumulate predictions and ground-truth labels over all test batches
            all_logits.append(logits.detach().cpu().numpy())
            all_y.append(y.detach().cpu().numpy())

        preds = np.concatenate(all_logits)
        true = np.concatenate(all_y)

        full_metrics = dict()
        full_raw_metrics = dict()
        for trait in DEMO_TRAITS:
            user_groups = user_groups_all_traits[trait]

            _, metrics, metrics_raw = eval_proced(preds=preds,
                                                  true=true,
                                                  tag='test',
                                                  user_groups=user_groups,
                                                  tids_path=tids_path,
                                                  entropy_norm=True)
            full_metrics.update(metrics)
            full_raw_metrics.update(metrics_raw)

        # Logging hyperparams and metrics
        summ.add_hparams({**best_config, 'fold_n': fold_n}, full_metrics)
        summ.flush()

        # Save aggregated and raw metrics to disk
        pickle_dump(full_metrics, os.path.join(log_te_str, 'full_metrics.pkl'))
        pickle_dump(full_raw_metrics,
                    os.path.join(log_te_str, 'full_raw_metrics.pkl'))
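
Both examples rely on a small pickle_dump helper that is not shown in the excerpts. A plausible minimal version, assuming it simply serialises an object to the given file path, is:

    import pickle

    def pickle_dump(obj, path):
        """Serialise obj to the given file path."""
        with open(path, 'wb') as f:
            pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)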