Example #1
    def test_step(self, batch, batch_idx):

        x, y = batch

        logits, _ = self(x)

        # Mask out items already present in the input so they are not recommended again
        logits[x.nonzero(as_tuple=True)] = 0.

        logits = logits.cpu().numpy()
        y = y.cpu().numpy()

        full_metrics = dict()
        full_raw_metrics = dict()
        for trait in TRAITS:
            _, _, te_low_idxs, te_high_idxs = self.low_high_indxs[trait]
            _, metrics, metrics_raw = eval_proced(logits, y, te_high_idxs, te_low_idxs, 'test')
            # Insert the trait name into the tag of the group-specific metrics
            # (e.g. 'test_high_...' -> 'test_<trait>_high_...')
            metrics = {k if ('high' not in k and 'low' not in k) else k[:5] + trait + '_' + k[5:]: v
                       for k, v in metrics.items()}
            metrics_raw = {k if ('high' not in k and 'low' not in k) else trait + '_' + k: v
                           for k, v in metrics_raw.items()}
            full_metrics.update(metrics)
            full_raw_metrics.update(metrics_raw)

        return {"full_metrics": full_metrics, 'full_raw_metrics': full_raw_metrics}
Example #2

        # Keep only the rows corresponding to the validation users
        Atild = Atild[sp_tr_data.shape[0]:, :]
        # Mask out items already present in the validation users' training interactions
        Atild[sp_vd_tr_data.nonzero()] = 0.

        preds = Atild.toarray()
        true = sp_vd_te_data.toarray()

        # TensorBoard writer for the validation logs
        summ = SummaryWriter(log_val_str)

        # Compute metrics at different thresholds
        full_metrics = dict()
        val_metric = None
        for trait in TRAITS:
            vd_low_idxs, vd_high_idxs, _, _ = low_high_indxs[trait]
            val_metric, metrics, _ = eval_proced(preds, true, vd_high_idxs,
                                                 vd_low_idxs, 'val')
            # Insert the trait name into the tag of the group-specific metrics
            # (e.g. 'val_high_...' -> 'val_<trait>_high_...')
            metrics = {k if ('high' not in k and 'low' not in k) else k[:4] + trait + '_' + k[4:]: v
                       for k, v in metrics.items()}
            full_metrics.update(metrics)

        # Keep track of the best configuration found so far on the validation metric
        if val_metric > best_value:
            print('New best model found')
            best_value = val_metric
            best_config = config

        # Logging hyperparams and metrics
        hparams = {**config, 'seed': seed}
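
The snippet stops just before the logging call itself. With TensorBoard's SummaryWriter (created as summ above), logging the hyperparameters together with the collected validation metrics would look roughly like the sketch below, mirroring the add_hparams call used in Example #3; the dictionary contents and the log directory are placeholders, not values from the project.

from torch.utils.tensorboard import SummaryWriter

summ = SummaryWriter('logs/val')                           # placeholder log directory
hparams = {'lr': 1e-3, 'weight_decay': 1e-5, 'seed': 42}   # stand-in for {**config, 'seed': seed}
full_metrics = {'val_ndcg@10': 0.31}                       # stand-in for the collected metrics
summ.add_hparams(hparams, full_metrics)                    # one hparams run with its metrics
summ.flush()
summ.close()
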
Example #3
            # Mask out items already present in the input so they are not recommended again
            logits[x.nonzero(as_tuple=True)] = 0.

            # Collect the predictions and ground-truth labels of every batch
            all_logits.append(logits.detach().cpu().numpy())
            all_y.append(y.detach().cpu().numpy())

        preds = np.concatenate(all_logits)
        true = np.concatenate(all_y)

        full_metrics = dict()
        full_raw_metrics = dict()
        for trait in DEMO_TRAITS:
            user_groups = user_groups_all_traits[trait]

            _, metrics, metrics_raw = eval_proced(preds=preds,
                                                  true=true,
                                                  tag='test',
                                                  user_groups=user_groups,
                                                  tids_path=tids_path,
                                                  entropy_norm=True)
            full_metrics.update(metrics)
            full_raw_metrics.update(metrics_raw)

        # Logging hyperparams and metrics
        summ.add_hparams({**best_config, 'fold_n': fold_n}, full_metrics)
        summ.flush()

        # Save the aggregated and raw metrics to disk
        pickle_dump(full_metrics, os.path.join(log_te_str, 'full_metrics.pkl'))
        pickle_dump(full_raw_metrics, os.path.join(log_te_str, 'full_raw_metrics.pkl'))
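
pickle_dump is a project helper that is not shown in these snippets. Assuming it simply serializes an object to the given path, a minimal stand-in could look like this (the behaviour is an assumption, not the project's actual implementation):

import os
import pickle

def pickle_dump(obj, path):
    """Serialize obj to path with pickle, creating parent directories if needed."""
    dirname = os.path.dirname(path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with open(path, 'wb') as f:
        pickle.dump(obj, f)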