Example #1
    def on_pretrain_routine_start(self, trainer: Trainer,
                                  pl_module: LightningModule) -> None:
        from pl_bolts.models.self_supervised.evaluator import SSLEvaluator

        pl_module.non_linear_evaluator = SSLEvaluator(
            n_input=self.z_dim,
            n_classes=self.num_classes,
            p=self.drop_p,
            n_hidden=self.hidden_dim,
        ).to(pl_module.device)

        self.confusion_matrix = ConfusionMatrix(self.num_classes).to(
            pl_module.device)

        self.optimizer = torch.optim.Adam(
            pl_module.non_linear_evaluator.parameters(), lr=1e-4)
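
For reference, a minimal self-contained sketch of the metric call used above. This assumes an older torchmetrics release (before the task argument was introduced), where num_classes is the first positional parameter of ConfusionMatrix:

import torch
from torchmetrics import ConfusionMatrix

cm = ConfusionMatrix(num_classes=3)  # equivalent to ConfusionMatrix(3) above
preds = torch.tensor([0, 2, 1, 2])
target = torch.tensor([0, 1, 1, 2])
cm.update(preds, target)
print(cm.compute())  # 3x3 count matrix: rows index the target, columns the prediction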
Example #2
def test_no_scalar_compute() -> None:
    """tests that an assertion error is thrown if the wrapped basemetric gives a non-scalar on compute."""
    min_max_nsm = MinMaxMetric(ConfusionMatrix(num_classes=2))

    with pytest.raises(
            RuntimeError,
            match=r"Returned value from base metric should be a scalar .*"):
        min_max_nsm.compute()
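
For contrast, MinMaxMetric accepts a base metric whose compute() returns a scalar, e.g. Accuracy. A hedged sketch, assuming an older torchmetrics where Accuracy needs no task argument:

import torch
from torchmetrics import Accuracy
from torchmetrics.wrappers import MinMaxMetric

wrapped = MinMaxMetric(Accuracy())
wrapped.update(torch.tensor([0, 1, 1]), torch.tensor([0, 1, 0]))
print(wrapped.compute())  # dict of scalar 'raw', 'max' and 'min' values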
Example #3
 def __init__(self, config, trial=None):
     super().__init__()
     self.trial = trial
     self.pylog = logging.getLogger(__name__)
     logging.getLogger("lightning").setLevel(self.pylog.level)
     self.config = config
     self.needs_float = not getattr(config.system_config, "half_precision", False)
     self.hparams = DictionaryUtility.to_dict(config)
     self.n_type = config.system_config.n_type
     self.lr = config.optimize_config.lr
     # named module_utility to avoid shadowing nn.Module.modules()
     self.module_utility = ModuleUtility(config.net_config.imports +
                                         config.dataset_config.imports +
                                         config.optimize_config.imports)
     self.model_class = self.module_utility.retrieve_class(
         config.net_config.net_class)
     # self.data_module = PSDDataModule(config,self.device)
     self.model = self.model_class(config)
     self.criterion_class = self.module_utility.retrieve_class(
         config.net_config.criterion_class)
     self.criterion = self.criterion_class(
         *config.net_config.criterion_params)
     self.softmax = LogSoftmax(dim=1)
     self.accuracy = Accuracy()
     self.confusion = ConfusionMatrix(num_classes=self.n_type)
     calgroup = getattr(self.config.dataset_config, "calgroup", None)
     if self.config.dataset_config.dataset_class == "PulseDatasetDet":
         self.evaluator = PhysEvaluator(
             self.config.system_config.type_names,
             self.logger,
             device=self.device)
     elif self.config.dataset_config.dataset_class == "PulseDatasetWaveformNorm":
         self.evaluator = TensorEvaluator(self.logger,
                                          calgroup=calgroup,
                                          target_has_phys=False,
                                          target_index=None,
                                          metric_name="accuracy",
                                          metric_unit="")
     else:
         if calgroup is not None:
             self.evaluator = PSDEvaluator(
                 self.config.system_config.type_names,
                 self.logger,
                 device=self.device,
                 calgroup=calgroup)
         else:
             self.evaluator = PSDEvaluator(
                 self.config.system_config.type_names,
                 self.logger,
                 device=self.device)
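
ModuleUtility.retrieve_class is project-specific. A minimal importlib-based equivalent (a hypothetical stand-in, not this project's implementation) could look like:

import importlib

def retrieve_class(qualified_name):
    """Resolve a dotted path such as 'package.module.ClassName' to the class object."""
    module_name, _, class_name = qualified_name.rpartition(".")
    return getattr(importlib.import_module(module_name), class_name)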
Example #4
 def __init__(self, project_parameters):
     super().__init__()
     self.project_parameters = project_parameters
     self.backbone_model = _get_backbone_model(
         project_parameters=project_parameters)
     self.activation_function = nn.Softmax(dim=-1)
     self.loss_function = _get_loss_function(
         project_parameters=project_parameters)
     self.accuracy = Accuracy()
     self.confusion_matrix = ConfusionMatrix(
         num_classes=project_parameters.num_classes)
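
A hedged sketch of how such a module typically feeds these metrics during validation; this is the common LightningModule pattern, not necessarily this project's actual code:

 def validation_step(self, batch, batch_idx):
     x, y = batch
     logits = self.backbone_model(x)
     loss = self.loss_function(logits, y)
     probs = self.activation_function(logits)
     self.accuracy(probs, y)                         # state accumulates over the epoch
     self.confusion_matrix(probs.argmax(dim=-1), y)
     self.log('val_loss', loss)
     return loss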
Example #5
 def on_validation_epoch_end(self, trainer: 'pl.Trainer',
                             pl_module: 'pl.LightningModule') -> None:
     confusion = self.confusion_matrix.compute()
     confusion = confusion.type(torch.int)
     confusion_table = wandb.Table(data=confusion.tolist(),
                                   columns=self.name_classes)
     pl_module.logger.experiment.log({'confusion': confusion_table})
     table = []
     for image, pred, label in zip(self.images.unbind(0),
                                   self.mlp_preds.unbind(0),
                                   self.labels.unbind(0)):
         table.append((self.epoch, label.cpu(), pred.cpu(),
                       wandb.Image(image[0:3].detach().cpu())))
     table = wandb.Table(data=table,
                         columns=['epoch', 'label', 'pred', 'image'])
     pl_module.logger.experiment.log({'validation_sample': table})
     self.epoch += 1
     self.confusion_matrix = ConfusionMatrix(self.num_classes).to(
         pl_module.device)
     self.time_to_sample = True
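
Rebuilding the metric each epoch works, but torchmetrics metrics also expose reset(), which clears the accumulated state in place and keeps the existing device placement; the ConfusionMatrix re-instantiation above could be replaced with:

     self.confusion_matrix.reset()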
Example #6
def main():
    # os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
    path = r"C:\Users\Norma\PycharmProjects\Machine Learning\Datasets\MNIST\train.csv"
    x, y = parse_data(path)
    x = x / np.std(x)
    x = x - np.mean(x)
    input_shape = x.shape[1]
    train_dl, test_dl = get_data_loaders(x, y)

    loss_fn = torch.nn.CrossEntropyLoss()
    print("START TRAINING!")
    epochs = 10 + 1
    len_of_population = 50
    mutation_parameters = (torch.tensor(0, device=device, dtype=torch.float32),
                           torch.tensor(0.05,
                                        device=device,
                                        dtype=torch.float32))
    # mutation_parameters = [torch.tensor(50, device=device, dtype=torch.float32)]
    population = np.array([
        MLP(input_shape, 10, mutation_parameters)
        for _ in range(len_of_population)
    ])
    with torch.no_grad():
        arr_of_nets = fit(population, epochs, loss_fn, train_dl, test_dl)

    steps = []
    for i in range(len(history_of_fittness)):
        step = dict(
            method='restyle',
            args=[{
                "visible": [False] * len(fig.data)
            }],
        )
        count_of_plots = 10
        for k in range(count_of_plots):
            step["args"][0]["visible"][count_of_plots * i + k] = True
        steps.append(step)

    sliders = [dict(steps=steps)]
    fig.layout.sliders = sliders

    fitted_net = arr_of_nets[0]
    x, y = test_dl.dataset.tensors[0], test_dl.dataset.tensors[1]
    y_predict = fitted_net(x)
    accuracy = Accuracy().to(device)
    acc = accuracy(y_predict, y)
    print(f"Result accuracy={round(acc.item(), 3)}")
    conf_mat = ConfusionMatrix(10).to(device)
    res_c_m = conf_mat(y_predict, test_dl.dataset.tensors[1])
    print(res_c_m)
    return arr_of_nets
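
The final one-shot evaluation could also use the functional API (again assuming an older torchmetrics where num_classes is passed directly), reusing y and y_predict from main():

from torchmetrics.functional import confusion_matrix

res_c_m = confusion_matrix(y_predict, y, num_classes=10)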
Example #7
    # Assert that best_metric returns both index and value
    val, idx = tracker.best_metric(return_step=True)
    if isinstance(val, dict):
        for v, i in zip(val.values(), idx.values()):
            assert v != 0.0
            assert i in list(range(5))
    else:
        assert val != 0.0
        assert idx in list(range(5))


@pytest.mark.parametrize(
    "base_metric",
    [
        ConfusionMatrix(3),
        MetricCollection([ConfusionMatrix(3), Accuracy(num_classes=3)]),
    ],
)
def test_best_metric_for_not_well_defined_metric_collection(base_metric):
    """Test that if user tries to compute the best metric for a metric that does not have a well defined best, we
    throw an warning and return None."""
    tracker = MetricTracker(base_metric)
    for _ in range(3):
        tracker.increment()
        for _ in range(5):
            tracker.update(torch.randint(3, (10,)), torch.randint(3, (10,)))

    with pytest.warns(UserWarning, match="Encountered the following error when trying to get the best metric.*"):
        best = tracker.best_metric()
        if isinstance(best, dict):
            assert all(v is None for v in best.values())
        else:
            assert best is None
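
For comparison, a minimal hedged sketch with a scalar base metric, where best_metric is well defined (older Accuracy constructor assumed):

tracker = MetricTracker(Accuracy(num_classes=3, average="macro"))
tracker.increment()
tracker.update(torch.randint(3, (10,)), torch.randint(3, (10,)))
print(tracker.compute())      # scalar accuracy for the current increment
print(tracker.best_metric())  # well defined, so no warning is raised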
Example #8
    def allInOne(self, thresh):
        cm = ConfusionMatrix(2, threshold=thresh)
        cb = ConfusionMatrix(self.K)
        cbm = ConfusionMatrix(self.K)

        for d in self.raw:
            cm.update(preds=torch.Tensor([d.pm]),
                      target=torch.LongTensor([d.ym]))
            cbm.update(preds=torch.Tensor([d.pb]),
                       target=torch.LongTensor([int(d.pm > thresh)]))
            if d.yb is not None:
                cb.update(preds=torch.Tensor([d.pb]),
                          target=torch.LongTensor([d.yb]))

        self.cm = cm.compute()
        self.cb = cb.compute()
        self.cbm = cbm.compute()
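
After compute(), the binary 2x2 matrix can be unpacked into raw counts. In torchmetrics the rows index the target and the columns the prediction, so (hedged sketch):

        tn, fp, fn, tp = self.cm.flatten().tolist()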
Example #9
class SSLOnlineEvaluator(Callback):  # pragma: no cover
    """
    Attaches an MLP for fine-tuning using the standard self-supervised protocol.

    Example::

        # your model must have 2 attributes
        model = Model()
        model.z_dim = ... # the representation dim
        model.num_classes = ... # the number of classes in the model

        online_eval = SSLOnlineEvaluator(
            z_dim=model.z_dim,
            num_classes=model.num_classes,
            dataset='imagenet'
        )

    """
    def __init__(
        self,
        dataset: str,
        drop_p: float = 0.2,
        hidden_dim: Optional[int] = None,
        z_dim: Optional[int] = None,
        num_classes: Optional[int] = None,
        name_classes: Optional[List[str]] = None,
    ):
        """
        Args:
            dataset: if stl10, need to get the labeled batch
            drop_p: Dropout probability
            hidden_dim: Hidden dimension for the fine-tune MLP
            z_dim: Representation dimension
            num_classes: Number of classes
            name_classes: Optional class names, used as confusion-matrix columns when logging
        """
        super().__init__()

        self.hidden_dim = hidden_dim
        self.drop_p = drop_p
        self.optimizer: Optimizer

        self.z_dim = z_dim
        self.num_classes = num_classes
        self.name_classes = name_classes
        self.dataset = dataset

        self.confusion_matrix = None

        self.time_to_sample = True
        self.images = None
        self.mlp_preds = None
        self.labels = None
        self.batch_size = None
        self.epoch = 0

    def on_pretrain_routine_start(self, trainer: Trainer,
                                  pl_module: LightningModule) -> None:
        from pl_bolts.models.self_supervised.evaluator import SSLEvaluator

        pl_module.non_linear_evaluator = SSLEvaluator(
            n_input=self.z_dim,
            n_classes=self.num_classes,
            p=self.drop_p,
            n_hidden=self.hidden_dim,
        ).to(pl_module.device)

        self.confusion_matrix = ConfusionMatrix(self.num_classes).to(
            pl_module.device)

        self.optimizer = torch.optim.Adam(
            pl_module.non_linear_evaluator.parameters(), lr=1e-4)

    def get_representations(self, pl_module: LightningModule,
                            x: Tensor) -> Tensor:
        representations = pl_module(x)
        representations = representations.reshape(representations.size(0), -1)
        return representations

    def to_device(self, batch: Sequence,
                  device: Union[str, device]) -> Tuple[Tensor, Tensor]:
        # get the labeled batch
        if self.dataset == 'stl10':
            labeled_batch = batch[1]
            batch = labeled_batch

        inputs, y = batch

        # last input is for online eval
        x = inputs[-1]
        x = x.to(device)
        y = y.to(device)

        return x, y

    def on_train_batch_end(
        self,
        trainer: Trainer,
        pl_module: LightningModule,
        outputs: Sequence,
        batch: Sequence,
        batch_idx: int,
        dataloader_idx: int,
    ) -> None:
        x, y = self.to_device(batch, pl_module.device)

        with torch.no_grad():
            representations = self.get_representations(pl_module, x)

        representations = representations.detach()

        # forward pass
        mlp_preds = pl_module.non_linear_evaluator(
            representations)  # type: ignore[operator]
        mlp_loss = F.cross_entropy(mlp_preds, y)

        # update finetune weights
        mlp_loss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()

        # log metrics
        train_acc = accuracy(mlp_preds, y)
        pl_module.log('online_train_acc',
                      train_acc,
                      on_step=True,
                      on_epoch=False)
        pl_module.log('online_train_loss',
                      mlp_loss,
                      on_step=True,
                      on_epoch=False)

    def on_validation_batch_end(
        self,
        trainer: Trainer,
        pl_module: LightningModule,
        outputs: Sequence,
        batch: Sequence,
        batch_idx: int,
        dataloader_idx: int,
    ) -> None:
        x, y = self.to_device(batch, pl_module.device)

        with torch.no_grad():
            representations = self.get_representations(pl_module, x)

        representations = representations.detach()

        # forward pass
        mlp_preds = pl_module.non_linear_evaluator(
            representations)  # type: ignore[operator]
        mlp_loss = F.cross_entropy(mlp_preds, y)

        # log metrics
        val_acc = accuracy(mlp_preds, y)
        pl_module.log('online_val_acc',
                      val_acc,
                      on_step=False,
                      on_epoch=True,
                      sync_dist=True)
        pl_module.log('online_val_loss',
                      mlp_loss,
                      on_step=False,
                      on_epoch=True,
                      sync_dist=True)
        self.confusion_matrix(mlp_preds, y)

        if self.time_to_sample:
            N, C, H, W = batch[0][2].shape
            num = min(N, 16)
            self.images = batch[0][2][0:num]
            self.mlp_preds = torch.argmax(mlp_preds[0:num], dim=1)
            self.labels = y[0:num]
            self.time_to_sample = False

    def on_validation_epoch_end(self, trainer: 'pl.Trainer',
                                pl_module: 'pl.LightningModule') -> None:
        confusion = self.confusion_matrix.compute()
        confusion = confusion.type(torch.int)
        confusion_table = wandb.Table(data=confusion.tolist(),
                                      columns=self.name_classes)
        pl_module.logger.experiment.log({'confusion': confusion_table})
        table = []
        for image, pred, label in zip(self.images.unbind(0),
                                      self.mlp_preds.unbind(0),
                                      self.labels.unbind(0)):
            table.append((self.epoch, label.cpu(), pred.cpu(),
                          wandb.Image(image[0:3].detach().cpu())))
        table = wandb.Table(data=table,
                            columns=['epoch', 'label', 'pred', 'image'])
        pl_module.logger.experiment.log({'validation_sample': table})
        self.epoch += 1
        self.confusion_matrix = ConfusionMatrix(self.num_classes).to(
            pl_module.device)
        self.time_to_sample = True
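
A hedged sketch of wiring the callback into a run (standard Lightning API; the model must expose z_dim and num_classes as in the class docstring, and class_names is a hypothetical list of label strings):

model = Model()
online_eval = SSLOnlineEvaluator(
    dataset='imagenet',
    z_dim=model.z_dim,
    num_classes=model.num_classes,
    name_classes=class_names,
)
trainer = Trainer(callbacks=[online_eval])
trainer.fit(model)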