Example #1
def test_hooks_removed_after_summarize(mode):
    """ Test that all hooks were properly removed after summary, even ones that were not run. """
    model = UnorderedModel()
    summary = ModelSummary(model, mode=mode)
    # hooks should be removed
    for _, layer in summary.summarize().items():
        handle = layer._hook_handle
        assert handle.id not in handle.hooks_dict_ref()
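For orientation, here is a minimal, self-contained sketch of constructing a ModelSummary directly (assuming pytorch_lightning >= 1.5, where the max_depth argument replaced the deprecated mode seen in several examples below; TinyModel is a hypothetical module, used only for illustration):

from torch import nn
import pytorch_lightning as pl
from pytorch_lightning.utilities.model_summary import ModelSummary

class TinyModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2))

    def forward(self, x):
        return self.net(x)

# Prints a table of layer names, types, and parameter counts.
print(ModelSummary(TinyModel(), max_depth=-1))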
Example #2

	def __init__(self, in_dims=None, num_classes=None, num_samples=0, **kwargs):

		super().__init__()
		self.save_hyperparameters()

		actfunc = torch.nn.LeakyReLU
		bias = True
		prior = [1., 'laplace'][0]  # index 0 selects the scale 1.; 'laplace' was an alternative prior
		if self.hparams.model == 'bnn':
			in_features = prod(in_dims)
			self.bnn = Sequential(
						Flatten(1, -1),
						MC_ExpansionLayer(num_MC=self.hparams.num_MC, input_dim=2),
						BayesLinear(in_features, self.hparams.num_hidden, prior=prior, bias=bias),
						actfunc(),
						BayesLinear(self.hparams.num_hidden, self.hparams.num_hidden, prior=prior, bias=bias),
						actfunc(),
						BayesLinear(self.hparams.num_hidden, self.hparams.num_hidden, prior=prior, bias=bias),
						actfunc(),
						# additional hidden BayesLinear blocks and a variant of the final layer were tried and left commented out
						BayesLinear(self.hparams.num_hidden, num_classes, prior=prior, bias=bias)
						)

		if self.hparams.model == 'cbnn':
			debug = 1  # divide channel widths by this factor to shrink the net while debugging
			layer_args = {'kernel_size': 5, 'padding': 2, 'stride': 1, 'num_MC': self.hparams.num_MC}
			self.bnn = Sequential(	MC_ExpansionLayer(num_MC=self.hparams.num_MC, input_dim=4),
						# PrintModule(),
						BayesConv2d(in_channels=in_dims[0], out_channels=int(96/debug), **layer_args),
						MC_BatchNorm2D(int(96/debug)),
						actfunc(),
						MC_MaxPool2d(kernel_size=2, stride=2),
						BayesConv2d(in_channels=int(96/debug), out_channels=int(128/debug), **layer_args),
						MC_BatchNorm2D(int(128/debug)),
						actfunc(),
						MC_MaxPool2d(kernel_size=2, stride=2),
						BayesConv2d(in_channels=int(128/debug), out_channels=int(256/debug), **layer_args),
						MC_BatchNorm2D(int(256/debug)),
						actfunc(),
						MC_MaxPool2d(kernel_size=2, stride=2),
						BayesConv2d(in_channels=int(256/debug), out_channels=int(128/debug), **layer_args),
						MC_BatchNorm2D(int(128/debug)),
						actfunc(),
						MC_MaxPool2d(kernel_size=2, stride=2),
						BayesAdaptiveInit_FlattenAndLinear(self.hparams.num_hidden),
						actfunc(),
						BayesLinear(self.hparams.num_hidden, num_classes)
			)
			self.bnn(torch.randn(1, *in_dims, dtype=torch.float32))  # dry-run forward so BayesAdaptiveInit_FlattenAndLinear can infer its input size

		self.criterion = MC_CrossEntropyLoss(num_samples=self.hparams.num_samples)

		self.summary = ModelSummary(model=self)
		self.num_params = self.summary.param_nums[0]  # reuse the summary instead of building a second one
Example #3
def test_max_depth_param(max_depth):
    """Test that only the modules up to the desired depth are shown"""
    model = DeepNestedModel()
    summary = ModelSummary(model, max_depth=max_depth)
    for lname in summary.layer_names:
        if max_depth >= 0:
            assert lname.count(".") < max_depth
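As a sanity check on the depth semantics being tested, a short sketch (Nested is a hypothetical module; the names and depths are illustrative, assuming pytorch_lightning >= 1.5):

from torch import nn
import pytorch_lightning as pl
from pytorch_lightning.utilities.model_summary import ModelSummary

class Nested(pl.LightningModule):
    def __init__(self):
        super().__init__()
        # 'block' has two children, so its rows appear only past depth 1
        self.block = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))

    def forward(self, x):
        return self.block(x)

model = Nested()
print(ModelSummary(model, max_depth=1).layer_names)   # ['block']
print(ModelSummary(model, max_depth=-1).layer_names)  # ['block', 'block.0', 'block.1']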
Example #4
File: run.py  Project: toyai/neuro_models
def main(cfg: DictConfig = None):
    log.info("==> Training Configs:\n%s", OmegaConf.to_yaml(cfg))

    width, _, img_size, dropout_p, _, _ = compound_params(cfg.name)
    transforms = T.Compose(
        [
            T.Resize(size=(img_size, img_size)),
            T.ToTensor(),
            T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ]
    )
    if cfg.pretrained:
        network = EfficientNet(
            name=cfg.name,
            num_classes=cfg.num_classes,
        ).from_pretrained(name=cfg.name)
        for params in network.parameters():
            params.requires_grad = False

        final_out_channels = round_filters(1280, 8, width)

        network.classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(1),
            nn.Dropout(dropout_p),
            nn.Linear(final_out_channels, cfg.num_classes),
        )
    else:
        network = EfficientNet(name=cfg.name, num_classes=cfg.num_classes)

    gym = EfficientNetGym(network, cfg)
    dm = instantiate(
        cfg.dm,
        train_transforms_conf=transforms,
        test_transforms_conf=transforms,
    )

    with open(f"{cfg.name}.md", "w") as f:
        f.write(f"## {cfg.name}\n```py\n")
        f.write(str(network))
        f.write("\n```")

    with open(f"{cfg.name}-summary.md", "w") as f:
        f.write(f"## {cfg.name}-summary\n```py\n")
        f.write(str(ModelSummary(gym, "full")))
        f.write("\n```")

    if cfg.logger:
        logger_ = WandbLogger(
            name=f"{cfg.optim}",
            project=cfg.name,
        )
        logger_.watch(network, "all")
    else:
        logger_ = True

    ckpt = ModelCheckpoint("ckpt/{epoch}", prefix="-" + cfg.name) if cfg.ckpt else False
    trainer = Trainer(**cfg.pl, logger=logger_, checkpoint_callback=ckpt)
    trainer.fit(gym, datamodule=dm)
    if cfg.test:
        trainer.test(datamodule=dm)
Example #5
def test_run(directory: Path):
    model_cls, hparams = get_model_class_and_hparams(directory)
    val_loader, test_loader = get_val_and_test_dataloader(hparams)
    checkpoint = get_checkpoint_path(directory)

    model = model_cls.load_from_checkpoint(checkpoint, **hparams)
    trainer = pl.Trainer(
        gpus=-1 if GPU_AVAILABLE else None,
        logger=False,
        accelerator='ddp'
    )
    val_results = trainer.test(model, test_dataloaders=val_loader)[0]
    test_results = trainer.test(model, test_dataloaders=test_loader)[0]
    summary = ModelSummary(model, 'top')
    n_params = sum(summary.param_nums)
    result = {
        **hparams,
        **{
            key.replace('test', 'val'): float(value)
            for key, value in val_results.items()
        },
        **{key: float(value) for key, value in test_results.items()},
        'n_parameters': n_params
    }
    return result
Example #6
        def on_train_start(self, trainer, pl_module):
            """
            Logs Optimizer related metrics when the train begins

            :param trainer: pytorch lightning trainer instance
            :param pl_module: pytorch lightning base module
            """
            try_mlflow_log(mlflow.set_tag, "Mode", "training")
            try_mlflow_log(mlflow.log_param, "epochs", trainer.max_epochs)

            for callback in trainer.callbacks:
                if isinstance(callback, pl.callbacks.early_stopping.EarlyStopping):
                    self.early_stopping = True
                    self._log_early_stop_params(callback)

            if hasattr(trainer, "optimizers"):
                for optimizer in trainer.optimizers:
                    try_mlflow_log(mlflow.log_param, "optimizer_name", type(optimizer).__name__)
                    optimizer_name = type(optimizer).__name__.lower() + "_optimizer"

                    if hasattr(optimizer, "defaults"):
                        optim_dict = optimizer.defaults

                        if "lr" in optim_dict:
                            try_mlflow_log(
                                mlflow.log_param,
                                "learning_rate_" + optimizer_name,
                                optim_dict["lr"],
                            )

                        if "eps" in optim_dict:
                            try_mlflow_log(
                                mlflow.log_param, "epsilon_" + optimizer_name, optim_dict["eps"]
                            )

                        if "betas" in optim_dict:
                            try_mlflow_log(
                                mlflow.log_param, "betas_" + optimizer_name, optim_dict["betas"]
                            )

                        if "weight_decay" in optim_dict:
                            try_mlflow_log(
                                mlflow.log_param,
                                "weight_decay_" + optimizer_name,
                                optim_dict["weight_decay"],
                            )

            summary = str(ModelSummary(pl_module, mode="full"))
            tempdir = tempfile.mkdtemp()
            try:
                summary_file = os.path.join(tempdir, "model_summary.txt")
                with open(summary_file, "w") as f:
                    f.write(summary)

                try_mlflow_log(mlflow.log_artifact, local_path=summary_file)
            finally:
                shutil.rmtree(tempdir)
Example #7
    def _pre_training_setup(self, model: BasePipeline) -> None:
        """Set up DataLoaders, optimizers, learning rate schedulers, etc. before training starts."""
        self.train_dataloader = model.train_dataloader()
        self.val_dataloader = model.val_dataloader()

        if self.verbosity != 0 and self.weights_summary is not None:
            try:
                max_depth = ModelSummary.MODES[self.weights_summary]
                print(ModelSummary(model, max_depth=max_depth))
            except TypeError:
                # fall back to the old ``ModelSummary`` API (``mode``, removed in v1.6)
                print(ModelSummary(model, mode=self.weights_summary))

        # log model hyperparameters, if applicable
        if self.logger is not None:
            self.logger.log_hyperparams(model.hparams)
            self.logger.save()

        # move the model over to the device
        model.to(self.device)
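An explicit version gate is an alternative to the try/except above, similar in spirit to the Version check in Example #13 below. A sketch, assuming packaging is installed; build_summary is a hypothetical helper, and the fallback import covers releases that kept ModelSummary in core.memory:

from packaging.version import Version
import pytorch_lightning as pl

try:  # 1.5+ location
    from pytorch_lightning.utilities.model_summary import ModelSummary
except ImportError:  # older releases kept it in core.memory
    from pytorch_lightning.core.memory import ModelSummary

def build_summary(module, weights_summary="full"):
    # Sketch: choose the ModelSummary signature by installed version.
    if Version(pl.__version__) < Version("1.5.0"):
        return ModelSummary(module, mode=weights_summary)  # pre-1.5 API
    max_depth = -1 if weights_summary == "full" else 1     # 'top' maps to depth 1
    return ModelSummary(module, max_depth=max_depth)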
Example #8

def test_v1_6_0_deprecated_model_summary_mode(tmpdir):
    model = BoringModel()
    with pytest.deprecated_call(match="Argument `mode` in `ModelSummary` is deprecated in v1.4"):
        ModelSummary(model, mode="top")

    with pytest.deprecated_call(match="Argument `mode` in `LightningModule.summarize` is deprecated in v1.4"):
        model.summarize(mode="top")
Example #9

	def __init__(self, in_dims=None, num_classes=None, **kwargs):

		super().__init__()
		self.save_hyperparameters()

		actfunc = torch.nn.LeakyReLU
		if self.hparams.model == 'nn':
			in_features = prod(in_dims)
			self.nn = Sequential(
						Flatten(1, -1),
						Linear(in_features, self.hparams.num_hidden),
						actfunc(),
						Linear(self.hparams.num_hidden, self.hparams.num_hidden),
						actfunc(),
						Linear(self.hparams.num_hidden, self.hparams.num_hidden),
						actfunc(),
						# two more hidden Linear + actfunc blocks were tried and left commented out
						Linear(self.hparams.num_hidden, num_classes)
						)
		elif self.hparams.model == 'cnn':
			layer_args = {'kernel_size': 5, 'padding': 2, 'stride': 1}
			self.nn = Sequential(
						Conv2d(in_channels=in_dims[0], out_channels=96, **layer_args),
						BatchNorm2d(96),
						actfunc(),
						MaxPool2d(kernel_size=2, stride=2),
						Conv2d(in_channels=96, out_channels=128, **layer_args),
						BatchNorm2d(128),
						actfunc(),
						MaxPool2d(kernel_size=2, stride=2),
						Conv2d(in_channels=128, out_channels=256, **layer_args),
						BatchNorm2d(256),
						actfunc(),
						MaxPool2d(kernel_size=2, stride=2),
						Conv2d(in_channels=256, out_channels=128, **layer_args),
						BatchNorm2d(128),
						actfunc(),
						MaxPool2d(kernel_size=2, stride=2),
						AdaptiveInit_FlattenAndLinear(self.hparams.num_hidden),
						actfunc(),
						Linear(self.hparams.num_hidden, num_classes)
					)
			self.nn(torch.randn(1, *in_dims))  # dry-run forward so AdaptiveInit_FlattenAndLinear can infer its input size

		elif self.hparams.model == 'resnet18':
			self.nn = ResNet18(in_dims[0])

		self.criterion = CrossEntropyLoss()
		self.summary = ModelSummary(model=self)
Example #10
def test_lazy_model_summary():
    """ Test that the model summary can work with lazy layers. """
    lazy_model = LazyModel()
    summary = ModelSummary(lazy_model)

    with pytest.warns(
            UserWarning,
            match=r"A layer with UninitializedParameter was found. "
            r"Thus, the total number of parameters detected may be inaccurate."
    ):
        if _TORCH_GREATER_EQUAL_1_9:
            assert summary.total_parameters == 0
            assert summary.trainable_parameters == 0
        else:
            # bug in 1.8: the bias of a LazyLinear layer is initialized!
            # https://github.com/pytorch/pytorch/issues/58350
            assert summary.total_parameters == 7
            assert summary.trainable_parameters == 7
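For context, the zero counts come from torch.nn lazy modules deferring parameter shapes until the first forward pass. A sketch with LazyToy, a hypothetical stand-in for the LazyModel used in the test:

import torch
from torch import nn
import pytorch_lightning as pl
from pytorch_lightning.utilities.model_summary import ModelSummary

class LazyToy(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = nn.LazyLinear(2)  # weight starts as an UninitializedParameter

    def forward(self, x):
        return self.layer(x)

model = LazyToy()
print(ModelSummary(model).total_parameters)  # 0 (plus a UserWarning) on torch >= 1.9
model(torch.randn(1, 3))                     # materializes a (2, 3) weight and a bias
print(ModelSummary(model).total_parameters)  # now 8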
Example #11
            def on_train_start(self, trainer, pl_module):
                """
                Logs Optimizer related metrics when the train begins

                :param trainer: pytorch lightning trainer instance
                :param pl_module: pytorch lightning base module
                """
                try_mlflow_log(mlflow.set_tag, "Mode", "training")
                try_mlflow_log(mlflow.log_param, "epochs", trainer.max_epochs)

                for callback in trainer.callbacks:
                    if isinstance(callback,
                                  pl.callbacks.early_stopping.EarlyStopping):
                        self.early_stopping = True
                        self._log_early_stop_params(callback)

                # TODO For logging optimizer params - the following scenarios are to be revisited:
                # 1. Currently only the first optimizer's details are logged.
                #    The code should be enhanced to log params when multiple optimizers are used.
                # 2. mlflow.log_params is used to store optimizer default values in mlflow.
                #    The keys in the defaults dictionary are too short, e.g. "lr" for learning_rate.
                #    An efficient mapping technique needs to be introduced
                #    to rename the optimizer parameters based on the keys in the defaults dictionary.

                if hasattr(trainer, "optimizers"):
                    optimizer = trainer.optimizers[0]
                    try_mlflow_log(mlflow.log_param, "optimizer_name",
                                   _get_optimizer_name(optimizer))

                    if hasattr(optimizer, "defaults"):
                        try_mlflow_log(mlflow.log_params, optimizer.defaults)

                summary = str(ModelSummary(pl_module, mode="full"))
                tempdir = tempfile.mkdtemp()
                try:
                    summary_file = os.path.join(tempdir, "model_summary.txt")
                    with open(summary_file, "w") as f:
                        f.write(summary)

                    try_mlflow_log(mlflow.log_artifact,
                                   local_path=summary_file)
                finally:
                    shutil.rmtree(tempdir)
Example #12
    def summarize(self, mode):
        model_summary = ModelSummary(self, mode=mode)
        log.info('\n' + str(model_summary))
Example #13
def patched_fit(original, self, *args, **kwargs):
    """
    A patched implementation of `pytorch_lightning.Trainer.fit` which enables logging the
    following parameters, metrics and artifacts:

    - Training epochs
    - Optimizer parameters
    - `EarlyStoppingCallback`_ parameters
    - Metrics stored in `trainer.callback_metrics`
    - Model checkpoints
    - Trained model

    .. _EarlyStoppingCallback:
        https://pytorch-lightning.readthedocs.io/en/latest/early_stopping.html
    """
    run_id = mlflow.active_run().info.run_id
    tracking_uri = mlflow.get_tracking_uri()
    client = MlflowAutologgingQueueingClient(tracking_uri)
    metrics_logger = BatchMetricsLogger(run_id, tracking_uri)

    log_models = get_autologging_config(mlflow.pytorch.FLAVOR_NAME,
                                        "log_models", True)
    log_every_n_epoch = get_autologging_config(mlflow.pytorch.FLAVOR_NAME,
                                               "log_every_n_epoch", 1)
    log_every_n_step = get_autologging_config(mlflow.pytorch.FLAVOR_NAME,
                                              "log_every_n_step", None)

    early_stop_callback = None
    for callback in self.callbacks:
        if isinstance(callback, pl.callbacks.early_stopping.EarlyStopping):
            early_stop_callback = callback
            _log_early_stop_params(early_stop_callback, client, run_id)

    if not any(
            isinstance(cb, __MLflowPLCallback) for cb in self.callbacks):
        self.callbacks += [
            __MLflowPLCallback(client, metrics_logger, run_id, log_models,
                               log_every_n_epoch, log_every_n_step)
        ]

    client.flush(synchronous=False)

    result = original(self, *args, **kwargs)

    if early_stop_callback is not None:
        _log_early_stop_metrics(early_stop_callback, client, run_id)

    if Version(pl.__version__) < Version("1.4.0"):
        summary = str(ModelSummary(self.model, mode="full"))
    else:
        summary = str(ModelSummary(self.model, max_depth=-1))

    tempdir = tempfile.mkdtemp()
    try:
        summary_file = os.path.join(tempdir, "model_summary.txt")
        with open(summary_file, "w") as f:
            f.write(summary)

        mlflow.log_artifact(local_path=summary_file)
    finally:
        shutil.rmtree(tempdir)

    if log_models:
        registered_model_name = get_autologging_config(
            mlflow.pytorch.FLAVOR_NAME, "registered_model_name", None)
        mlflow.pytorch.log_model(
            pytorch_model=self.model,
            artifact_path="model",
            registered_model_name=registered_model_name,
        )

        if early_stop_callback is not None and self.checkpoint_callback.best_model_path:
            mlflow.log_artifact(
                local_path=self.checkpoint_callback.best_model_path,
                artifact_path="restored_model_checkpoint",
            )

    client.flush(synchronous=True)

    return result
Example #14
    # encoder = Encoder()
    # print(encoder)
    # print("x.shape:", x.shape)
    # z, shortcuts = encoder(x)
    # print("z.shape:", z.shape)
    # print("Shortcuts shape:", [shortcut.shape for shortcut in shortcuts])

    # # Test Decoder class
    # z_shape = (32, 256, 54)
    # z = torch.rand(z_shape)
    # decoder = Decoder()
    # print(decoder)
    # x_hat = decoder(z, None)
    # print("x_hat.shape:", x_hat.shape)

    # Test UTimeModel Class
    # utime = UTimeModel(in_channels=in_channels)
    utime = UTimeModel(vars(args))
    utime.example_input_array = torch.zeros(x_shape)  # lets ModelSummary report input/output sizes per layer
    utime.configure_optimizers()
    model_summary = ModelSummary(utime, "full")
    print(model_summary)
    print(utime)
    print(x.shape)
    # z = utime(x)
    z = utime.classify_segments(x)
    print(z.shape)
    print("x.shape:", x.shape)
    print("z.shape:", z.shape)
    print(z.sum(dim=1))
Example #15
    def summarize(self, mode):
        model_summary = ModelSummary(self, mode=mode)
        logging.info(model_summary)
Example #16
        self.myModel = Module(in_channels=1,
                              out_channels=self.out_classes,
                              dimensions=3)

    def forward(self, x):
        # return self.unet(x)
        return self.myModel(x)


class HighResNetModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.example_input_array = torch.zeros(1, 1, 96, 96, 96)

        self.unet = HighResNet(in_channels=1, out_channels=139, dimensions=3)

    def forward(self, x):
        return self.unet(x)


if __name__ == "__main__":
    # HighResNet = HighResNetModel()
    # print("highResNet Model:")
    # print(ModelSummary(HighResNet, mode="full"))

    newNet = NewModel()
    print("newNet Model:")
    print(ModelSummary(newNet, mode="full"))