    def __init__(self, config, split, dataset):
        self.n_epochs = config.n_epochs
        self.split = split
        self._time_start = ""
        self._time_end = ""
        self.epoch = 0
        self.name = config.name
        self.patch_size = config.patch_size

        # Create output folders
        dirname = f'{time.strftime("%Y-%m-%d_%H%M", time.gmtime())}_{self.name}'
        self.out_dir = os.path.join(config.test_results_dir, dirname)
        os.makedirs(self.out_dir, exist_ok=True)

        # Create data loaders
        # TASK: the SlicesDataset class is not complete. Go to its file and
        # complete it (a minimal sketch of the expected interface appears at
        # the bottom of this file).
        # Note that we are using a 2D version of UNet here, which means that
        # it expects batches of 2D slices.
        self.train_loader = DataLoader(SlicesDataset(dataset[split["train"]]),
                                       batch_size=config.batch_size,
                                       shuffle=True,
                                       num_workers=0)
        self.val_loader = DataLoader(SlicesDataset(dataset[split["val"]]),
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=0)
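        # (num_workers=0 keeps data loading in the main process; raising it
        # loads batches in parallel worker processes if the environment allows)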

        # We will access volumes directly for testing
        self.test_data = dataset[split["test"]]

        # Do we have CUDA available?
        if not torch.cuda.is_available():
            print(
                "WARNING: No CUDA device is found. This may take significantly longer!"
            )
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        # Configure our model and other training components.
        # We will use a recursive UNet model from the German Cancer Research Center,
        # Division of Medical Image Computing. It is fairly complex and works
        # very well on this task. Feel free to explore it or plug in your own model.
        self.model = UNet(num_classes=3)
        self.model.to(self.device)
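        # (num_classes=3 here presumably corresponds to background plus two
        # foreground labels; this is an assumption about the task at hand)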

        # We are using a standard cross-entropy loss since segmentation is
        # essentially per-pixel classification: the model produces a score for
        # each class at every pixel, and the loss compares these scores with
        # the ground-truth label map
        self.loss_function = torch.nn.CrossEntropyLoss()
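        # NOTE: CrossEntropyLoss applies log-softmax internally, so it expects
        # raw (unnormalized) class scores of shape [N, C, H, W] together with
        # integer label maps of shape [N, H, W]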

        # We are using the Adam optimizer to update the model weights
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=config.learning_rate)
        # The scheduler lowers the learning rate automatically when the
        # monitored metric stops improving
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, 'min')
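        # Usage note: ReduceLROnPlateau must be stepped with the monitored
        # metric, e.g. self.scheduler.step(val_loss) once per epoch after
        # validation (assumed call site; the metric name is illustrative)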

        # Set up Tensorboard. By default it saves data into the runs folder;
        # to check the training results, launch TensorBoard separately and
        # point it at that folder.
        self.tensorboard_train_writer = SummaryWriter(comment="_train")
        self.tensorboard_val_writer = SummaryWriter(comment="_val")
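        # Usage sketch (assumed call sites elsewhere in this class, not part of
        # the project's confirmed code): during training we might log scalars via
        #   self.tensorboard_train_writer.add_scalar("loss", loss.item(), step)
        #   self.tensorboard_val_writer.add_scalar("loss", val_loss, step)
        # and then inspect them with `tensorboard --logdir runs`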
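
# --- Hypothetical sketch of the SlicesDataset referenced above ---
# The real class lives in its own file (see the TASK comment in __init__); this
# commented-out version only illustrates the interface the DataLoader expects:
# a torch.utils.data.Dataset whose items are single 2D slices. All names and
# shapes below are assumptions, not the project's actual code.
#
# class SlicesDataset(torch.utils.data.Dataset):
#     def __init__(self, data):
#         self.data = data
#         # Build a flat index of (volume index, slice index) pairs so each
#         # dataset item maps to exactly one 2D slice
#         self.slices = []
#         for i, d in enumerate(data):
#             for j in range(d["image"].shape[0]):
#                 self.slices.append((i, j))
#
#     def __getitem__(self, idx):
#         vol, slc = self.slices[idx]
#         # Add a channel dimension so the 2D UNet receives [1, H, W] tensors
#         return {
#             "id": idx,
#             "image": torch.from_numpy(self.data[vol]["image"][slc][None, :]),
#             "seg": torch.from_numpy(self.data[vol]["seg"][slc][None, :]),
#         }
#
#     def __len__(self):
#         return len(self.slices)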