# Module-level imports needed by this constructor (SlicesDataset and UNet are
# project-local; their exact import paths depend on this repo's layout)
import os
import time

import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter


def __init__(self, config, split, dataset):
    self.n_epochs = config.n_epochs
    self.split = split
    self._time_start = ""
    self._time_end = ""
    self.epoch = 0
    self.name = config.name

    # Create output folders
    dirname = f'{time.strftime("%Y-%m-%d_%H%M", time.gmtime())}_{self.name}'
    self.out_dir = os.path.join(config.test_results_dir, dirname)
    os.makedirs(self.out_dir, exist_ok=True)
    self.out_images_dir = os.path.join(self.out_dir, "images")
    os.makedirs(self.out_images_dir, exist_ok=True)

    # Create data loaders. Note that we are using a 2D version of UNet here,
    # which means that it will expect batches of 2D slices.
    self.train_loader = DataLoader(SlicesDataset(dataset[split["train"]]),
                                   batch_size=config.batch_size, shuffle=True, num_workers=0)
    self.val_loader = DataLoader(SlicesDataset(dataset[split["val"]]),
                                 batch_size=config.batch_size, shuffle=True, num_workers=0)

    # We will access volumes directly for testing
    self.test_data = dataset[split["test"]]

    # Do we have CUDA available?
    if not torch.cuda.is_available():
        print("WARNING: No CUDA device is found. This may take significantly longer!")
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Configure our model and other training implements. We use a recursive UNet
    # model from the German Cancer Research Center, Division of Medical Image
    # Computing. It is quite complicated and works very well on this task.
    self.model = UNet(num_classes=3)
    self.model.to(self.device)

    # We use a standard cross-entropy loss for per-pixel classification.
    # CrossEntropyLoss expects class scores of shape [N, C, H, W] together with
    # integer class targets of shape [N, H, W].
    self.loss_function = torch.nn.CrossEntropyLoss()

    # We use the Adam optimizer to update the model weights
    self.optimizer = optim.Adam(self.model.parameters(), lr=config.learning_rate)

    # The scheduler lowers the learning rate automatically when the monitored
    # quantity (validation loss) plateaus
    self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min')

    # Set up TensorBoard. By default it saves data into the runs folder; launch
    # TensorBoard separately (e.g. `tensorboard --logdir runs`) to view it.
    self.tensorboard_train_writer = SummaryWriter(comment="_train")
    self.tensorboard_val_writer = SummaryWriter(comment="_val")
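
# A minimal sketch of the CrossEntropyLoss contract relied on above. The shapes
# below (2 slices, 3 classes, 64x64 pixels) are illustrative, not taken from
# this project's configuration.
def _loss_contract_demo():
    logits = torch.randn(2, 3, 64, 64)           # per-pixel class scores [N, C, H, W]
    targets = torch.randint(0, 3, (2, 64, 64))   # integer class labels [N, H, W]
    loss = torch.nn.CrossEntropyLoss()(logits, targets)
    return loss.item()                           # a single scalar averaged over all pixels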
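
# A minimal usage sketch of this constructor. Everything below is an assumption
# for illustration: the enclosing class name (UNetExperiment), the Config
# dataclass, and the hypothetical load_volumes() helper are not confirmed by
# this file. In practice this would live in a separate driver script.
from dataclasses import dataclass

import numpy as np


@dataclass
class Config:
    name: str = "unet_experiment"
    n_epochs: int = 10
    batch_size: int = 8
    learning_rate: float = 2e-4
    test_results_dir: str = "out"


volumes = np.array(load_volumes(), dtype=object)  # hypothetical loader of volume dicts
keys = np.arange(len(volumes))
np.random.shuffle(keys)
split = {
    "train": keys[: int(0.7 * len(keys))],
    "val": keys[int(0.7 * len(keys)): int(0.9 * len(keys))],
    "test": keys[int(0.9 * len(keys)):],
}
experiment = UNetExperiment(Config(), split, volumes)  # assumed class name
# During validation, ReduceLROnPlateau is typically stepped with the metric it
# monitors, e.g. experiment.scheduler.step(mean_val_loss).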