import math
import os
import time
from pathlib import Path

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from PIL import Image
from torch.utils.data import DataLoader, TensorDataset


def generate_masks():
    # https://stackoverflow.com/questions/42703500/best-way-to-save-a-trained-model-in-pytorch
    model = UNET(in_channels=3, out_channels=1)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # map_location lets a CUDA-trained checkpoint load on a CPU-only machine
    model.load_state_dict(torch.load(os.path.join("model", "blueno_detection.pth"),
                                     map_location=device))
    model.to(device)
    model.eval()

    test_x = []
    image_names = []  # image names, used to label the prediction masks appropriately
    test_images_dir = os.path.join(my_path, "test_model", "test_images")
    for path_img in os.listdir(test_images_dir):
        image_names.append(path_img)
        full_path_img = os.path.join(test_images_dir, path_img)
        image = np.array(Image.open(full_path_img).convert("RGB"))
        augmentations = transform(image=image)
        test_x.append(augmentations["image"])
    test_x = torch.stack(test_x)

    # the second test_x is a dummy target so the dataset has the
    # (input, target) structure that TensorDataset expects
    test_data = TensorDataset(test_x, test_x)
    test_loader = DataLoader(
        test_data,
        batch_size=1,
        num_workers=1,
        pin_memory=True,
        shuffle=False,
    )

    # recreate the output folder and clear any masks left from a previous run
    masks_dir = os.path.join(my_path, "test_model", "generated_masks")
    Path(masks_dir).mkdir(parents=True, exist_ok=True)
    for file in os.listdir(masks_dir):
        os.remove(os.path.join(masks_dir, file))

    save_prediction_masks(test_loader, model, image_names,
                          "test_model/generated_masks/", device)
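
# `generate_masks` depends on a `save_prediction_masks` helper, an
# albumentations-style `transform`, and `my_path`/`UNET`, all defined
# elsewhere in this repo. Below is a minimal sketch of the helper, assuming
# the model emits one-channel logits that are sigmoid-activated, thresholded
# at 0.5, and written out as 8-bit mask images; only the signature is taken
# from the call above, the body is an assumption:
def save_prediction_masks(loader, model, image_names, out_folder, device):
    model.eval()
    with torch.no_grad():
        for idx, (x, _) in enumerate(loader):  # target is the dummy copy
            x = x.to(device)
            preds = torch.sigmoid(model(x))          # logits -> probabilities
            mask = (preds > 0.5).float().squeeze(0)  # binarize, drop batch dim
            # scale to 0-255 and save under the source image's file name
            mask_img = Image.fromarray(
                (mask.squeeze(0).cpu().numpy() * 255).astype(np.uint8)
            )
            mask_img.save(os.path.join(out_folder, image_names[idx]))
    model.train()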
def train():
    # network = EncoderDecoder()  # alternative encoder-decoder backbone
    network = UNET()
    network = nn.DataParallel(network)

    # optionally warm-start from a previously trained checkpoint
    try:
        if pretrained_model_file_path is not None and os.path.isfile(pretrained_model_file_path):
            network.load_state_dict(torch.load(pretrained_model_file_path))
            print('Network weights initialized from file at:',
                  os.path.abspath(pretrained_model_file_path))
    except Exception:
        print('Unable to initialize network weights from file at:',
              os.path.abspath(pretrained_model_file_path))
    network.to(MODEL['DEVICE'])
    network.train()

    train_dataset = NoiseDataloader(dataset_type=NoiseDataloader.TRAIN,
                                    noisy_per_image=DATASET['NOISY_PER_IMAGE'],
                                    noise_type=DATASET['NOISE_TYPE'])
    train_batcher = DataLoader(dataset=train_dataset,
                               batch_size=MODEL['BATCH_SIZE'],
                               shuffle=True,
                               num_workers=MODEL['NUM_WORKERS'])
    optimizer = optim.Adam(network.parameters(),
                           lr=OPTIMIZER['LR'],
                           betas=OPTIMIZER['BETAS'],
                           eps=OPTIMIZER['EPSILON'])

    # each training run gets its own numbered folder for checkpoints
    instance = 0
    while os.path.isdir(os.path.join(pp.trained_models_folder_path,
                                     'Instance_' + str(instance).zfill(3))):
        instance += 1
    instance_folder_path = os.path.join(pp.trained_models_folder_path,
                                        'Instance_' + str(instance).zfill(3))
    os.mkdir(instance_folder_path)

    num_batches = math.floor(len(train_dataset) / MODEL['BATCH_SIZE'])
    for epoch in range(MODEL['NUM_EPOCHS']):
        epoch_start_time = time.time()
        print('-' * 80)
        print('Epoch: {} of {}...'.format(epoch + 1, MODEL['NUM_EPOCHS']))

        epoch_loss = 0.0
        batch_counter = 1
        for batch in train_batcher:  # get batch
            print('\tProcessing Batch: {} of {}...'.format(batch_counter, num_batches))
            batch_counter += 1

            input_noisy_patch, output_noisy_patch = batch
            input_noisy_patch = input_noisy_patch.to(MODEL['DEVICE'])
            output_noisy_patch = output_noisy_patch.to(MODEL['DEVICE'])

            denoised_input_patch = network(input_noisy_patch)      # forward pass
            loss = OPTIMIZER['LOSS_FUNCTION'](denoised_input_patch,
                                              output_noisy_patch)  # calculate loss
            # .item() detaches the scalar so the computation graph is not kept alive
            epoch_loss += loss.item()

            optimizer.zero_grad()
            loss.backward()   # calculate gradients
            optimizer.step()  # update weights

            print('\tBatch (Train) Loss:', loss.item())
            print()

        epoch_end_time = time.time()
        torch.save(network.state_dict(),
                   os.path.join(instance_folder_path,
                                'Model_Epoch_{}.pt'.format(str(epoch).zfill(3))))
        print('Epoch (Train) Loss:', epoch_loss)
        print('Epoch (Train) Time:', epoch_end_time - epoch_start_time)
        print('-' * 80)
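
# `train` assumes module-level config dictionaries (MODEL, DATASET, OPTIMIZER),
# a `pp` module exposing `trained_models_folder_path`, and the Noise2Noise-style
# `NoiseDataloader` dataset, all defined elsewhere in this repo. Below is a
# minimal sketch of the configuration shape implied by the lookups above; the
# keys come from the code, the placeholder values are assumptions:
if __name__ == '__main__':
    MODEL = {
        'DEVICE': torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
        'BATCH_SIZE': 16,
        'NUM_WORKERS': 4,
        'NUM_EPOCHS': 100,
    }
    DATASET = {
        'NOISY_PER_IMAGE': 2,       # noisy realizations drawn per source image
        'NOISE_TYPE': 'gaussian',
    }
    OPTIMIZER = {
        'LR': 1e-3,
        'BETAS': (0.9, 0.999),
        'EPSILON': 1e-8,
        'LOSS_FUNCTION': nn.MSELoss(),  # L2 pairs naturally with Gaussian noise
    }
    pretrained_model_file_path = None   # set to a checkpoint path to warm-start

    train()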