def main():
    """Train a single-channel UNET with BCE-with-logits loss.

    Each epoch: run one training pass, checkpoint the model/optimizer state,
    evaluate on the validation set, and dump prediction images to disk.
    """
    model = UNET(in_channels=1, out_channels=1).to(device=DEVICE)
    loss_fn = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

    train_loader, val_loader = get_loaders(
        TRAIN_DIR, VAL_DIR, BATCH_SIZE, NUM_WORKER, PIN_MEMORY
    )

    # Optionally resume from a previously saved checkpoint.
    if LOAD_MODEL:
        load_checkpoint(torch.load("mycheckpoint.pth.tar"), model)

    # Gradient scaler for mixed-precision training (requires CUDA).
    scaler = torch.cuda.amp.GradScaler()

    for epoch in range(NUM_EPOCHS):
        train_fn(train_loader, model, optimizer, loss_fn, scaler)

        # Save a checkpoint after every epoch.
        checkpoint = {
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        save_checkpoint(checkpoint)

        # Evaluate on the validation set.
        check_accuracy(val_loader, model, device=DEVICE)

        # Write example predictions to a folder for visual inspection.
        save_predictions_as_imgs(val_loader, model, folder='saved_imgs/')
def main():
    """Train a 3-channel UNET for binary segmentation.

    Builds augmentation pipelines, trains with mixed precision, and after each
    epoch checkpoints, evaluates, and writes example predictions to disk.
    """
    # Training pipeline: resize + geometric augmentation + normalization.
    # mean 0 / std 1 with max_pixel_value=255 simply rescales uint8 to [0, 1].
    train_transform = A.Compose([
        A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
        A.Rotate(limit=35, p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.1),
        A.Normalize(
            mean=[0.0, 0.0, 0.0],
            std=[1.0, 1.0, 1.0],
            max_pixel_value=255.0,
        ),
        ToTensorV2(),
    ])

    # Validation pipeline: no augmentation, only resize + normalization.
    val_transforms = A.Compose([
        A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
        A.Normalize(
            mean=[0.0, 0.0, 0.0],
            std=[1.0, 1.0, 1.0],
            max_pixel_value=255.0,
        ),
        ToTensorV2(),
    ])

    model = UNET(3, 1).to(DEVICE)
    loss_fn = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

    train_loader, val_loader = get_loaders(
        TRAIN_IMG_DIR,
        TRAIN_MASK_DIR,
        VAL_IMG_DIR,
        VAL_MASK_DIR,
        BATCH_SIZE,
        train_transform,
        val_transforms,
        NUM_WORKERS,
        PIN_MEMORY,
    )

    # Baseline accuracy of the untrained model.
    check_accuracy(val_loader, model, device=DEVICE)

    scaler = torch.cuda.amp.GradScaler()

    for epoch in range(NUM_EPOCHS):
        train_fn(train_loader, model, optimizer, loss_fn, scaler)

        # Checkpoint model and optimizer state after each epoch.
        save_checkpoint({
            "state_dict": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        })

        # check accuracy
        check_accuracy(val_loader, model, device=DEVICE)

        # print some examples to a folder
        save_predictions_as_imgs(val_loader, model, folder="saved_images/", device=DEVICE)
def main():
    """Train a 3-channel UNet for binary segmentation, driven by `config`.

    Builds augmentation pipelines, optionally resumes from a checkpoint, then
    trains with mixed precision, checkpointing/evaluating after every epoch.
    """
    # BUGFIX: albumentations' transform is `A.Normalize` (capital N) —
    # `A.normalize` does not exist and raised AttributeError at import of the
    # pipeline. Also, `ToTensorV2` must be *instantiated* (`ToTensorV2()`);
    # passing the class object gives Compose a non-transform and fails at call
    # time.
    train_transform = A.Compose([
        A.Resize(height=config.IMAGE_HEIGHT, width=config.IMAGE_WIDTH),
        A.Rotate(limit=35, p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.1),
        # mean 0 / std 1 with max_pixel_value=255 rescales uint8 to [0, 1].
        A.Normalize(mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0], max_pixel_value=255.0),
        ToTensorV2(),
    ])
    val_transform = A.Compose([
        A.Resize(height=config.IMAGE_HEIGHT, width=config.IMAGE_WIDTH),
        A.Normalize(mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0], max_pixel_value=255.0),
        ToTensorV2(),
    ])

    model = UNet(in_channels=3, out_channels=1).to(config.DEVICE)
    loss_fn = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=config.LEARNING_RATE)

    train_loader, val_loader = get_loaders(
        config.TRAIN_IMAGE_DIR,
        config.TRAIN_MASK_DIR,
        config.VAL_IMG_DIR,
        config.VAL_MASK_DIR,
        config.BATCH_SIZE,
        train_transform,
        val_transform,
    )

    # Optionally resume from a previously saved checkpoint.
    if config.LOAD_MODEL:
        load_checkpoint(torch.load('my_checkpoint.pth.tar'), model)

    # Gradient scaler for mixed-precision training.
    scaler = torch.cuda.amp.GradScaler()

    for epoch in range(config.NUM_EPOCHS):
        train_fn(train_loader, model, optimizer, loss_fn, scaler)

        # save model
        checkpoint = {
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        save_checkpoint(checkpoint)

        # check accuracy
        check_accuracy(val_loader, model, device=config.DEVICE)

        # print some examples to a folder
        save_predictions_as_imgs(val_loader, model, folder='saved_images', device=config.DEVICE)
def main():
    """Train a 3-channel UNET for binary segmentation with a combined loss.

    Trains with mixed precision; after each epoch evaluates on the validation
    set and writes example predictions to disk. Checkpoint saving is currently
    disabled (see note in the loop).
    """
    # Training pipeline: resize + geometric augmentation + normalization.
    # mean 0 / std 1 with max_pixel_value=255 rescales uint8 to [0, 1].
    train_transforms = A.Compose([
        A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
        A.Rotate(limit=35, p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.1),
        A.Normalize(mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0], max_pixel_value=255.0),
        ToTensorV2()
    ])
    val_transforms = A.Compose([
        A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
        A.Normalize(mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0], max_pixel_value=255.0),
        ToTensorV2()
    ])

    model = UNET(in_channels=3, out_channels=1).to(DEVICE)

    # BUGFIX: removed a dead `loss_fn = nn.BCEWithLogitsLoss()` assignment
    # that was immediately overwritten; the combined loss below is the one
    # actually used for training.
    loss_fn = ComboLoss({'bce': 0.4, 'dice': 0.5, 'focal': 0.1})
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

    train_loader, val_loader = get_loaders(
        TRAIN_IMG_DIR, TRAIN_MASK_DIR, VAL_IMG_DIR, VAL_MASK_DIR,
        BATCH_SIZE, train_transforms, val_transforms
    )

    # Optionally resume from a previously saved checkpoint.
    if LOAD_MODEL:
        load_checkpoint(torch.load('checkpoint.pth'), model)

    scaler = torch.cuda.amp.GradScaler()

    for epoch in range(NUM_EPOCHS):
        train_fn(train_loader, model, optimizer, loss_fn, scaler)

        # NOTE(review): checkpoint saving is deliberately disabled here
        # (was commented out). Re-enable by uncommenting the lines below;
        # the dict construction was also removed so no state snapshot is
        # built needlessly on every epoch.
        # checkpoint = {
        #     "state_dict": model.state_dict(),
        #     "optimizer": optimizer.state_dict(),
        # }
        # save_checkpoint(checkpoint)

        # check accuracy
        check_accuracy(val_loader, model, DEVICE)

        # print some examples to a folder
        save_predictions_as_imgs(val_loader, model, device=DEVICE)
def predict():
    """Load a trained UNET checkpoint and write validation predictions to disk.

    No training happens here: the checkpoint is loaded onto CPU if necessary
    (`map_location`) and predictions for the validation loader are saved as
    images.
    """
    # BUGFIX: removed leftover debug output `print('asdasdasd')`.
    model = UNET(in_channels=3, out_channels=1).to(DEVICE)
    load_checkpoint(
        torch.load("my_checkpoint.pth.tar", map_location=torch.device('cpu')),
        model,
    )

    # NOTE(review): get_loaders requires both transforms even though only the
    # validation loader is used below.
    train_transform = A.Compose(
        [
            A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
            A.Rotate(limit=35, p=1.0),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.1),
            A.Normalize(
                mean=[0.0, 0.0, 0.0],
                std=[1.0, 1.0, 1.0],
                max_pixel_value=255.0,
            ),
            ToTensorV2(),
        ],
    )
    val_transforms = A.Compose(
        [
            A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
            A.Normalize(
                mean=[0.0, 0.0, 0.0],
                std=[1.0, 1.0, 1.0],
                max_pixel_value=255.0,
            ),
            ToTensorV2(),
        ],
    )

    train_loader, val_loader = get_loaders(
        TRAIN_IMG_DIR,
        TRAIN_MASK_DIR,
        VAL_IMG_DIR,
        VAL_MASK_DIR,
        BATCH_SIZE,
        train_transform,
        val_transforms,
        NUM_WORKERS,
        PIN_MEMORY,
    )

    save_predictions_as_imgs(
        val_loader, model, folder="saved_images/", device=DEVICE
    )
def main():
    """Train a 3-channel UNET using settings taken entirely from `config`.

    Transforms live in `config`; after each epoch the model is optionally
    checkpointed, evaluated, and its predictions written out as images.
    """
    model = UNET(in_channels=3, out_channels=1).to(config.DEVICE)

    # BCE-with-logits: the model's final layer outputs raw logits.
    BCE = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=config.LEARNING_RATE)

    train_loader, val_loader = get_loaders(
        train_dir=config.TRAIN_IMG_DIR,
        train_mask_dir=config.TRAIN_MASK_DIR,
        val_dir=config.VAL_IMG_DIR,
        val_mask_dir=config.VAL_MASK_DIR,
        batch_size=config.BATCH_SIZE,
        train_transform=config.train_transform,
        val_transform=config.val_transform,
        num_workers=config.NUM_WORKERS,
        pin_memory=config.PIN_MEMORY,
    )

    # Optionally resume, and report the resumed model's validation accuracy.
    if config.LOAD_MODEL:
        load_checkpoint(torch.load(config.CHECKPOINT_PTH), model)
        check_accuracy(val_loader, model)

    # Gradient scaler for mixed-precision training.
    scaler = torch.cuda.amp.GradScaler()

    for epoch in range(config.NUM_EPOCHS):
        train_fn(train_loader, model, optimizer, BCE, scaler, val_loader)

        # Save model/optimizer state if checkpointing is enabled.
        if config.SAVE_MODEL:
            save_checkpoint({
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
            })

        # Validation accuracy for this epoch.
        check_accuracy(val_loader, model)

        # Write example predictions to the configured folder.
        save_predictions_as_imgs(val_loader, model, folder=config.SAVE_IMAGES)
def train_fn(loader, model, optimizer, BCE, scaler, val_loader):
    """Run one mixed-precision training epoch, then dump validation predictions.

    Args:
        loader: training DataLoader yielding (image, mask) batches.
        model: segmentation network producing raw logits.
        optimizer: optimizer updating `model`'s parameters.
        BCE: loss callable applied to (logits, targets).
        scaler: `torch.cuda.amp.GradScaler` for fp16-safe backprop.
        val_loader: validation DataLoader used for the prediction dump.
    """
    progress = tqdm(loader)
    for step, (images, masks) in enumerate(progress):
        images = images.to(device=config.DEVICE)
        # Masks arrive as (N, H, W); add a channel dim to match logits (N, 1, H, W).
        masks = masks.float().unsqueeze(1).to(device=config.DEVICE)

        # Forward pass under autocast (mixed precision).
        with torch.cuda.amp.autocast():
            logits = model(images)
            loss = BCE(logits, masks)

        # Backward pass with gradient scaling to avoid fp16 underflow.
        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        # Show the current batch loss on the progress bar.
        progress.set_postfix(loss=loss.item())

    save_predictions_as_imgs(val_loader, model, folder=config.SAVE_IMAGES)
def main():
    """Train a 3-channel UNET for binary segmentation with AMP.

    We use BCEWithLogitsLoss because the model does not apply a sigmoid on
    its final output layer. With several output channels we would switch to
    a cross-entropy loss instead.
    """
    # TODO: Might be worth trying the normalization from assignment 2
    # mean 0 / std 1 with max_pixel_value=255 simply rescales uint8 to [0, 1].
    train_transform = A.Compose([
        A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
        A.Rotate(limit=35, p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.1),
        A.Normalize(
            mean=[0.0, 0.0, 0.0],
            std=[1.0, 1.0, 1.0],
            max_pixel_value=255.0,
        ),
        ToTensorV2(),
    ])

    # Validation pipeline: resize + normalization only, no augmentation.
    val_transforms = A.Compose([
        A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
        A.Normalize(
            mean=[0.0, 0.0, 0.0],
            std=[1.0, 1.0, 1.0],
            max_pixel_value=255.0,
        ),
        ToTensorV2(),
    ])

    model = UNET(in_channels=3, out_channels=1).to(DEVICE)
    loss_fn = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

    train_loader, val_loader = get_loaders(
        TRAIN_IMG_DIR,
        TRAIN_MASK_DIR,
        VAL_IMG_DIR,
        VAL_MASK_DIR,
        BATCH_SIZE,
        train_transform,
        val_transforms,
        NUM_WORKERS,
        PIN_MEMORY,
    )

    # Optionally resume from a previously saved checkpoint.
    if LOAD_MODEL:
        load_checkpoint(torch.load("my_checkpoint.pth.tar"), model)

    # Scales the gradients to avoid fp16 underflow. Requires a GPU.
    scaler = torch.cuda.amp.GradScaler()

    for epoch in range(NUM_EPOCHS):
        train_fn(train_loader, model, optimizer, loss_fn, scaler)

        # save model
        save_checkpoint({
            "state_dict": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        })

        # check accuracy
        check_accuracy(val_loader, model, device=DEVICE)

        # print some examples to a folder
        save_predictions_as_imgs(val_loader, model, folder="saved_images/", device=DEVICE)
def main(cfg: DictConfig):
    """Train a 3-channel UNET for binary segmentation, configured via Hydra/OmegaConf.

    Args:
        cfg: DictConfig with `hyperparams`, `dataPath`, and `trainingInput`
            sections supplying image size, learning rate, paths, etc.
    """
    # --- Device selection -------------------------------------------------
    gpu_no = 0  # GPU index to use when CUDA is available and enabled.
    DEVICE = torch.device(
        f'cuda:{gpu_no}'
        if torch.cuda.is_available() and cfg.trainingInput.DEVICE.CUDA
        else 'cpu'
    )
    print(f"device: { DEVICE }")

    # --- Data augmentation -------------------------------------------------
    # (ref) https://github.com/DoranLyong/DeepLearning-model-factory/blob/master/ML_tutorial/PyTorch/Basics/Albumentations-image_augmentation_tutorial/04_cvt2pytorch.py
    train_transform = A.Compose(
        [
            A.Resize(height=cfg.hyperparams.IMAGE_HEIGHT, width=cfg.hyperparams.IMAGE_WIDTH),
            A.Rotate(limit=35, p=1.0),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.1),
            A.Normalize(
                mean=[0.0, 0.0, 0.0],
                std=[1.0, 1.0, 1.0],
                max_pixel_value=255.0,  # divisor of at most 255 => normalizes uint8 images
            ),
            ToTensorV2(),  # Albumentations -> torch.Tensor
        ],
    )
    val_transforms = A.Compose(
        [
            A.Resize(height=cfg.hyperparams.IMAGE_HEIGHT, width=cfg.hyperparams.IMAGE_WIDTH),
            A.Normalize(
                mean=[0.0, 0.0, 0.0],
                std=[1.0, 1.0, 1.0],
                max_pixel_value=255.0,
            ),
            ToTensorV2(),
        ],
    )

    # --- Network -----------------------------------------------------------
    model = UNET(in_channels=3, out_channels=1).to(DEVICE)
    print(f"Model: {model}")

    # Data parallelism across GPUs.
    # (ref) https://tutorials.pytorch.kr/beginner/blitz/data_parallel_tutorial.html
    # NOTE: may not speed things up, and can even be slower:
    # (ref) https://tutorials.pytorch.kr/beginner/former_torchies/parallelism_tutorial.html
    # (ref) https://medium.com/daangn/pytorch-multi-gpu-%ED%95%99%EC%8A%B5-%EC%A0%9C%EB%8C%80%EB%A1%9C-%ED%95%98%EA%B8%B0-27270617936b
    model = torch.nn.DataParallel(model)
    cudnn.benchmark = True

    # --- Loss and optimizer --------------------------------------------------
    # BCEWithLogitsLoss = binary cross-entropy + sigmoid layer, for the
    # two-class (binary) case.
    # (ref) https://nuguziii.github.io/dev/dev-002/
    # (ref) https://youtu.be/IHq1t7NxS8k?t=2199
    loss_fn = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=cfg.hyperparams.LEARNING_RATE)

    # --- Dataloaders ---------------------------------------------------------
    # (ref) https://youtu.be/IHq1t7NxS8k?t=2271
    train_loader, val_loader = get_loaders(
        cfg.dataPath.TRAIN_IMG_DIR,
        cfg.dataPath.TRAIN_MASK_DIR,
        cfg.dataPath.VAL_IMG_DIR,
        cfg.dataPath.VAL_MASK_DIR,
        cfg.hyperparams.BATCH_SIZE,
        train_transform,
        val_transforms,
        cfg.trainingInput.NUM_WORKERS,
        cfg.trainingInput.PIN_MEMORY,  # TODO: read up on what pin_memory does
    )

    # --- Gradient scaling ------------------------------------------------------
    # (ref) https://pytorch.org/docs/stable/amp.html#gradient-scaling
    scaler = torch.cuda.amp.GradScaler()

    # --- Checkpoint resume -----------------------------------------------------
    if cfg.trainingInput.LOAD_MODEL:
        # (ref) https://pytorch.org/docs/stable/generated/torch.load.html#torch.load
        checkpoint = torch.load("my_checkpoint.pth.tar")
        load_checkpoint(checkpoint, model, optimizer)

    # --- Training loop -----------------------------------------------------------
    for epoch in range(cfg.hyperparams.NUM_EPOCHS):
        # Run one training epoch.
        train_fn(train_loader, model, optimizer, loss_fn, scaler, DEVICE,
                 cfg.hyperparams.BATCH_SIZE, cfg.hyperparams.NUM_EPOCHS, epoch)

        # Save model and optimizer state.
        # (ref) https://github.com/DoranLyong/DeepLearning-model-factory/blob/master/ML_tutorial/PyTorch/Basics/06_model_loadsave_CNN.py
        checkpoint = {
            "state_dict": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        }
        save_checkpoint(checkpoint)

        # Validation test.
        check_accuracy(val_loader, model, device=DEVICE)

        # Print some examples to a folder.
        save_predictions_as_imgs(val_loader, model, folder="saved_pred_images/",
                                 device=DEVICE, cur_epoch=epoch)