def run(args):
    """Train MRNet with 8-fold cross-validation over 1130 exams.

    The exam indices are shuffled once with a fixed seed so every fold sees
    the same permutation; each fold holds out one contiguous slice of the
    shuffled list for validation and trains on the remainder.  A fresh
    model, optimizer, scheduler and TensorBoard writer are created per fold.
    """
    indexes = list(range(0, 1130))
    random.seed(26)  # fixed seed -> reproducible fold split across runs
    random.shuffle(indexes)

    for fold in range(0, 8):
        # Hold-out slices per fold (141 exams each; fold 5 takes the 143-long tail).
        if fold == 0:
            train_ind = indexes[0:141] + indexes[282:]
            valid_ind = indexes[141:282]
        elif fold == 1:
            train_ind = indexes[0:282] + indexes[423:]
            valid_ind = indexes[282:423]
        elif fold == 2:
            train_ind = indexes[0:564] + indexes[705:]
            valid_ind = indexes[564:705]
        elif fold == 3:
            train_ind = indexes[:705] + indexes[846:]
            valid_ind = indexes[705:846]
        elif fold == 4:
            train_ind = indexes[:846] + indexes[987:]
            valid_ind = indexes[846:987]
        elif fold == 5:
            train_ind = indexes[:987]
            valid_ind = indexes[987:]
        elif fold == 6:
            train_ind = indexes[141:]
            valid_ind = indexes[0:141]
        elif fold == 7:
            # BUGFIX: upper bound was 568, which overlapped fold 2's
            # validation slice [564:705] by four exams; 564 keeps the
            # eight validation slices disjoint.
            train_ind = indexes[0:423] + indexes[564:]
            valid_ind = indexes[423:564]

        log_root_folder = "./logs/{0}/{1}/".format(args.task, args.plane)
        if args.flush_history == 1:
            # Wipe previous TensorBoard run directories for this task/plane.
            objects = os.listdir(log_root_folder)
            for f in objects:
                if os.path.isdir(log_root_folder + f):
                    shutil.rmtree(log_root_folder + f)

        now = datetime.now()
        logdir = log_root_folder + now.strftime("%Y%m%d-%H%M%S") + "/"
        os.makedirs(logdir)

        writer = SummaryWriter(logdir)

        augmentor = Compose([
            transforms.Lambda(lambda x: torch.Tensor(x)),
            RandomRotate(25),
            RandomTranslate([0.11, 0.11]),
            RandomFlip(),
            # transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
        ])

        mrnet = model.MRNet()
        if torch.cuda.is_available():
            mrnet = mrnet.cuda()

        optimizer = optim.Adam(mrnet.parameters(), lr=args.lr, weight_decay=0.1)

        if args.lr_scheduler == "plateau":
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                optimizer, patience=2, factor=.3, threshold=1e-4, verbose=True)
        elif args.lr_scheduler == "step":
            scheduler = torch.optim.lr_scheduler.StepLR(
                optimizer, step_size=3, gamma=args.gamma)

        best_val_loss = float('inf')
        best_val_auc = float(0)

        num_epochs = args.epochs
        iteration_change_loss = 0
        patience = args.patience
        log_every = args.log_every

        t_start_training = time.time()

        train_dataset = MRDataset(train_ind, '/content/data/', args.task,
                                  args.plane, valid=False, transform=augmentor)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=1, shuffle=True, num_workers=11,
            drop_last=False)

        # NOTE(review): valid=False here even though these are the held-out
        # indices — presumably the flag selects the on-disk split rather than
        # the role; confirm against MRDataset.
        validation_dataset = MRDataset(valid_ind, '/content/data/', args.task,
                                       args.plane, valid=False, transform=None)
        # BUGFIX: shuffle was written "- True" (unary minus, i.e. -1);
        # evaluation loaders need no shuffling, so make it an explicit False.
        validation_loader = torch.utils.data.DataLoader(
            validation_dataset, batch_size=1, shuffle=False, num_workers=11,
            drop_last=False)

        valid_dataset = MRDataset([0], '/content/data/', args.task,
                                  args.plane, valid=True, transform=None)
        valid_loader = torch.utils.data.DataLoader(
            valid_dataset, batch_size=1, shuffle=False, num_workers=11,
            drop_last=False)

        for epoch in range(num_epochs):
            current_lr = get_lr(optimizer)
            t_start = time.time()

            train_loss, train_auc = train_model(
                mrnet, train_loader, epoch, num_epochs, optimizer, writer,
                current_lr, log_every)
            val_loss, val_auc, test_auc = evaluate_model(
                mrnet, validation_loader, valid_loader, epoch, num_epochs,
                writer, current_lr)

            if args.lr_scheduler == 'plateau':
                scheduler.step(val_loss)
            elif args.lr_scheduler == 'step':
                scheduler.step()

            t_end = time.time()
            delta = t_end - t_start

            print(
                "fold : {0} | train loss : {1} | train auc {2} | val loss {3} | val auc {4} | elapsed time {5} s"
                .format(fold, train_loss, train_auc, val_loss, val_auc, delta))

            iteration_change_loss += 1
            print('-' * 30)

            if val_auc > best_val_auc:
                best_val_auc = val_auc
                if bool(args.save_model):
                    file_name = f'model_fold{fold}_{args.prefix_name}_{args.task}_{args.plane}_test_auc_{test_auc:0.4f}_val_auc_{val_auc:0.4f}_train_auc_{train_auc:0.4f}_epoch_{epoch+1}.pth'
                    # Keep only the latest best checkpoint for this fold/task.
                    for f in os.listdir('./models/'):
                        if (args.task in f) and (args.prefix_name in f) and ('fold' + str(fold) in f):
                            os.remove(f'./models/{f}')
                    torch.save(mrnet, f'./models/{file_name}')

            if val_loss < best_val_loss:
                best_val_loss = val_loss
                iteration_change_loss = 0

            if iteration_change_loss == patience:
                print(
                    'Early stopping after {0} iterations without the decrease of the val loss'
                    .format(iteration_change_loss))
                break

        t_end_training = time.time()
        print(f'training took {t_end_training - t_start_training} s')
def run(args):
    """Single-split training run with TensorBoard debug visualisation.

    Besides the usual training loop, this variant logs a sample image grid
    and the model graph to TensorBoard, dumps the grid to output.png, and
    exports the untrained model to output.onnx before training starts.
    """
    log_root_folder = "./logs/{0}/{1}/".format(args.task, args.plane)
    if args.flush_history == 1:
        # Wipe previous TensorBoard run directories for this task/plane.
        objects = os.listdir(log_root_folder)
        for f in objects:
            if os.path.isdir(log_root_folder + f):
                shutil.rmtree(log_root_folder + f)

    now = datetime.now()
    logdir = log_root_folder + now.strftime("%Y%m%d-%H%M%S") + "/"
    os.makedirs(logdir)

    writer = SummaryWriter(logdir)

    augmentor = Compose([
        transforms.Lambda(lambda x: torch.Tensor(x)),
        RandomRotate(25),
        RandomTranslate([0.11, 0.11]),
        # RandomFlip(),  # flipping disabled in this variant
        transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
    ])

    # shuffle=False / num_workers=0 keep the first batch deterministic for
    # the visualisation below.
    train_dataset = MRDataset('./data/', args.task, args.plane,
                              transform=augmentor, train=True)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=1, shuffle=False, num_workers=0,
        drop_last=False)

    validation_dataset = MRDataset('./data/', args.task, args.plane, train=False)
    # BUGFIX: shuffle was written "- True" (unary minus, i.e. -1);
    # evaluation data needs no shuffling, so make it an explicit False.
    validation_loader = torch.utils.data.DataLoader(
        validation_dataset, batch_size=1, shuffle=False, num_workers=0,
        drop_last=False)

    mrnet = model.MRNet()

    # Debug artefacts: sample grid + model graph to TensorBoard, plus PNG
    # and ONNX dumps of the untrained model.
    images, labels, weights = next(iter(train_loader))
    grid = torchvision.utils.make_grid(images[0])
    grid_np = grid.numpy().astype('uint8').transpose(1, 2, 0)
    import cv2  # local import: only needed for this debug dump
    cv2.imwrite('output.png', grid_np)
    writer.add_image('images', grid, 0)
    writer.add_graph(mrnet, images)
    torch.onnx.export(mrnet, images, "output.onnx")

    if torch.cuda.is_available():
        mrnet = mrnet.cuda()

    optimizer = optim.Adam(mrnet.parameters(), lr=args.lr, weight_decay=0.1)

    if args.lr_scheduler == "plateau":
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, patience=3, factor=.3, threshold=1e-4, verbose=True)
    elif args.lr_scheduler == "step":
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=3, gamma=args.gamma)

    best_val_loss = float('inf')
    best_val_auc = float(0)

    num_epochs = args.epochs
    iteration_change_loss = 0
    patience = args.patience
    log_every = args.log_every

    t_start_training = time.time()

    for epoch in range(num_epochs):
        current_lr = get_lr(optimizer)
        t_start = time.time()

        train_loss, train_auc = train_model(
            mrnet, train_loader, epoch, num_epochs, optimizer, writer,
            current_lr, log_every)
        precision, recall, f1_score, accuracy, val_loss, val_auc = evaluate_model(
            mrnet, validation_loader, epoch, num_epochs, writer, current_lr)

        if args.lr_scheduler == 'plateau':
            scheduler.step(val_loss)
        elif args.lr_scheduler == 'step':
            scheduler.step()

        t_end = time.time()
        delta = t_end - t_start

        print(
            "val precision : {0} | val recall : {1} | val accuracy : {2} | val f1 score : {3} | train loss : {4} | train auc {5} | val loss {6} | val auc {7} | elapsed time {8} s"
            .format(precision, recall, accuracy, f1_score, train_loss,
                    train_auc, val_loss, val_auc, delta))

        iteration_change_loss += 1
        print('-' * 30)

        if val_auc > best_val_auc:
            best_val_auc = val_auc
            if bool(args.save_model):
                file_name = f'model_{args.prefix_name}_{args.task}_{args.plane}_val_auc_{val_auc:0.4f}_train_auc_{train_auc:0.4f}_epoch_{epoch+1}.pth'
                # Keep only the latest best checkpoint for this configuration.
                for f in os.listdir('./models/'):
                    if (args.task in f) and (args.plane in f) and (args.prefix_name in f):
                        os.remove(f'./models/{f}')
                torch.save(mrnet, f'./models/{file_name}')

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            iteration_change_loss = 0

        if iteration_change_loss == patience:
            print(
                'Early stopping after {0} iterations without the decrease of the val loss'
                .format(iteration_change_loss))
            break

    t_end_training = time.time()
    print(f'training took {t_end_training - t_start_training} s')
def run(args):
    """Minimal single-split training run (fixed LR, plateau scheduler only)."""
    log_root_folder = "./logs/{0}/{1}/".format(args.task, args.plane)
    if args.flush_history == 1:
        # Wipe previous TensorBoard run directories for this task/plane.
        objects = os.listdir(log_root_folder)
        for f in objects:
            if os.path.isdir(log_root_folder + f):
                shutil.rmtree(log_root_folder + f)

    now = datetime.now()
    logdir = log_root_folder + now.strftime("%Y%m%d-%H%M%S") + "/"
    os.makedirs(logdir)

    writer = SummaryWriter(logdir)

    augmentor = Compose([
        transforms.Lambda(lambda x: torch.Tensor(x)),
        RandomRotate(25),
        RandomTranslate([0.11, 0.11]),
        RandomFlip(),
        transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
    ])

    train_dataset = MRDataset('./data/', args.task, args.plane,
                              transform=augmentor, train=True)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=1, shuffle=True, num_workers=11,
        drop_last=False)

    validation_dataset = MRDataset('./data/', args.task, args.plane, train=False)
    # BUGFIX: shuffle was written "- True" (unary minus, i.e. -1);
    # evaluation data needs no shuffling, so make it an explicit False.
    validation_loader = torch.utils.data.DataLoader(
        validation_dataset, batch_size=1, shuffle=False, num_workers=11,
        drop_last=False)

    mrnet = model.MRNet()
    # BUGFIX: the unconditional .cuda() call crashed on CPU-only machines;
    # guard it like the other run() variants in this file.
    if torch.cuda.is_available():
        mrnet = mrnet.cuda()

    # NOTE(review): the learning rate is hard-coded here (args.lr is ignored
    # in this variant) — confirm this is intentional.
    optimizer = optim.Adam(mrnet.parameters(), lr=1e-5, weight_decay=0.1)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, patience=3, factor=.3, threshold=1e-4, verbose=True)

    best_val_loss = float('inf')
    best_val_auc = float(0)

    num_epochs = args.epochs
    iteration_change_loss = 0
    patience = args.patience

    for epoch in range(num_epochs):
        train_loss, train_auc = train_model(
            mrnet, train_loader, epoch, num_epochs, optimizer, writer)
        val_loss, val_auc = evaluate_model(
            mrnet, validation_loader, epoch, num_epochs, writer)

        print("train loss : {0} | train auc {1} | val loss {2} | val auc {3}".
              format(train_loss, train_auc, val_loss, val_auc))

        # In this variant args.lr_scheduler is an int flag, not a string.
        if args.lr_scheduler == 1:
            scheduler.step(val_loss)

        iteration_change_loss += 1
        print('-' * 30)

        if val_auc > best_val_auc:
            best_val_auc = val_auc
            if bool(args.save_model):
                file_name = f'model_{args.task}_{args.plane}_val_auc_{val_auc:0.4f}_train_auc_{train_auc:0.4f}_epoch_{epoch+1}.pth'
                # Keep only the latest best checkpoint for this task/plane.
                for f in os.listdir('./models/'):
                    if (args.task in f) and (args.plane in f):
                        os.remove(f'./models/{f}')
                torch.save(mrnet, f'./models/{file_name}')

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            iteration_change_loss = 0

        if iteration_change_loss == patience:
            print(
                'Early stopping after {0} iterations without the decrease of the val loss'
                .format(iteration_change_loss))
            break
# Top-level script variant: build the loaders and delegate the whole
# training loop to the model's own compute_training method.
augmentor = Compose([
    transforms.Lambda(lambda x: torch.Tensor(x)),
    RandomRotate(25),
    RandomTranslate([0.11, 0.11]),
    RandomFlip(),
    transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
])

# Load Dataset
train_dataset = MRDataset('./data/', True, 'axial', transform=augmentor,
                          train=True)
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=1, shuffle=True, num_workers=11,
    drop_last=False)

validation_dataset = MRDataset('./data/', False, 'axial', train=False)
# BUGFIX: shuffle was written "-True" (unary minus, i.e. -1);
# evaluation data needs no shuffling, so make it an explicit False.
validation_loader = torch.utils.data.DataLoader(
    validation_dataset, batch_size=1, shuffle=False, num_workers=11,
    drop_last=False)

mrnet = model.MRNet()

# Train
mrnet.compute_training(train_loader, validation_loader)
def run(args):
    """Single-split MRNet training using absolute local data paths.

    NOTE(review): this file defines run() several times; this last
    definition shadows the earlier ones at import time.
    """
    log_root_folder = "./logs/{0}/{1}/".format(args.task, args.plane)
    if args.flush_history == 1:
        # Wipe previous TensorBoard run directories for this task/plane.
        objects = os.listdir(log_root_folder)
        for f in objects:
            if os.path.isdir(log_root_folder + f):
                shutil.rmtree(log_root_folder + f)

    now = datetime.now()
    logdir = log_root_folder + now.strftime("%Y%m%d-%H%M%S") + "/"
    os.makedirs(logdir)

    writer = SummaryWriter(logdir)

    augmentor = Compose([
        transforms.Lambda(lambda x: torch.Tensor(x)),
        RandomRotate(25),
        RandomTranslate([0.11, 0.11]),
        RandomFlip(),
        transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
    ])

    # NOTE(review): machine-specific absolute path — consider making this
    # configurable via args.
    train_dataset = MRDataset('/home/niamh/Documents/MRNET/data/', args.task,
                              args.plane, transform=augmentor, train=True)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=1, shuffle=True, num_workers=11,
        drop_last=False)

    validation_dataset = MRDataset('/home/niamh/Documents/MRNET/data/',
                                   args.task, args.plane, train=False)
    # BUGFIX: shuffle was written "- True" (unary minus, i.e. -1);
    # evaluation data needs no shuffling, so make it an explicit False.
    validation_loader = torch.utils.data.DataLoader(
        validation_dataset, batch_size=1, shuffle=False, num_workers=11,
        drop_last=False)

    mrnet = model.MRNet()
    if torch.cuda.is_available():
        mrnet = mrnet.cuda()

    optimizer = optim.Adam(mrnet.parameters(), lr=args.lr, weight_decay=0.1)

    if args.lr_scheduler == "plateau":
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, patience=3, factor=.3, threshold=1e-4, verbose=True)
    elif args.lr_scheduler == "step":
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=3, gamma=args.gamma)

    best_val_loss = float('inf')
    best_val_auc = float(0)

    num_epochs = args.epochs
    iteration_change_loss = 0
    patience = args.patience
    log_every = args.log_every

    t_start_training = time.time()

    # NOTE(review): `counting` always equals `epoch`; kept because the
    # train_model/evaluate_model signatures in this variant expect it.
    counting = 0
    for epoch in range(num_epochs):
        print(counting)
        current_lr = get_lr(optimizer)
        t_start = time.time()

        train_loss, train_auc = train_model(
            mrnet, train_loader, epoch, num_epochs, optimizer, writer,
            current_lr, counting, log_every)
        val_loss, val_auc = evaluate_model(
            mrnet, validation_loader, epoch, num_epochs, writer, counting,
            current_lr)
        counting = counting + 1

        if args.lr_scheduler == 'plateau':
            scheduler.step(val_loss)
        elif args.lr_scheduler == 'step':
            scheduler.step()

        t_end = time.time()
        delta = t_end - t_start

        print(
            "train loss : {0} | train auc {1} | val loss {2} | val auc {3} | elapsed time {4} s"
            .format(train_loss, train_auc, val_loss, val_auc, delta))

        iteration_change_loss += 1
        print('-' * 30)

        if val_auc > best_val_auc:
            best_val_auc = val_auc
            if bool(args.save_model):
                file_name = f'model_{args.prefix_name}_{args.task}_{args.plane}_val_auc_{val_auc:0.4f}_train_auc_{train_auc:0.4f}_epoch_{epoch+1}.pth'
                # Keep only the latest best checkpoint for this configuration.
                for f in os.listdir('./models/'):
                    if (args.task in f) and (args.plane in f) and (args.prefix_name in f):
                        os.remove(f'./models/{f}')
                torch.save(mrnet, f'./models/{file_name}')

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            iteration_change_loss = 0

        if iteration_change_loss == patience:
            print(
                'Early stopping after {0} iterations without the decrease of the val loss'
                .format(iteration_change_loss))
            break

    t_end_training = time.time()
    print(f'training took {t_end_training - t_start_training} s')