def train(rundir, diagnosis, epochs, learning_rate, use_gpu):
    """Train an MRNet on `diagnosis`, checkpointing weights on best validation loss.

    Args:
        rundir: directory where checkpoint files are written.
        diagnosis: task identifier forwarded to load_data.
        epochs: number of training epochs.
        learning_rate: Adam learning rate.
        use_gpu: move the model to CUDA when True.
    """
    loaders = load_data(diagnosis, use_gpu)
    train_loader, valid_loader, test_loader = loaders

    net = MRNet()
    if use_gpu:
        net = net.cuda()

    optimizer = torch.optim.Adam(net.parameters(), learning_rate, weight_decay=.01)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, patience=5, factor=.3, threshold=1e-4)

    best_val_loss = float('inf')
    started = datetime.now()

    for epoch in range(epochs):
        elapsed = datetime.now() - started
        print('starting epoch {}. time passed: {}'.format(epoch + 1, str(elapsed)))

        train_loss, train_auc, _, _ = run_model(
            net, train_loader, train=True, optimizer=optimizer)
        print(f'train loss: {train_loss:0.4f}')
        print(f'train AUC: {train_auc:0.4f}')

        val_loss, val_auc, _, _ = run_model(net, valid_loader)
        print(f'valid loss: {val_loss:0.4f}')
        print(f'valid AUC: {val_auc:0.4f}')

        # Plateau scheduler keys off validation loss.
        scheduler.step(val_loss)

        # Save weights only when validation loss improves.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            file_name = f'val{val_loss:0.4f}_train{train_loss:0.4f}_epoch{epoch+1}'
            torch.save(net.state_dict(), Path(rundir) / file_name)
def train(rundir, path, epochs, learning_rate, use_gpu):
    """Train a TripleMRNet (abnormal / ACL / meniscus heads), checkpointing on best
    validation loss.

    Args:
        rundir: directory where checkpoint files are written.
        path: dataset location forwarded to load_data.
        epochs: number of training epochs.
        learning_rate: Adam learning rate.
        use_gpu: move the model to CUDA when True.
    """
    # FIX: removed the `rundir = rundir + '/'` mutation (Path normalizes
    # separators, so it was redundant) and deleted the commented-out
    # accuracy-print code that cluttered the loop.
    train_loader, valid_loader, test_loader = load_data(path, use_gpu)

    model = TripleMRNet()
    if use_gpu:
        model = model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), learning_rate, weight_decay=0.01)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, patience=5, factor=.3, threshold=1e-5)

    best_val_loss = float('inf')
    start_time = datetime.now()

    for epoch in range(epochs):
        change = datetime.now() - start_time
        print('starting epoch {}. time passed: {}\n'.format(
            epoch + 1, str(change)))

        # run_model also returns per-task accuracy; only loss and AUC are printed.
        train_loss, train_auc, train_accuracy, _, _ = run_model(
            model, train_loader, train=True, optimizer=optimizer)
        print(f'train loss: {train_loss:0.4f}')
        print(f'train AUC_abnormal: {train_auc[0]:0.4f}')
        print(f'train AUC_acl: {train_auc[1]:0.4f}')
        print(f'train AUC_meniscus: {train_auc[2]:0.4f}\n')

        val_loss, val_auc, val_accuracy, _, _ = run_model(model, valid_loader)
        print(f'valid loss: {val_loss:0.4f}')
        print(f'valid AUC_abnormal: {val_auc[0]:0.4f}')
        print(f'valid AUC_acl: {val_auc[1]:0.4f}')
        print(f'valid AUC_meniscus: {val_auc[2]:0.4f}\n')

        scheduler.step(val_loss)

        # Save weights only when validation loss improves.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            file_name = f'val{val_loss:0.4f}_train{train_loss:0.4f}_epoch{epoch+1}'
            save_path = Path(rundir) / file_name
            torch.save(model.state_dict(), save_path)
def train(rundir, path, epochs, learning_rate, use_gpu):
    """Train a CNNNet classifier, checkpointing whenever validation accuracy
    ties or beats the best seen so far.

    Args:
        rundir: directory where checkpoint files are written.
        path: dataset location forwarded to load_data.
        epochs: number of training epochs.
        learning_rate: Adam learning rate.
        use_gpu: move the model to CUDA when True.
    """
    # FIX: removed the commented-out ReduceLROnPlateau scheduler lines (dead code).
    # NOTE(review): valid_loader uses shuffle=True, which is unusual for
    # evaluation — kept to preserve behavior, but worth confirming intent.
    # load_data also returns test/A/B/D splits that are unused here.
    data_train, data_valid, data_test, data_A, data_B, data_D = load_data(path)
    train_loader = DataLoader(data_train, batch_size=32, num_workers=12, shuffle=True)
    valid_loader = DataLoader(data_valid, batch_size=32, num_workers=12, shuffle=True)

    model = CNNNet()
    if use_gpu:
        model = model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), learning_rate, weight_decay=0.01)

    best_val_acc = -float('inf')
    start_time = datetime.now()

    for epoch in range(epochs):
        change = datetime.now() - start_time
        print('starting epoch {}. time passed: {}\n'.format(
            epoch + 1, str(change)))

        train_loss, train_acc, _, _ = run_model(
            model, train_loader, train=True, optimizer=optimizer)
        print(f'train loss: {train_loss:0.6f}')
        print(f'train accuracy: {train_acc:0.6f}\n')

        val_loss, val_acc, _, _ = run_model(model, valid_loader)
        print(f'valid loss: {val_loss:0.6f}')
        print(f'valid accuracy: {val_acc:0.6f}\n')

        # `>=` deliberately re-saves on ties, keeping the latest of equal-accuracy epochs.
        if val_acc >= best_val_acc:
            best_val_acc = val_acc
            file_name = f'val{val_acc:0.6f}_train{train_acc:0.6f}_epoch{epoch+1}'
            save_path = Path(rundir) / file_name
            torch.save(model.state_dict(), save_path)
def train(rundir, task, backbone, epochs, learning_rate, use_gpu,
          abnormal_model_path=None):
    """Train a TripleMRNet, resuming from the newest checkpoint in `rundir` if any.

    Args:
        rundir: directory scanned for existing checkpoints and written to.
        task: task identifier forwarded to load_data.
        backbone: backbone name forwarded to TripleMRNet.
        epochs: total epoch budget (resumed runs continue from the saved epoch).
        learning_rate: Adam learning rate.
        use_gpu: move the model to CUDA when True.
        abnormal_model_path: optional path forwarded to run_model.
    """
    train_loader, valid_loader = load_data(task, use_gpu)
    model = TripleMRNet(backbone=backbone)

    # Resume support: find the checkpoint with the highest epoch number.
    # Checkpoint names look like 'val{...}_train{...}_epoch{N}'.
    # FIX: previously this walked the global `args.rundir` instead of the
    # `rundir` parameter, left `max_epoch`/`model_path` unbound (NameError)
    # when the walk yielded nothing, aborted the scan on the first empty
    # directory, and parsed the epoch with a brittle fixed slice fname[27:].
    max_epoch = 0
    model_path = None
    for dirpath, dirnames, files in os.walk(rundir):
        for fname in files:
            if fname.endswith(".json"):
                continue
            try:
                ep = int(fname.rsplit("epoch", 1)[-1])
            except ValueError:
                continue  # not a checkpoint file
            if ep >= max_epoch:
                max_epoch = ep
                model_path = os.path.join(dirpath, fname)

    if model_path:
        state_dict = torch.load(model_path,
                                map_location=(None if use_gpu else 'cpu'))
        model.load_state_dict(state_dict)

    if use_gpu:
        model = model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), learning_rate, weight_decay=.01)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, patience=5, factor=.3, threshold=1e-4)

    best_val_loss = float('inf')
    start_time = datetime.now()

    # Continue counting from the resumed epoch (0 for a fresh run).
    epoch = max_epoch
    while epoch < epochs:
        change = datetime.now() - start_time
        print('starting epoch {}. time passed: {}'.format(epoch + 1, str(change)))

        train_loss, train_auc, _, _ = run_model(
            model, train_loader, train=True, optimizer=optimizer,
            abnormal_model_path=abnormal_model_path)
        print(f'train loss: {train_loss:0.4f}')
        print(f'train AUC: {train_auc:0.4f}')

        val_loss, val_auc, _, _ = run_model(
            model, valid_loader, abnormal_model_path=abnormal_model_path)
        print(f'valid loss: {val_loss:0.4f}')
        print(f'valid AUC: {val_auc:0.4f}')

        scheduler.step(val_loss)

        # Save weights only when validation loss improves.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            file_name = f'val{val_loss:0.4f}_train{train_loss:0.4f}_epoch{epoch+1}'
            save_path = Path(rundir) / file_name
            torch.save(model.state_dict(), save_path)

        epoch += 1
def train(rundir, diagnosis, dataset, epochs, learning_rate, use_gpu, attention):
    """Train one MRNet (external-validation data) or three plane-specific MRNets
    (sagittal/axial/coronal), keeping only the best checkpoint per model.

    Args:
        rundir: base directory; each model saves under rundir/<plane-name>/.
        diagnosis: task identifier forwarded to the data loaders.
        dataset: 0 = external validation data, 1 = MRNet per-plane data.
        epochs: number of training epochs per model.
        learning_rate: Adam learning rate.
        use_gpu: move models to CUDA when True.
        attention: enables the multi-head variant for the external model.
    """
    models = []
    if dataset == 0:
        train_loader, valid_loader, test_loader = external_load_data(
            diagnosis, use_gpu)
        models.append((MRNet(useMultiHead=attention), train_loader, valid_loader,
                       'external_validation'))
    elif dataset == 1:
        train_loaders, valid_loaders = mr_load_data(diagnosis, use_gpu)
        train_loader_sag, train_loader_ax, train_loader_cor = train_loaders
        valid_loader_sag, valid_loader_ax, valid_loader_cor = valid_loaders
        models = [
            (MRNet(max_layers=51), train_loader_sag, valid_loader_sag, 'sagittal'),
            (MRNet(max_layers=61), train_loader_ax, valid_loader_ax, 'axial'),
            (MRNet(max_layers=58), train_loader_cor, valid_loader_cor, 'coronal'),
        ]

    for model, train_loader, valid_loader, fname in models:
        if use_gpu:
            model = model.cuda()

        optimizer = torch.optim.Adam(model.parameters(), learning_rate,
                                     weight_decay=.01)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, patience=5, factor=.3, threshold=1e-4)

        best_val_loss = float('inf')
        start_time = datetime.now()

        for epoch in range(epochs):
            change = datetime.now() - start_time
            print('starting epoch {}. time passed: {}'.format(
                epoch + 1, str(change)))

            train_loss, train_auc, _, _ = run_model(model, train_loader,
                                                    train=True,
                                                    optimizer=optimizer)
            print(f'train loss: {train_loss:0.4f}')
            print(f'train AUC: {train_auc:0.4f}')

            val_loss, val_auc, _, _ = run_model(model, valid_loader)
            print(f'valid loss: {val_loss:0.4f}')
            print(f'valid AUC: {val_auc:0.4f}')

            scheduler.step(val_loss)

            if val_loss < best_val_loss:
                best_val_loss = val_loss
                file_name = f'val{val_loss:0.4f}_train{train_loss:0.4f}_epoch{epoch+1}'
                folder = Path(rundir) / fname
                # FIX: replaced the bare `except:` that swallowed every error
                # (and would raise FileExistsError if torch.save itself failed,
                # since makedirs then ran on an existing folder). Create the
                # folder explicitly, then clear any previous best checkpoint.
                os.makedirs(folder, exist_ok=True)
                for old in os.listdir(folder):
                    os.remove(os.path.join(folder, old))
                # NOTE(review): saves the whole model object (unlike the other
                # train() variants, which save state_dict) — kept as-is for
                # loader compatibility; confirm downstream loading expectations.
                torch.save(model, folder / file_name)