def initialize(args):
    if args.device is not None:
        # Cast to str: CUDA_VISIBLE_DEVICES must be a string, args.device may be an int
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device)
    model = select_model(args)
    optimizer = select_optimizer(args, model)
    if args.cuda:
        model.cuda()

    train_params = {'batch_size': args.batch_size,
                    'shuffle': True,
                    'num_workers': 2}
    test_params = {'batch_size': args.batch_size,
                   'shuffle': False,
                   'num_workers': 1}

    train_loader = COVIDxDataset(mode='train', n_classes=args.classes,
                                 dataset_path=args.dataset, dim=(224, 224))
    val_loader = COVIDxDataset(mode='test', n_classes=args.classes,
                               dataset_path=args.dataset, dim=(224, 224))
    training_generator = DataLoader(train_loader, **train_params)
    val_generator = DataLoader(val_loader, **test_params)
    return model, optimizer, training_generator, val_generator
def objective(trial):
    # Report GPU memory via NVML (handle is assumed to be created at module scope)
    info = nvmlDeviceGetMemoryInfo(handle)
    print("Total memory:", info.total)
    print("Free memory:", info.free)
    print("Used memory:", info.used)

    # initialize() also returns an optimizer (second value); the trial suggests
    # its own optimizer below, so the returned one is discarded here.
    model, _, training_generator, val_generator, test_generator = initialize(ARGS)

    optim_name = trial.suggest_categorical("optimizer", ["Adam", "RMSprop", "SGD"])
    weight_decay = trial.suggest_float("weight_decay", 1e-5, 1e-1, log=True)
    lr = trial.suggest_float("learning_rate", 1e-7, 1e-5, log=True)
    trial.set_user_attr("worker_id", WORKER_ID)

    optimizer = util.select_optimizer(optim_name, model, lr, weight_decay)
    scheduler = ReduceLROnPlateau(optimizer, factor=0.5, patience=2,
                                  min_lr=1e-5, verbose=True)

    best_pred_loss = 1000.0
    for epoch in range(1, EPOCHS + 1):
        train(ARGS, model, training_generator, optimizer, epoch)
        val_metrics, confusion_matrix = validation(ARGS, model, val_generator, epoch)
        scheduler.step(val_metrics._data.average.loss)
    return val_metrics._data.average.recall_mean
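# A minimal driver sketch (assumed, not from the source) showing how this
# objective would typically be launched with Optuna. ARGS, EPOCHS and
# WORKER_ID are module-level globals used above; n_trials is illustrative.
import optuna

study = optuna.create_study(direction="maximize")  # objective returns mean recall
study.optimize(objective, n_trials=50)
print("Best params:", study.best_params)
print("Best mean recall:", study.best_value)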
def initialize(self, args):
    if args.device is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    model = select_model(args)
    optimizer = select_optimizer(args.opt)
    if args.cuda:
        model.cuda()
    return model, optimizer
def initialize(args):
    if args.device is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device)
    model = select_model(args)
    optimizer = select_optimizer(args, model)
    if args.cuda:
        model.cuda()

    train_params = {'batch_size': args.batch_size,
                    'shuffle': True,
                    'num_workers': 2}
    test_params = {'batch_size': args.batch_size,
                   'shuffle': False,
                   'num_workers': 1}

    if args.dataset_name == 'COVIDx':
        train_loader = COVIDxDataset(mode='train', n_classes=args.classes,
                                     dataset_path=args.dataset_name, dim=(224, 224))
        val_loader = COVIDxDataset(mode='test', n_classes=args.classes,
                                   dataset_path=args.dataset_name, dim=(224, 224))
        test_loader = None
        training_generator = DataLoader(train_loader, **train_params)
        val_generator = DataLoader(val_loader, **test_params)
        test_generator = None
    elif args.dataset_name == 'COVID_CT':
        train_loader = CovidCTDataset('train', root_dir='./data/covid_ct_dataset',
                                      txt_COVID='./data/covid_ct_dataset/trainCT_COVID.txt',
                                      txt_NonCOVID='./data/covid_ct_dataset/trainCT_NonCOVID.txt')
        val_loader = CovidCTDataset('val', root_dir='./data/covid_ct_dataset',
                                    txt_COVID='./data/covid_ct_dataset/valCT_COVID.txt',
                                    txt_NonCOVID='./data/covid_ct_dataset/valCT_NonCOVID.txt')
        test_loader = CovidCTDataset('test', root_dir='./data/covid_ct_dataset',
                                     txt_COVID='./data/covid_ct_dataset/testCT_COVID.txt',
                                     txt_NonCOVID='./data/covid_ct_dataset/testCT_NonCOVID.txt')
        training_generator = DataLoader(train_loader, **train_params)
        val_generator = DataLoader(val_loader, **test_params)
        test_generator = DataLoader(test_loader, **test_params)
    else:
        # Guard added: without it an unknown name leaves the generators undefined
        raise ValueError(f"Unknown dataset_name: {args.dataset_name}")

    return model, optimizer, training_generator, val_generator, test_generator
def initialize(args):
    if args.device is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device)
    model = select_model(args)
    optimizer = select_optimizer(args, model)
    if args.cuda:
        model.cuda()

    train_loader = COVIDxDataset(mode='train', n_classes=args.classes,
                                 dataset_path=args.dataset, dim=(224, 224))

    # ----- Class weights for sampling and for the loss function -----
    labels = np.unique(train_loader.labels)
    print(labels)
    # Keyword arguments: required by recent scikit-learn versions,
    # accepted by older ones as well.
    class_weight = compute_class_weight(class_weight='balanced',
                                        classes=labels, y=train_loader.labels)
    # weights_sample = compute_sample_weight('balanced', train_loader.labels)
    # print(np.unique(weights_sample))

    # Alphabetical label order does not match the class order in COVIDxDataset,
    # so the weight vector is reversed.
    class_weight = class_weight[::-1]

    # Alternative: weighted sampling instead of a weighted loss.
    # weights_sample = torch.DoubleTensor(weights_sample)
    # sampler = torch.utils.data.sampler.WeightedRandomSampler(weights_sample, len(weights_sample))

    if args.cuda:
        class_weight = torch.from_numpy(class_weight.astype(float)).cuda()
    else:
        class_weight = torch.from_numpy(class_weight.astype(float))

    val_loader = COVIDxDataset(mode='test', n_classes=args.classes,
                               dataset_path=args.dataset, dim=(224, 224))

    train_params = {'batch_size': args.batch_size,
                    'shuffle': True,
                    'num_workers': 4}  # add 'sampler': sampler to use weighted sampling
    test_params = {'batch_size': args.batch_size,
                   'shuffle': True,
                   'num_workers': 4}

    training_generator = DataLoader(train_loader, **train_params)
    val_generator = DataLoader(val_loader, **test_params)
    return model, optimizer, training_generator, val_generator, class_weight
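# A minimal usage sketch (not in the source): the returned class_weight is
# typically handed to a weighted cross-entropy loss. `args` is assumed to be
# the parsed argument namespace used above.
import torch.nn as nn

model, optimizer, training_generator, val_generator, class_weight = initialize(args)
criterion = nn.CrossEntropyLoss(weight=class_weight.float())  # PyTorch expects float32 weights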
def main():
    plt.rcParams['figure.figsize'] = (10.0, 8.0)  # set default size of plots
    plt.rcParams['image.interpolation'] = 'nearest'
    plt.rcParams['image.cmap'] = 'gray'

    save_h5 = False  # set to True to export the datasets as .h5 files

    args = get_arguments()
    SEED = args.seed
    torch.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(SEED)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(SEED)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Set the path for the dataset
    args.root_path = 'G:/datasets/covidx_v3_data'
    weight_path = './save/COVIDNet20200520_0709/COVIDNet_large_best_checkpoint.pt'

    if save_h5:
        args.h5 = False
        dataset, val_dataset, test_generator = initialize_datasets(args, use_transform=False)
        # dataset, val_dataset, test_generator = initialize_datasets(args, train_size=10, val_size=100, use_transform=False)

        # Save the datasets as h5 if desired
        for ds in (dataset, val_dataset, test_generator):
            if ds:
                imgs = []
                labels = []
                for n, imgTen in enumerate(ds):
                    # Convert each (image, label) tensor pair to numpy
                    image = imgTen[0].numpy()
                    label = imgTen[1].numpy()
                    imgs.append(image)
                    labels.append(label)
                imgs = np.array(imgs)
                labels = np.array(labels)
                # Create the h5 dataset
                hf = h5py.File(ds.mode + '.h5', 'w')
                hf.create_dataset('images', data=imgs)
                hf.create_dataset('labels', data=labels)
                hf.close()

    # Load the h5 datasets if available
    args.h5 = True
    # dataset, val_dataset, test_dataset = initialize_datasets(args, train_size=300, val_size=100)
    dataset, val_dataset, test_dataset = initialize_datasets(args)

    # Reload weights if desired
    args.batch_size = 256
    args.log_interval = 10
    args.nEpochs = 100
    args.nEpochs = 3  # quick-run override of the value above

    # Each row: [trainme, transfer, model_name, reload_weights, weight_path,
    #            nEpochs, batch_size, log_interval, lr]
    test_list = [
        # [True, False, 'COVIDNet_small', False, None, 100, 36, 50, 5e-5],
        # [True, False, 'COVIDNet_large', False, None, 100, 28, 50, 5e-5],
        # [True, True, 'resnet18', False, None, 100, 256, 50, 2e-5],
        # [True, True, 'mobilenet_v2', False, None, 100, 256, 50, 2e-5],
        # [True, True, 'densenet169', False, None, 100, 256, 50, 2e-5],
        # [True, True, 'resnext50_32x4d', False, None, 100, 256, 50, 2e-5],
        # Run inference only
        # [False, False, 'resnext50_32x4d', True, "F:/Stanford/CS231N/Project/CS231N/save/resnext50_32x4d_Transfer20200605_0720/best_checkpoint.pt", 100, 16, 50, 2e-5],
        [False, False, 'densenet169', True,
         "F:/Stanford/CS231N/Project/CS231N/save/densenet169_Transfer20200605_0127/best_checkpoint.pt",
         100, 32, 50, 2e-5],
    ]

    # Iterate over tests
    for (trainme, transfer, model_name, reload_weights, weight_path,
         args.nEpochs, args.batch_size, args.log_interval, args.lr) in test_list:
        code = "_Transfer" if transfer else ""
        id = model_name + code + util.datestr()

        model = select_model(model_name, args.classes)
        if reload_weights:
            print("Loading model with weights from: {}".format(weight_path))
            checkpoint = torch.load(weight_path)
            model.load_state_dict(checkpoint['state_dict'])
        model.to(device)
        # print(model)

        # Freeze the feature extractor if transfer learning
        if transfer:
            set_parameter_requires_grad(model)

        if args.tensorboard and trainme:
            writer = SummaryWriter('./runs/' + id)
            # num_workers is assumed to be defined at module scope
            data_loader = torch.utils.data.DataLoader(dataset,
                                                      batch_size=args.batch_size,
                                                      shuffle=True,
                                                      pin_memory=False,
                                                      num_workers=num_workers)
            images, labels = next(iter(data_loader))
            writer.add_graph(model, images.to(device))
            img_grid = torchvision.utils.make_grid(images)
            # Show images and write them to tensorboard
            newimg = matplotlib_imshow(img_grid, one_channel=True)
            writer.add_image('Xrays', img_grid)
        else:
            writer = None

        if trainme:
            best_score = 0
            optimizer = select_optimizer(args, model)
            train_loader = torch.utils.data.DataLoader(dataset,
                                                       batch_size=args.batch_size,
                                                       shuffle=True,
                                                       pin_memory=False,
                                                       num_workers=num_workers)
            val_loader = torch.utils.data.DataLoader(val_dataset,
                                                     batch_size=args.batch_size,
                                                     shuffle=True,
                                                     pin_memory=False,
                                                     num_workers=num_workers)
            for epoch in range(1, args.nEpochs + 1):
                # Train 1 epoch
                train_metrics, writer_step = train(model, args, device, writer,
                                                   optimizer, train_loader, epoch)
                # Run inference on the val set
                val_score, confusion_matrix = inference(args, model, val_loader, epoch,
                                                        writer, device, writer_step)
                best_score = util.save_model(model, id, args, val_score, epoch,
                                             best_score, confusion_matrix)
        # Just evaluate a trained model
        else:
            val_loader = torch.utils.data.DataLoader(val_dataset,
                                                     batch_size=args.batch_size,
                                                     shuffle=True,
                                                     pin_memory=False,
                                                     num_workers=num_workers)
            # inference(args, model, val_loader, 1, writer, device, 0)
            images, labels = next(iter(val_loader))
            images = images.to(device)
            labels = labels.to(device)
            show_saliency_maps(model, images, labels)
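# set_parameter_requires_grad is called above but not defined in this file.
# A sketch of the usual transfer-learning helper it presumably matches
# (the standard PyTorch recipe; an assumption, not the source definition):
def set_parameter_requires_grad(model):
    # Freeze every parameter so only layers added/replaced afterwards train
    for param in model.parameters():
        param.requires_grad = False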
def main():
    args = get_arguments()
    myargs = []  # getopts(sys.argv)
    now = datetime.datetime.now()
    cwd = os.getcwd()

    # Fall back to the default config unless a '-c' option was parsed
    if len(myargs) > 0 and 'c' in myargs:
        config_file = myargs['c']
    else:
        config_file = 'config/trainer_config.yml'

    config = OmegaConf.load(os.path.join(cwd, config_file))['trainer']
    config.cwd = str(cwd)
    reproducibility(config)

    dt_string = now.strftime("%d_%m_%Y_%H.%M.%S")
    cpkt_fol_name = os.path.join(
        config.cwd,
        f'checkpoints/model_{config.model.name}/dataset_{config.dataset.name}/date_{dt_string}')

    log = Logger(path=cpkt_fol_name, name='LOG').get_logger()
    best_pred_loss = 1000.0
    log.info(f"Checkpoint folder {cpkt_fol_name}")
    log.info(f"date and time = {dt_string}")
    log.info(f'pyTorch VERSION:{torch.__version__}')
    log.info(f'CUDA VERSION: {torch.version.cuda}')
    log.info(f'CUDNN VERSION:{torch.backends.cudnn.version()}')
    log.info(f'Number CUDA Devices: {torch.cuda.device_count()}')

    if args.tensorboard:
        writer = SummaryWriter(os.path.join(cpkt_fol_name, 'runs', util.datestr()))
    else:
        writer = None

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    log.info(f'device: {device}')

    training_generator, val_generator, test_generator, class_dict = select_dataset(config)
    n_classes = len(class_dict)

    model = select_model(config, n_classes)
    log.info(f"{model}")

    if config.load:
        pth_file, _ = load_checkpoint(config.pretrained_cpkt, model,
                                      strict=True, load_seperate_layers=False)
    else:
        pth_file = None

    if config.cuda and use_cuda:
        if torch.cuda.device_count() > 1:
            log.info(f"Let's use {torch.cuda.device_count()} GPUs!")
            model = torch.nn.DataParallel(model)
    model.to(device)

    optimizer, scheduler = select_optimizer(model, config['model'], None)
    log.info(f'{model}')
    log.info(f"Checkpoint Folder {cpkt_fol_name}")
    shutil.copy(os.path.join(config.cwd, config_file), cpkt_fol_name)

    trainer = Trainer(config,
                      model=model,
                      optimizer=optimizer,
                      data_loader=training_generator,
                      writer=writer,
                      logger=log,
                      valid_data_loader=val_generator,
                      test_data_loader=test_generator,
                      class_dict=class_dict,
                      lr_scheduler=scheduler,
                      checkpoint_dir=cpkt_fol_name)
    trainer.train()
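# A sketch (assumed, not from the source) of the 'trainer' section of
# config/trainer_config.yml that this main() reads; keys mirror the accesses
# above (config.cuda, config.load, config.pretrained_cpkt, config.model.name,
# config.dataset.name) and the values are illustrative.
from omegaconf import OmegaConf

example_config = OmegaConf.create({
    'trainer': {
        'cuda': True,
        'load': False,              # set True to resume from pretrained_cpkt
        'pretrained_cpkt': '',
        'model': {'name': 'COVIDNet_large'},
        'dataset': {'name': 'COVIDx'},
    }
})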
def initialize(args):
    if args.device is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device)
    model = select_model(args)
    optimizer = select_optimizer(args, model)
    if args.cuda:
        model.cuda()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transformer = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224, scale=(0.5, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ])
    val_transformer = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])

    train_params = {'batch_size': args.batch_size,
                    'shuffle': True,
                    'num_workers': 2}
    test_params = {'batch_size': args.batch_size,
                   'shuffle': False,
                   'num_workers': 1}

    if args.dataset_name == 'COVIDx':
        train_loader = COVIDxDataset(mode='train', n_classes=args.classes,
                                     dataset_path=args.dataset, dim=(224, 224))
        val_loader = COVIDxDataset(mode='test', n_classes=args.classes,
                                   dataset_path=args.dataset, dim=(224, 224))
        test_loader = None
        training_generator = DataLoader(train_loader, **train_params)
        val_generator = DataLoader(val_loader, **test_params)
        test_generator = None
    elif args.dataset_name == 'COVID_CT':
        train_loader = CovidCTDataset(root_dir='data/covid_ct_dataset',
                                      txt_COVID='data/covid_ct_dataset/trainCT_COVID.txt',
                                      txt_NonCOVID='data/covid_ct_dataset/trainCT_NonCOVID.txt',
                                      transform=train_transformer)
        val_loader = CovidCTDataset(root_dir='data/covid_ct_dataset',
                                    txt_COVID='data/covid_ct_dataset/valCT_COVID.txt',
                                    txt_NonCOVID='data/covid_ct_dataset/valCT_NonCOVID.txt',
                                    transform=val_transformer)
        test_loader = CovidCTDataset(root_dir='data/covid_ct_dataset',
                                     txt_COVID='data/covid_ct_dataset/testCT_COVID.txt',
                                     txt_NonCOVID='data/covid_ct_dataset/testCT_NonCOVID.txt',
                                     transform=val_transformer)
        training_generator = DataLoader(train_loader, **train_params)
        val_generator = DataLoader(val_loader, **test_params)
        test_generator = DataLoader(test_loader, **test_params)
    else:
        # Guard added: without it an unknown name leaves the generators undefined
        raise ValueError(f"Unknown dataset_name: {args.dataset_name}")

    return model, optimizer, training_generator, val_generator, test_generator
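# For reference, the initialize() variants above assume roughly these
# module-level imports (a reconstruction, not copied from the source files):
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from sklearn.utils.class_weight import compute_class_weight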