def experiment_5(net):
    logging.info('\nRunning experiment 5\n')
    glaucoma_model_path = os.path.join(BASE_OUTPUT, MODELS['glaucoma'])
    glaucoma_train_set, _ = load_and_transform_data(GLAUCOMA_DATA,
                                                    BATCH_SIZE,
                                                    data_augmentation=False)
    net = train_model(model=net,
                      device=device,
                      train_loader=glaucoma_train_set,
                      epochs=GLAUCOMA_EPOCHS,
                      batch_size=BATCH_SIZE,
                      lr=LEARNING_RATE)
    torch.save(net.state_dict(), glaucoma_model_path)

    # Generate run test
    rt = ScoreCalciumSelection()
    folds_acc = []
    torch.save(net.state_dict(), 'tmp_model.pt')
    for i in range(5):
        rt.generate_run_set(i + 1)
        # Restore the glaucoma-pretrained weights before each fold
        net.load_state_dict(torch.load('tmp_model.pt'))
        data_loader_test, n_classes = load_and_transform_data(
            os.path.join(SCORE_CALCIUM_DATA, TEST))
        folds_acc.append(evaluate_model(net, data_loader_test, device))

    # Confidence interval computation
    mean, stdev, offset, ci = statistics.get_fold_metrics(folds_acc)
    logging.info('[Experiment 5] --> Model performance:')
    logging.info(f'    Folds Acc.: {folds_acc}')
    logging.info(f'    Mean: {mean}')
    logging.info(f'    Stdev: {stdev}')
    logging.info(f'    Offset: {offset}')
    logging.info(f'    CI (95%): {ci}')
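# For reference, a minimal sketch of what `statistics.get_fold_metrics` is
# assumed to compute above: the mean, sample standard deviation, and a 95%
# confidence interval over the fold accuracies using a Student's t critical
# value. The project's real helper may differ; this is an illustration only.
import numpy as np
from scipy import stats as st

def get_fold_metrics_sketch(folds_acc, confidence=0.95):
    acc = np.asarray(folds_acc, dtype=float)
    mean = acc.mean()
    stdev = acc.std(ddof=1)  # sample standard deviation
    # t critical value for n - 1 degrees of freedom
    t_crit = st.t.ppf((1 + confidence) / 2, df=len(acc) - 1)
    offset = t_crit * stdev / np.sqrt(len(acc))
    ci = (mean - offset, mean + offset)
    return mean, stdev, offset, ci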
def experiment_1(net):
    logging.info('\nRunning experiment 1\n')
    # All layers must remain trainable in this experiment
    for param in net.features.parameters():
        assert param.requires_grad
    for param in net.classifier.parameters():
        assert param.requires_grad

    # Generate run test
    rt = ScoreCalciumSelection()
    folds_acc = []
    torch.save(net.state_dict(), 'tmp_model.pt')
    for i in range(5):
        rt.generate_run_set(i + 1)
        net.load_state_dict(torch.load('tmp_model.pt'))
        data_loader_test, n_classes = load_and_transform_data(
            os.path.join(SCORE_CALCIUM_DATA, TEST))
        folds_acc.append(evaluate_model(net, data_loader_test, device))

    # Confidence interval computation
    mean, stdev, offset, ci = statistics.get_fold_metrics(folds_acc)
    logging.info('[Experiment 1] --> Model performance:')
    logging.info(f'    Folds Acc.: {folds_acc}')
    logging.info(f'    Mean: {mean}')
    logging.info(f'    Stdev: {stdev}')
    logging.info(f'    Offset: {offset}')
    logging.info(f'    CI (95%): {ci}')
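# `evaluate_model` is assumed here to return plain top-1 accuracy on the
# given loader; a minimal sketch of that contract (the project's actual
# implementation may also log losses or per-class metrics):
def evaluate_model_sketch(model, data_loader, device):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, labels in data_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            preds = model(inputs).argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    return correct / total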
def experiment_13(net):
    logging.info('\nRunning experiment 13\n')
    glaucoma_model_path = os.path.join(BASE_OUTPUT, MODELS['glaucoma'])
    net.load_state_dict(torch.load(glaucoma_model_path))

    # Freeze every layer except the last classifier layer
    for param in net.features.parameters():
        param.requires_grad = False
    for param in net.classifier.parameters():
        param.requires_grad = False
    for param in net.classifier[-1].parameters():
        param.requires_grad = True

    for param in net.features.parameters():
        assert not param.requires_grad
    for param in net.classifier[:-1].parameters():
        assert not param.requires_grad
    for param in net.classifier[-1].parameters():
        assert param.requires_grad

    # Generate run test
    rt = ScoreCalciumSelection()
    folds_acc = []
    torch.save(net.state_dict(), 'tmp_model.pt')
    for i in range(5):
        rt.generate_run_set(i + 1)
        net.load_state_dict(torch.load('tmp_model.pt'))

        # Load and transform datasets
        train_data_loader, n_classes = load_and_transform_data(
            os.path.join(SCORE_CALCIUM_DATA, TRAIN),
            BATCH_SIZE,
            data_augmentation=False)
        net = train_model(model=net,
                          device=device,
                          train_loader=train_data_loader,
                          epochs=EPOCHS,
                          batch_size=BATCH_SIZE,
                          lr=LEARNING_RATE)

        # Test model
        data_loader_test, n_classes = load_and_transform_data(
            os.path.join(SCORE_CALCIUM_DATA, TEST))
        folds_acc.append(evaluate_model(net, data_loader_test, device))

    # Confidence interval computation
    mean, stdev, offset, ci = statistics.get_fold_metrics(folds_acc)
    logging.info('[Experiment 13] --> Model performance:')
    logging.info(f'    Folds Acc.: {folds_acc}')
    logging.info(f'    Mean: {mean}')
    logging.info(f'    Stdev: {stdev}')
    logging.info(f'    Offset: {offset}')
    logging.info(f'    CI (95%): {ci}')
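# Quick sanity helper for the freezing scheme in experiment_13: count
# trainable vs. total parameters. For a stock torchvision VGG-16 with only
# classifier[-1] unfrozen, the trainable count should match that final
# Linear layer alone. Hypothetical helper, not part of the original code.
def count_trainable_params(model):
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    logging.info(f'Trainable params: {trainable} / {total}')
    return trainable, total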
"""VGG-16 pretrained simple example.

Trying to understand both data augmentation and output values (model robustness).
"""
# Logging instance
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

# Select device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Using device: {device}')

# Define model architecture
model_architecture = 'vgg16'

# Generate run test
rt = ScoreCalciumSelection()
folds_acc = []
for i in range(5):
    t0 = time.time()
    rt.generate_run_set(i + 1)
    logging.info(f'Generate test with fold {i + 1}')

    # Test model
    logging.info("Test model before training")
    data_loader_test, n_classes = load_and_transform_data(
        os.path.join(SCORE_CALCIUM_DATA, TEST))

    # Init model from disk if it exists, to avoid a random re-init of weights on each iteration
    net = modify_net_architecture(n_classes,
                                  freeze_layers=False,
"""VGG-16 pretrained simple example.

Trying to understand both data augmentation and output values (model robustness).
"""
# Logging instance
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

# Select device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Using device: {device}')

# Define model architecture
model_architecture = 'vgg16'

# Generate run test (leave-one-out variant)
rt = ScoreCalciumSelection(criteria='leave-one-out')
folds_acc = []
for i in range(N_SAMPLES):
    t0 = time.time()
    rt.generate_run_set(i)
    logging.info(f'Generate test sample {i}')

    # Test model
    logging.info("Test model before training")
    data_loader_test, n_classes = load_and_transform_data(
        os.path.join(SCORE_CALCIUM_DATA, TEST))

    # Init model from disk if it exists, to avoid a random re-init of weights on each iteration
    net = modify_net_architecture(n_classes,
                                  freeze_layers=True,
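# The call above is truncated in the source; for orientation, a minimal
# sketch of what `modify_net_architecture` presumably does for VGG-16:
# load pretrained weights, optionally freeze the convolutional features,
# and swap the final classifier layer for an n_classes-way head. Names and
# defaults here are assumptions, not the project's actual implementation.
import torch.nn as nn
from torchvision import models

def modify_net_architecture_sketch(n_classes, freeze_layers=True):
    net = models.vgg16(pretrained=True)
    if freeze_layers:
        for param in net.features.parameters():
            param.requires_grad = False
    in_features = net.classifier[-1].in_features
    net.classifier[-1] = nn.Linear(in_features, n_classes)  # new trainable head
    return net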