    val_fold = CassavaDataset(val_subset, transform=data_transforms['val'])
    train_loader = torch.utils.data.DataLoader(train_fold,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=4)
    val_loader = torch.utils.data.DataLoader(val_fold,
                                             batch_size=batch_size,
                                             shuffle=False,  # no need to shuffle validation data
                                             num_workers=4)

    # Bundle the dataloaders into the dict expected by train_model
    dataloaders_dict = {'train': train_loader, 'val': val_loader}

    # Initialize the model for this fold
    fold_model, _ = initialize_model(model_name,
                                     num_classes,
                                     feature_extract,
                                     use_pretrained=True)
    fold_model = fold_model.to(device)
    # fold_optimizer = optim.Adam(fold_model.parameters(), lr=0.0001, weight_decay=0.0001)
    fold_optimizer = optim.Adam(fold_model.parameters(), lr=base_lr)
    # Set up the loss function
    fold_criterion = nn.CrossEntropyLoss()

    # Run the training and validation loop for this fold
    _, hist, best_acc = train_model(fold_model,
                                    dataloaders_dict,
                                    fold_criterion,
                                    fold_optimizer,
                                    device,
                                    checkpoint_save_path,
                                    num_epoch_to_stop_if_no_better,
                                    num_epochs=num_epochs)  # assumption: the call is truncated here in the source
Example #2
    image_tensor = test_transforms(img).float()
    image_tensor = image_tensor.unsqueeze(0)  # add a batch dimension
    # torch.autograd.Variable is deprecated; tensors work directly.
    # 'de' in the original was a typo for 'device'.
    input_tensor = image_tensor.to(device)
    output = loaded_model(input_tensor)
    return output


# Initialize the fold models and load their checkpoints for evaluation
print('Loading checkpoints...')
# model_name_prefix = 'cassava_se_resnext50_32x4d.pth_'
model_name_prefix = 'nocrop_se_resnext101_32x4d.pth_'
list_model = []
for i in range(num_fold):
    fold_model, _ = initialize_model(model_name,
                                     num_classes,
                                     True,
                                     use_pretrained=True)
    ckp_path = 'pytorch_space/' + model_name_prefix + str(i)
    checkpoint = torch.load(ckp_path, map_location=device)  # map_location keeps CPU-only machines working
    fold_model.load_state_dict(checkpoint['model_state_dict'])
    fold_model.eval()
    fold_model.to(device)
    list_model.append(fold_model)

# Classify images
print('Classifying...')
test_dir = 'data/test/0'
# test_dir = ''
filenames = os.listdir(test_dir)
with torch.no_grad():
    # for k, fname in enumerate(filenames[:5]):
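    # The source snippet is truncated here; below is a minimal sketch of the loop
    # the commented line above hints at, under the assumption that the test images
    # are PIL-loadable files and that the fold models are ensembled by averaging
    # their softmax outputs (soft voting).
    from PIL import Image
    for fname in filenames:
        img = Image.open(os.path.join(test_dir, fname)).convert('RGB')
        batch = test_transforms(img).float().unsqueeze(0).to(device)
        # Average class probabilities across the k fold models
        probs = torch.stack([torch.softmax(m(batch), dim=1)
                             for m in list_model]).mean(dim=0)
        print(fname, '->', probs.argmax(dim=1).item())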
Example #3
#
# This helper function sets the ``.requires_grad`` attribute of the
# parameters in the model to False when we are feature extracting. By
# default, when we load a pretrained model all of the parameters have
# ``.requires_grad=True``, which is fine if we are training from scratch
# or finetuning. However, if we are feature extracting and only want to
# compute gradients for the newly initialized layer then we want all of
# the other parameters to not require gradients. This will make more sense
# later.
#

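# The helper described above is not shown in this excerpt; here is a minimal
# sketch matching the one in the PyTorch finetuning tutorial the note is
# drawn from:
def set_parameter_requires_grad(model, feature_extracting):
    # Freeze all parameters when feature extracting, so gradients are only
    # computed for the layers that get re-initialized afterwards.
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False

# In the tutorial this is called inside initialize_model(...) right after the
# pretrained weights are loaded, e.g.
# set_parameter_requires_grad(model_ft, feature_extract).
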
# Initialize the model for this run
model_ft, input_size = initialize_model(model_name,
                                        num_classes,
                                        feature_extract,
                                        use_pretrained=True)

# Print the model we just instantiated
print(model_ft)

# ### Custom dataset


class CassavaDataset(torch.utils.data.Dataset):
    def __init__(self, subset, transform=None):
        self.subset = subset
        self.transform = transform

    def __getitem__(self, index):
        x, y = self.subset[index]