Example #1
    USE_CUDA = torch.cuda.is_available()
    device = torch.device("cuda" if USE_CUDA else "cpu")
    job_number = int(args.model)  # get job number
    pool = args.pool
    HOME_PATH = '/gpfs/data/denizlab/Users/bz1030/data/OAI_processed/mix/'
    summary_path = '/gpfs/data/denizlab/Users/bz1030/data/OAI_processed/'
    model_file_path = '/gpfs/data/denizlab/Users/bz1030/KneeNet/KneeProject/model/model_torch/model_densenet/model_large_weights3/epoch_17.pth'
    val = pd.read_csv(summary_path + 'test.csv').sample(
        n=5).reset_index()  # sample 5 rows from the test split for a quick validation run
    tensor_transform_test = transforms.Compose([
        CenterCrop(896),
        transforms.ToTensor(),
        lambda x: x.float(),
    ])
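    # Note: CenterCrop (and RandomCrop in the later examples) is referenced
    # unqualified rather than as transforms.CenterCrop, which suggests a
    # project-local transform operating on PIL images before ToTensor. A
    # hypothetical minimal version consistent with this usage (an assumption,
    # not the project's actual code) would be:
    #
    #   class CenterCrop:
    #       def __init__(self, size):
    #           self.size = size
    #       def __call__(self, img):  # img: PIL.Image
    #           w, h = img.size
    #           left, top = (w - self.size) // 2, (h - self.size) // 2
    #           return img.crop((left, top, left + self.size, top + self.size))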
    dataset_test = KneeGradingDataset(val,
                                      HOME_PATH,
                                      tensor_transform_test,
                                      stage='val')

    test_loader = data.DataLoader(dataset_test, batch_size=2)
    print('Validation data:', len(dataset_test))
    # Network
    #net = DenseNet(121,True,pool)
    net = dn.densenet121(pretrained=True)
    net.classifier = nn.Sequential(nn.Dropout(0.4), nn.Linear(1024, 5))  # swap the 1000-class ImageNet head for a 5-class one
    print(net)
    print('Number of model parameters: {}'.format(
        sum(p.numel() for p in net.parameters())))
    print('############### Model Finished ####################')
    criterion = nn.CrossEntropyLoss()
    if USE_CUDA:
        state_dict = torch.load(model_file_path)  # checkpoint saved on GPU; pass map_location='cpu' when loading without CUDA
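The first snippet ends right after reading the checkpoint. A minimal continuation, assuming the file stores a plain state dict saved via torch.save(net.state_dict(), ...):

    net.load_state_dict(state_dict)
    net = net.to(device)
    net.eval()  # disable dropout for validation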
Example #2
    train = pd.read_csv(summary_path + 'train.csv')  # .sample(n=50).reset_index() for a quick debug run
    val = pd.read_csv(summary_path + 'val.csv')  # pre-split validation set

    start_val = 0
    tensor_transform_train = transforms.Compose([
                    RandomCrop(896),
                    transforms.ToTensor(),
                    lambda x: x.float(),
                ])
    tensor_transform_val = transforms.Compose([
                    CenterCrop(896),
                    transforms.ToTensor(),
                    lambda x: x.float(),
                ])
    dataset_train = KneeGradingDataset(train, HOME_PATH, tensor_transform_train, stage='train')
    dataset_val = KneeGradingDataset(val, HOME_PATH, tensor_transform_val, stage='val')

    train_loader = data.DataLoader(dataset_train, batch_size=8)
    val_loader = data.DataLoader(dataset_val, batch_size=8)
    print('Training data:', len(dataset_train))
    print('Validation data:', len(dataset_val))
    net = resnet34(pretrained=True)
    print(net)
    net.avgpool = nn.AvgPool2d(28, 28)  # 896-pixel inputs give a 28x28 feature map; pool it down to 1x1
    net.fc = nn.Sequential(nn.Dropout(0.2), nn.Linear(512, 5))  # 5-class head, following OULU's paper
    load_file = None
    # e.g. '/gpfs/data/denizlab/Users/bz1030/KneeNet/KneeProject/model/model_torch/model_flatten_linear_layer/model_weights3/epoch_3.pth'
    if load_file:
        net.load_state_dict(torch.load(load_file))
        start_epoch = 3  # resume from the epoch encoded in the checkpoint name
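This snippet stops before the actual training loop. A minimal sketch of the loop the setup implies (the loss, optimizer settings, epoch budget, and batch structure are assumptions, not the original author's choices):

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=1e-4)
    if not load_file:
        start_epoch = 0
    for epoch in range(start_epoch, 20):  # epoch budget is an assumption
        net.train()
        for images, labels in train_loader:  # assumes (image, label) batches
            optimizer.zero_grad()
            loss = criterion(net(images), labels)
            loss.backward()
            optimizer.step()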
Example #3
    HOME_PATH = '/gpfs/data/denizlab/Users/bz1030/data/OAI_processed/mix/'
    summary_path = '/gpfs/data/denizlab/Users/bz1030/data/OAI_processed/'
    log_file_path = '/gpfs/data/denizlab/Users/bz1030/model/model_torch/model_flatten_linear_layer/train_log'
    model_file_path = '/gpfs/data/denizlab/Users/bz1030/model/model_torch/model_flatten_linear_layer/model_weights'

    test = pd.read_csv(summary_path + 'test.csv').sample(
        n=40000).reset_index()  # sample 40,000 rows from the test split

    start_test = 0
    tensor_transform_test = transforms.Compose([
        CenterCrop(896),
        transforms.ToTensor(),
        lambda x: x.float(),
    ])
    dataset_test = KneeGradingDataset(test,
                                      HOME_PATH,
                                      tensor_transform_test,
                                      stage='test')

    test_loader = data.DataLoader(dataset_test, batch_size=10)
    print('Test data:', len(dataset_test))

    net = resnet34(pretrained=True)
    net.avgpool = nn.AvgPool2d(7, 7)  # 28x28 feature map -> 4x4 after 7x7 average pooling
    net.fc = nn.Sequential(nn.Dropout(0.1), nn.Linear(8192, 5))  # 512 channels * 4 * 4 = 8192 features into the head
    print(net)
    # Wrap in DataParallel before loading so the checkpoint's 'module.'-prefixed keys match
    net = nn.DataParallel(net)
    optimizer = optim.Adam(net.parameters(), lr=0.0001, weight_decay=0)
    net.load_state_dict(torch.load('model_weights/checkpoint_520.pth'))
    net.eval()  # disable dropout for inference
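The last example ends after switching to eval mode. A minimal inference pass over the loader (the accuracy bookkeeping and batch structure are assumptions):

    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in test_loader:  # assumes (image, label) batches
            preds = net(images).argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    print('Test accuracy: {:.4f}'.format(correct / total))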