def get_labels():
    """Return the ground-truth labels of the validation set as a flat array.

    Builds an unshuffled DataLoader over /valid.csv so the returned labels
    line up with csv row order, then concatenates every label batch.
    Weights and images from the loader are ignored.
    """
    loader = torch.utils.data.DataLoader(
        per_class_dataset(csv_file=r'/valid.csv',
                          root_dir=present_dir_path,
                          transform=baseline.test_transform,
                          RGB=True),
        batch_size=4,
        shuffle=False,
        num_workers=1)
    # np.append on an empty float array upcasts labels to float64,
    # matching the original accumulation behavior exactly.
    all_labels = np.array([])
    for _images, labels, _weights in loader:
        all_labels = np.append(all_labels, labels.numpy())
    return all_labels
def make_Difficulty():
    """Rank training samples from easy to hard by per-sample weighted BCE loss.

    Loads a trained single-logit DenseNet-169 checkpoint, runs it over
    /train.csv (unshuffled so losses line up with csv row order), computes
    each sample's weighted binary cross-entropy, and saves the sample
    indices sorted by ascending loss to ./result/easy_to_hard.npy.
    """
    batch_size = 8
    # Original guarded this behind a dead `load_model == True` flag, which
    # would have left load_path undefined had the flag ever been False.
    load_path = "./save/densenet169/mura3cropfocal_1_acc.pkl"

    train_transform = transforms.Compose([
        transforms.Resize((320, 320)),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        # ImageNet statistics — matches what the backbone expects.
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    # Single-logit binary head; 1664 is DenseNet-169's feature width.
    net = torchvision.models.densenet169(pretrained=False)
    net.classifier = nn.Linear(1664, 1)
    # map_location + .to(device) fix two CPU-only crashes in the original:
    # loading a GPU-saved checkpoint without map_location, and the
    # unconditional .cuda() calls on each batch below.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net.load_state_dict(torch.load(load_path, map_location=device))
    net = net.to(device)

    present_dir_path = os.getcwd()
    trainset = per_class_dataset(csv_file=r'/train.csv',
                                 root_dir=present_dir_path,
                                 transform=train_transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batch_size,
                                              shuffle=False,  # keep csv order
                                              num_workers=4)
    tot_loss = np.array([])
    net.eval()
    print("Start to evaluate")
    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        for i, data in enumerate(trainloader):
            print("step %d" % i)
            images, labels, weights = data
            images = images.to(device)
            labels = labels.to(device).float()
            weights = weights.to(device).float()
            # Take the single logit per sample, squashed to a probability.
            outputs = torch.sigmoid(net(images)).select(1, 0)
            # Clamp away from 0/1 so log() stays finite.
            outputs = torch.clamp(outputs, min=1e-7, max=1 - 1e-7)
            loss = -(labels * outputs.log() +
                     (1 - labels) * (1 - outputs).log())
            loss *= weights
            tot_loss = np.append(tot_loss, loss.cpu().numpy())

    # argsort ascending: easiest (lowest-loss) sample indices come first.
    Difficulty = np.argsort(tot_loss)
    np.save("./result/easy_to_hard.npy", Difficulty)
def get_from_densenet201(load_path):
    """Run a saved DenseNet-201 binary classifier over the validation set.

    Args:
        load_path: path to a state_dict checkpoint for a DenseNet-201 whose
            classifier was replaced by a single-logit Linear head.

    Returns:
        Whatever get_predicts() produces for /valid.csv — predictions in
        loader order (shuffle=False), presumably; confirm against
        get_predicts.
    """
    batch_size = 4
    test_transform = densenet201.test_transform
    testset = per_class_dataset(csv_file=r'/valid.csv',
                                root_dir=present_dir_path,
                                transform=test_transform,
                                RGB=True)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=1)
    net = torchvision.models.densenet201(pretrained=False)
    net.classifier = nn.Linear(1920, 1)  # 1920 = DenseNet-201 feature width
    # The CUDA transfer below is deliberately commented out, so this may run
    # on CPU; map_location lets a GPU-saved checkpoint load there instead of
    # raising a deserialization error.
    map_location = None if torch.cuda.is_available() else torch.device('cpu')
    net.load_state_dict(torch.load(load_path, map_location=map_location))

    # if torch.cuda.is_available():
    #     net = net.cuda()

    predicts = get_predicts(net, testloader)
    print("finish model: %s" % load_path)
    return predicts
# 예제 #4 (Example #4 — scrape-site separator; the stray "0" was a vote count)
# NOTE: to be modified when testing on a different model.
load_path = './save/densenet169/b8_lr1e-4_d0_logloss.pkl'
root_dir = os.getcwd()

# DenseNet-201 backbone with a single-logit binary head (1920 features).
net = torchvision.models.densenet201(pretrained=False)
net.classifier = nn.Linear(1920, 1)

total = 0
result = 0.0

test_transform = densenet.test_transform
print(test_transform)

# Build one dataset and one unshuffled loader per
# (study type, positive/negative) combination.
for i in study_type:
    for j in pn:
        dataset = per_class_dataset(root_dir,
                                    '/traincsv/%s_%s.csv' % (i, j),
                                    transform=test_transform,
                                    RGB=True)
        testset[i][j] = dataset
        total += len(dataset)
        testloader[i][j] = torch.utils.data.DataLoader(dataset,
                                                       batch_size=8,
                                                       shuffle=False,
                                                       num_workers=2)

# Multi-GPU use is announced but intentionally disabled below.
if torch.cuda.device_count() > 1:
    print("Let's use GPUS!")
    # net = nn.DataParallel(net,device_ids=[0,2,3,4,5])

net.load_state_dict(torch.load(load_path))

if torch.cuda.is_available():
    net = net.cuda()
# 예제 #5 (Example #5 — scrape-site separator; the stray "0" was a vote count)
    learning_rate = args.learning_rate
    drop_rate = args.drop_rate
    load_model = args.load_model
    if load_model:
        load_path = args.load_path
    net_name = args.net_name
    save_path = args.save_path
    if os.path.exists(save_path) == False:
        os.makedirs(save_path)
    save_path += net_name + '.pkl'
    loss_type = args.loss_type

    present_dir_path = os.getcwd()

    trainset = per_class_dataset(csv_file=r'/train.csv',
                                 root_dir=present_dir_path,
                                 transform=train_transform,
                                 RGB=True)
    # print ("trainset 0")
    # print (trainset[0])
    # print ("trainset 1")
    # print (trainset[1])
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=4)
    testset = per_class_dataset(csv_file='/valid.csv',
                                root_dir=present_dir_path,
                                transform=test_transform,
                                RGB=True)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=batch_size,