def get_dataloader(batch_size):
    """Build the train and validation DataLoaders for the forest dataset.

    Both splits are resized to 256x256 and normalized with the module-level
    `mean`/`std`. The training split additionally gets a random
    shift/scale/rotate augmentation; validation keeps only flip/transpose.

    Args:
        batch_size: batch size used for both loaders.

    Returns:
        (train_loader, valid_loader) tuple of DataLoaders; the training
        loader shuffles, the validation loader does not.
    """
    train_transform = Compose([
        Lambda(lambda img: randomShiftScaleRotate(
            img, u=0.75, shift_limit=6, scale_limit=6, rotate_limit=45)),
        Lambda(lambda img: randomFlip(img)),
        Lambda(lambda img: randomTranspose(img)),
        Lambda(lambda img: toTensor(img)),
        Normalize(mean=mean, std=std),
    ])
    train_set = KgForestDataset(
        split='train-40479',
        transform=train_transform,
        height=256,
        width=256,
    )
    train_loader = DataLoader(dataset=train_set,
                              batch_size=batch_size,
                              shuffle=True)

    # NOTE: shift/scale/rotate is deliberately left out for validation;
    # flips/transposes are kept (presumably for test-time augmentation —
    # confirm against how the loader is consumed).
    valid_transform = Compose([
        Lambda(lambda img: randomFlip(img)),
        Lambda(lambda img: randomTranspose(img)),
        Lambda(lambda img: toTensor(img)),
        Normalize(mean=mean, std=std),
    ])
    valid_set = KgForestDataset(
        split='validation-3000',
        transform=valid_transform,
        height=256,
        width=256,
    )
    valid_loader = DataLoader(dataset=valid_set,
                              batch_size=batch_size,
                              shuffle=False)

    return train_loader, valid_loader
                best_thresh = r
                best_score = score
        threshold[i] = best_thresh
        print(i, best_score, best_thresh)
    return threshold


if __name__ == '__main__':
    # Wrap in DataParallel BEFORE loading the checkpoint — presumably the
    # weights were saved from a DataParallel model, so the state_dict keys
    # carry the 'module.' prefix (TODO confirm against how the model was saved).
    model1 = nn.DataParallel(densenet161(pretrained=True).cuda())
    model1.load_state_dict(torch.load('../models/densenet161.pth'))
    # The model is already on the GPU (moved above); only switch to eval mode.
    model1.eval()

    validation = KgForestDataset(
        split='validation-3000',
        transform=Compose(
            [
                # Random flip/transpose kept at evaluation time, so each pass
                # over the loader sees a differently-augmented validation set.
                Lambda(lambda x: randomFlip(x)),
                Lambda(lambda x: randomTranspose(x)),
                Lambda(lambda x: toTensor(x)),
                Normalize(mean=mean, std=std)
            ]
        ),
        height=256,
        width=256
    )

    valid_dataloader = DataLoader(dataset=validation, shuffle=False, batch_size=512)

    # Average per-class thresholds over several stochastic passes (the
    # validation transform is random, so each call differs). One named
    # constant keeps the loop bound and the divisor in sync.
    n_rounds = 10
    threshold = np.zeros(17)  # one threshold per label class
    for _ in range(n_rounds):
        threshold += optimize_threshold([model1], [valid_dataloader])
    print(threshold / n_rounds)