def get_attr_ims(attr, num=10):
    """Sample `num` random images carrying attribute `attr`.

    Looks up the ids matching `attr` via the `prep` helper module, wraps
    them in an on-disk image loader, and draws `num` indices uniformly at
    random (with possible repeats, since `np.random.randint` samples with
    replacement).

    Returns a pair `(images, ids)` where `ids[i]` is the image id of
    `images[i]`.
    """
    matching_ids = prep.get_attr(attr_map, id_attr_map, attr)
    loader = prep.ImageDiskLoader(matching_ids)
    picks = np.random.randint(0, len(loader), num)
    images = [loader[p] for p in picks]
    picked_ids = [loader.im_ids[p] for p in picks]
    return images, picked_ids
# --- Example #2 (separate scraped snippet; its enclosing header line was lost in extraction) ---
    # Output locations for training artifacts (paths are relative to the CWD).
    LOG_PATH = './logs/log.pkl'
    MODEL_PATH = './checkpoints/'
    COMPARE_PATH = './comparisons/'

    # Select GPU only when both the USE_CUDA flag (defined elsewhere in this
    # file — not visible here) and actual CUDA availability agree.
    use_cuda = USE_CUDA and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print('Using device', device)
    print('num cpus:', multiprocessing.cpu_count())
    print(torch.cuda.is_available())

    # training code
    # Split ids into train/test partitions; `prep.split_dataset` is a project
    # helper (not visible here), presumably deterministic — TODO confirm.
    train_ids, test_ids = prep.split_dataset()
    print('num train_images:', len(train_ids))
    print('num test_images:', len(test_ids))

    # Lazy, from-disk image datasets over the two id partitions.
    data_train = prep.ImageDiskLoader(train_ids)
    data_test = prep.ImageDiskLoader(test_ids)
    print(data_train)

    # DataLoader extras that only make sense on CUDA: pinned host memory for
    # faster host->device copies, and one worker per CPU core.
    kwargs = {
        'num_workers': multiprocessing.cpu_count(),
        'pin_memory': True
    } if use_cuda else {}
    # Build the shuffled training loader over the dataset constructed above.
    # Bug fix: the previous code passed `x_train_new`, a name that is never
    # defined in this file (NameError at runtime); the commented-out original
    # shows `data_train` was intended.
    train_loader = torch.utils.data.DataLoader(data_train,
                                               batch_size=BATCH_SIZE,
                                               shuffle=True,
                                               **kwargs)
    #test_loader = torch.utils.data.DataLoader(data_test, batch_size=TEST_BATCH_SIZE, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(x_test_new,