Example #1
    def __getitem__(self, index):
        # ImageFolder subclass: also accepts slice, list, tuple, ndarray and
        # tensor indices; returns PIL images by default, or tensors when the
        # transform includes ToTensor.
        try:
            # Fast path: index is a plain integer.
            return ImageFolder.__getitem__(self, index)[0]
        except (ValueError, TypeError):
            if isinstance(index, slice):
                # self.samples holds (img, target) tuples; keep the images only.
                return [
                    ImageFolder.__getitem__(self, i)[0]
                    for i in range(super(ExtendedDataset, self).__len__())[index]
                ]
            elif isinstance(index, (tuple, list, np.ndarray, torch.Tensor)):
                return [ImageFolder.__getitem__(self, i)[0] for i in index]
            else:
                # Return a list, never a generator: default_collate raises
                # "batch must contain tensors, numpy arrays, numbers, dicts
                # or lists; found <class 'generator'>" for generators.
                raise TypeError(
                    type_error_msg('index', type(index),
                                   ExtendedDataset.INDEX_LIST))
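
A quick usage sketch of the extended indexing above (the dataset root 'data/train' is hypothetical); every form returns images only, since the targets are stripped:

from torchvision import transforms
from torchvision.datasets import ImageFolder

dataset = ExtendedDataset('data/train', transform=transforms.ToTensor())

img = dataset[0]            # integer index: a single tensor
firsts = dataset[:4]        # slice: a list of four tensors
picks = dataset[[0, 2, 5]]  # list of indices: a list of three tensors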
Example #2
    def __getitem__(self, index):
        sample, target = ImageFolder.__getitem__(self, index)
        # Pad the image to square resolution, then resize to the model input.
        img, _ = pad_to_square(sample, 0)
        img = resize(img, self.img_size)
        return img, target
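
pad_to_square and resize are not defined in this snippet; the sketch below is one plausible implementation inferred from how they are called (it assumes sample is already a CHW tensor, e.g. produced by a ToTensor transform), not necessarily the source project's exact helpers:

import torch.nn.functional as F

def pad_to_square(img, pad_value):
    # Pad the shorter side so the CHW tensor becomes square.
    c, h, w = img.shape
    dim_diff = abs(h - w)
    pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2
    pad = (0, 0, pad1, pad2) if h <= w else (pad1, pad2, 0, 0)
    img = F.pad(img, pad, "constant", value=pad_value)
    return img, pad

def resize(image, size):
    # Resize a CHW tensor to size x size.
    return F.interpolate(image.unsqueeze(0), size=size, mode="nearest").squeeze(0)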
Example #3
# Confusion matrix: one row and one column per output class.
cm = [[0 for x in range(tgc.network.fc.out_features)]
      for y in range(tgc.network.fc.out_features)]

nb_good = 0
nb_bad = 0
tgc.network.eval()
imgn = 0

feature_file = open(args.output, 'wb')

with torch.no_grad():
    idx = list(range(len(test)))
    shuffle(idx)
    for sample_num in idx:
        sample, target = test.__getitem__(sample_num)

        score = torch.zeros(1, tgc.network.fc.out_features).to(dev)
        features = torch.zeros(1, tgc.network.fc.in_features).to(dev)
        for n in range(args.count):
            # Re-fetch the sample so each pass sees a fresh random augmentation.
            sample, target = test.__getitem__(sample_num)
            out, _, ap = tgc.network(sample.unsqueeze_(0).to(dev))
            ap = ap.view(ap.size(0), -1)
            score += out
            features += ap
            feature_file.write(target.to_bytes(1, byteorder='big',
                                               signed=True))
            numpy.save(feature_file, ap.cpu().data.numpy())
        _, p = torch.max(score, 1)
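
The feature file interleaves a one-byte signed target with one .npy blob per augmentation pass; a minimal sketch for reading it back (assuming the same args.output path as above):

import numpy

records = []
with open(args.output, 'rb') as f:
    while True:
        b = f.read(1)
        if not b:
            break
        target = int.from_bytes(b, byteorder='big', signed=True)
        features = numpy.load(f)  # reads exactly one numpy.save() blob
        records.append((target, features))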
Example #4
    def __getitem__(self, idx: int) -> Dict:
        image, label = TVImageFolder.__getitem__(self, idx)

        return {"img": image, "label": label}
Example #5
transform = transforms.Compose([
    transforms.RandomCrop(size=198, pad_if_needed=True),
    transforms.Resize(64),
    transforms.RandomHorizontalFlip(0.5),
    transforms.RandomVerticalFlip(0.5),
    transforms.RandomRotation(90),
    transforms.ToTensor()
])

train_dataset = ImageFolder('data/New101/train', transform=transform)
# Oversample the last 2012 (bg_*) samples tenfold to rebalance the classes.
bg_sample = train_dataset.samples[-2012:]
bg_target = train_dataset.targets[-2012:]
train_dataset.samples += 10 * bg_sample
train_dataset.targets += 10 * bg_target

# Touch every sample once to verify that each image loads and transforms cleanly.
for i in tqdm(range(len(train_dataset))):
    train_dataset.__getitem__(i)

val_dataset = ImageFolder('data/New101/val')
for i in tqdm(range(len(val_dataset))):
    val_dataset.__getitem__(i)
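
Duplicating samples/targets in place, as above, is a simple oversampling trick; an alternative that leaves the dataset untouched is a WeightedRandomSampler (a sketch, reusing train_dataset from above):

from collections import Counter
from torch.utils.data import DataLoader, WeightedRandomSampler

counts = Counter(train_dataset.targets)
weights = [1.0 / counts[t] for t in train_dataset.targets]
sampler = WeightedRandomSampler(weights, num_samples=len(weights))
loader = DataLoader(train_dataset, batch_size=64, sampler=sampler)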

Example #6
if os.path.exists(os.path.join('ocrd_typegroups_classifier', 'models',
                               'classifier.tgc')):
    tgc = TypegroupsClassifier.load(
        os.path.join('ocrd_typegroups_classifier', 'models', 'classifier.tgc'))
else:
    print('Could not load a model to evaluate')
    quit(1)

validation = ImageFolder('lines/validation', transform=None)
validation.target_transform = tgc.classMap.get_target_transform(
    validation.class_to_idx)
good = 0
bad = 0
with torch.no_grad():
    tgc.network.eval()
    for idx in tqdm(range(len(validation)), desc='Evaluation'):
        sample, target = validation.__getitem__(idx)
        path, _ = validation.samples[idx]
        if target == -1:
            # Skip samples whose class is not covered by the classifier's map.
            continue
        result = tgc.classify(sample, 224, 64, True)
        highscore = max(result)
        label = tgc.classMap.cl2id[result[highscore]]
        if target == label:
            good += 1
        else:
            bad += 1

accuracy = 100 * good / float(good + bad)

print('    Good:', good)
print('     Bad:', bad)
print('Accuracy: %.2f%%' % accuracy)
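
The lookup above assumes tgc.classify returns a mapping from score to class label, so max(result) picks the top-scoring key; a minimal illustration with hypothetical values:

result = {0.91: 'antiqua', 0.06: 'fraktur', 0.03: 'textura'}
highscore = max(result)         # 0.91
class_name = result[highscore]  # 'antiqua'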
Example #7
File: ImageNet.py Project: raguram/eva
from os.path import join

from torchvision.datasets import ImageFolder, VisionDataset

# Utility is a project-specific helper module from raguram/eva
# (a hypothetical sketch of it follows this example).
class TinyImageNet(VisionDataset):

    def __init__(self, root, train=True, transform=None, target_transform=None):
        super().__init__(root, transform=transform,
                         target_transform=target_transform)
        self.train = train
        self.__load_classes__(root)
        if train:
            self.train_folder = join(root, "train")
            self.__prepare_train__(root)
        else:
            self.test_images_folder = join(root, "val/images")
            self.test_annotations_folder = join(root, "val")
            self.__prepare_test__(root)

    def __load_classes__(self, root):
        # wnids.txt lists one class id (wnid) per line.
        self.idx_class = Utility.loadFileToArray(join(root, "wnids.txt"))
        self.class_idx = dict((c, i) for i, c in enumerate(self.idx_class))

    def __prepare_train__(self, root):
        self.trainImageFolder = ImageFolder(self.train_folder)
        cidx = self.trainImageFolder.class_to_idx
        self.trainImageFolder_idx_to_class = dict((cidx[k], k) for k in cidx)
        self.data_size = len(self.trainImageFolder)

    def __prepare_test__(self, root):
        self.test_images_dict = Utility.load_images_to_dict(self.test_images_folder, "JPEG")
        self.test_images_files = list(self.test_images_dict.keys())

        self.test_truth_labels = Utility.loadTsvAsDict(join(self.test_annotations_folder, "val_annotations.txt"))
        self.test_truth_labels = dict(
            (join(self.test_images_folder, f), self.test_truth_labels[f]) for f in self.test_truth_labels)
        self.data_size = len(self.test_images_files)

    def __getitem__(self, index):

        if self.train:
            x, class_y = self.__train_get_item__(index)
        else:
            x, class_y = self.__test_get_item__(index)

        if self.transform:
            x = self.transform(x)

        # Map the wnid string to its numeric class index.
        y = self.class_idx[class_y]
        return x, y

    def __train_get_item__(self, index):
        x, y = self.trainImageFolder.__getitem__(index)
        # Convert y to class
        class_y = self.trainImageFolder_idx_to_class[y]
        return x, class_y

    def __test_get_item__(self, index):
        imgFile = self.test_images_files[index]
        x = self.test_images_dict[imgFile]
        class_y = self.test_truth_labels[imgFile]
        return x, class_y

    def __len__(self):
        return self.data_size
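
The Utility helpers used by TinyImageNet live in the raguram/eva project and are not shown; the sketch below is inferred purely from the call sites above and is hypothetical, not the project's actual code:

import os
from PIL import Image

class Utility:
    @staticmethod
    def loadFileToArray(path):
        # One entry (e.g. a wnid) per line.
        with open(path) as f:
            return [line.strip() for line in f if line.strip()]

    @staticmethod
    def loadTsvAsDict(path):
        # val_annotations.txt rows: <file>\t<wnid>\t<bbox...>; keep file -> wnid.
        labels = {}
        with open(path) as f:
            for line in f:
                cols = line.rstrip('\n').split('\t')
                if len(cols) >= 2:
                    labels[cols[0]] = cols[1]
        return labels

    @staticmethod
    def load_images_to_dict(folder, ext):
        # Map full file path -> loaded PIL image for the given extension.
        return {
            os.path.join(folder, name):
                Image.open(os.path.join(folder, name)).convert('RGB')
            for name in os.listdir(folder) if name.endswith(ext)
        }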
Example #8
## Hyper-parameter
num_epochs, batch_size, learning_rate = 10, 64, 0.1

## Data Transform
transform = transforms.Compose([transforms.Resize((224, 224)),
                                transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])

## Custom Dataset
Train_dataset = ImageFolder(root='./food/train', transform=transform)
Test_dataset = ImageFolder(root='./food/test', transform=transform)
Train_loader = DataLoader(dataset=Train_dataset, shuffle=True, batch_size=batch_size)
# Test_loader = DataLoader(dataset=Test_dataset, shuffle=False, batch_size=batch_size)

## Get the sample data (undo the 0.5/0.5 normalization for display)
img = Train_dataset.__getitem__(index=0)[0]
plt.cla()
plt.imshow(img.permute(1, 2, 0) * 0.5 + 0.5)
plt.title('sample image')
plt.pause(1)

## Training
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(Train_loader):
        outputs = model(images)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
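
Test_dataset is built above but never consumed; a minimal evaluation sketch, assuming the commented-out Test_loader and the same model:

Test_loader = DataLoader(dataset=Test_dataset, shuffle=False, batch_size=batch_size)

model.eval()
correct, total = 0, 0
with torch.no_grad():
    for images, labels in Test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Test accuracy: %.2f%%' % (100.0 * correct / total))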