def to_iter(data_loader):
    # TODO: not sure how to make this cleaner
    for x, y in data_loader:
        # if it's not multi input
        # NOTE: torch.utils.data.DataLoader returns a list in this case
        if not isinstance(x, list):
            x = (x,)
        yield Batch(inputs=tuple(x), labels=y)
def dataset(self, texts, targets):
    for text, target in zip(texts, targets):
        t, t_len = self.encode_text(text)
        # add a batch dimension of size 1 to the encoded text and its length
        t, t_len = t.unsqueeze(0), t_len.unsqueeze(0)
        # look up the index of the target label in the vocabulary
        target_idx = self.vocab.stoi[target]
        yield Batch(inputs=(t,), labels=(target_idx,), additional_args=t_len)
def formatted_data_iter():
    dataset = torchvision.datasets.CIFAR10(
        root="data/test", train=False, download=True, transform=transforms.ToTensor()
    )
    dataloader = iter(
        torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=False, num_workers=2)
    )
    while True:
        images, labels = next(dataloader)
        yield Batch(inputs=images, labels=labels)
input_imgs = torch.cat(list(map(lambda i: full_img_transform(i), imgs)), 0)

visualizer = AttributionVisualizer(
    models=[model],
    score_func=lambda o: torch.nn.functional.softmax(o, 1),
    classes=list(map(lambda k: idx_to_labels[k][1], idx_to_labels.keys())),
    features=[
        ImageFeature(
            "Photo",
            baseline_transforms=[baseline_func],
            input_transforms=[],
        )
    ],
    dataset=[Batch(input_imgs, labels=[282, 849, 69])],
)

#########################################################################
# Note that running the cell above didn’t take much time at all, unlike
# our attributions above. That’s because Captum Insights lets you
# configure different attribution algorithms in a visual widget, after
# which it will compute and display the attributions. *That* process will
# take a few minutes.
#
# Running the cell below will render the Captum Insights widget. You can
# then choose attribution methods and their arguments, filter model
# responses based on predicted class or prediction correctness, see the
# model’s predictions with associated probabilities, and view heatmaps of
# the attribution compared with the original image.
#
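#########################################################################
# A minimal sketch of that rendering step (hedged: it assumes the
# ``visualizer`` configured above and a Jupyter notebook environment in
# which the widget can be displayed):

visualizer.render()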
def _formatted_data_iter(self, dl, normalize_func):
    dl_iter = iter(dl)
    while True:
        images, labels = next(dl_iter)
        images = normalize_func.decode(images).to(dl.device)
        yield Batch(inputs=images, labels=labels)
def formatted_data_iter(dataloader):
    dataloader = iter(dataloader)
    while True:
        images, labels = next(dataloader)
        yield Batch(inputs=images, labels=labels)
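# A hedged usage sketch: a generator like formatted_data_iter above is what
# gets passed as the dataset argument of AttributionVisualizer (constructor
# shown earlier). Here, model, classes, baseline_func, and dataloader are
# placeholders for objects assumed to be defined elsewhere.
visualizer = AttributionVisualizer(
    models=[model],
    score_func=lambda o: torch.nn.functional.softmax(o, 1),
    classes=classes,
    features=[
        ImageFeature(
            "Photo",
            baseline_transforms=[baseline_func],
            input_transforms=[],
        )
    ],
    dataset=formatted_data_iter(dataloader),
)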