def __call__(self, dictionary):
    """Apply a random rotation and horizontal flip to the sample image.

    The applied augmentation is encoded as an integer ``rotation_factor``
    (flip adds 4, giving 8 possible codes) and stored under
    ``self.output_key``.  When ``self.targets_key`` is set, a combined
    class id ``target * 8 + rotation_factor`` is stored as well,
    optionally one-hot encoded.

    Args:
        dictionary: sample dict; must contain ``self.input_key``.

    Returns:
        The same dict, updated in place with the augmented image and
        the rotation-code keys.
    """
    img = dictionary[self.input_key]
    factor = 0

    # Randomly rotate; the rotation transform supplies its own factor.
    if random.random() < self.rotate_probability:
        factor = self.rotate.get_params()["factor"]
        img = self.rotate.apply(img=img, factor=factor)

    # Randomly flip; encode the flip by shifting the code by 4.
    if random.random() < self.hflip_probability:
        factor += 4
        img = self.hflip.apply(img=img)

    dictionary[self.input_key] = img
    dictionary[self.output_key] = factor

    if self.targets_key is not None:
        # Combined class: 8 augmentation codes per original class.
        combined = dictionary[self.targets_key] * 8 + factor
        dictionary[f"class_rotation_{self.targets_key}"] = combined
        if self.one_hot_classes is not None:
            encoded = utils.get_one_hot(combined, self.one_hot_classes)
            dictionary[f"class_rotation_{self.targets_key}_one_hot"] = encoded

    return dictionary
def __getitem__(self, index):
    """Load one multi-channel example, skipping unreadable instances.

    Opens one image per channel, stacks them into a single array and
    converts it to float32 HWC layout.  If the per-channel images cannot
    be stacked (mismatched sizes / unreadable files raise ``TypeError``
    or ``ValueError``), the instance is skipped and the next index is
    tried recursively.

    Args:
        index: sample index into ``self.records``.

    Returns:
        In train mode: dict with ``features``, ``targets``,
        ``targets_one_hot``, ``id_code`` and ``site``.
        Otherwise: dict with ``features``, ``id_code`` and ``site``.
    """
    paths = [self._get_image_path(index, ch) for ch in self.channels]
    images = [self.open_image(p) for p in paths]
    try:
        img = np.stack(images)
    except (TypeError, ValueError) as e:
        print(f'Warning: cannot concatenate images! {e.__class__.__name__}: {e}')
        for filename, image in zip(paths, images):
            # Bug fix: report the offending file path instead of the
            # hard-coded "(unknown)" placeholder (filename was unused).
            print(f'\tpath={filename}, size={image.size}')
        # Bug fix: name the instance actually being skipped *before*
        # advancing the index (the old code advanced first and logged
        # the replacement index).
        print(f'Skipping instance {index} and trying another one...')
        index = (index + 1) % len(self)
        return self[index]
    finally:
        # Release file handles whether or not stacking succeeded;
        # np.stack has already copied the pixel data on success.
        for image in images:
            if hasattr(image, 'close'):
                image.close()
    img = img.astype(np.float32)
    # Channels-first stack -> channels-last (H, W, C) layout.
    img = img.transpose(1, 2, 0)
    r = self.records[index]
    if self.train:
        target = int(r.sirna)
        onehot = get_one_hot(target, num_classes=self.n_classes)
        return {
            'features': img,
            'targets': target,
            'targets_one_hot': onehot,
            'id_code': r.id_code,
            'site': self.site,
        }
    return {'features': img, 'id_code': r.id_code, 'site': self.site}
def __getitem__(self, index):
    """Build one sample from per-channel image files.

    In ``'six'`` channels mode every channel image is opened, optionally
    transformed, and concatenated along dim 0 into one tensor.  Otherwise
    the channels are merged into a single RGB PIL image (optionally
    transformed).

    Args:
        index: sample index into ``self.items``.

    Returns:
        dict with ``features`` and ``targets`` (plus the item's original
        metadata unless ``self.drop_meta``; plus ``targets_one_hot`` when
        ``self.onehot`` is set).
    """
    item = self.items[index].copy()
    bunch = item.pop('images')
    if self.channels_mode == 'six':
        channels = []
        # bunch holds (channel_index, filename) pairs; keep channel order.
        for i, filename in sorted(bunch, key=itemgetter(0)):
            img = self.open_fn(filename)
            img = img if self.tr is None else self.tr(img)
            channels.append(img)
        sample = torch.cat(channels, dim=0)
    else:
        filenames = [filename for _, filename in bunch]
        t = rio.load_images_as_tensor(filenames)
        arr = rio.convert_tensor_to_rgb(t)
        img = PIL.Image.fromarray(arr.astype(np.uint8))
        sample = img if self.tr is None else self.tr(img)
    y = item[self.targets_key]
    if self.drop_meta:
        item = dict(features=sample, targets=y)
    else:
        item.update(dict(features=sample, targets=y))
    if self.onehot:
        y_enc = get_one_hot(
            y, smoothing=self.label_smoothing, num_classes=self.num_classes)
        # Bug fix: the encoding belongs in the returned dict; the old
        # code wrote it into `sample` (a tensor / PIL image), which is
        # not dict-indexable.
        item['targets_one_hot'] = y_enc
    return item
def __getitem__(self, index):
    """Build one sample as an ordered dict of named channel arrays.

    In ``'six'`` channels mode each channel file becomes a separate
    ``chan_{i}`` array; in ``'rgb'`` mode the channels are merged into an
    RGB image and split into three ``chan_{i}`` planes.

    Args:
        index: sample index into ``self.items``.

    Returns:
        dict with ``features`` (the channel OrderedDict) and ``targets``
        (plus the item's original metadata unless ``self.drop_meta``;
        plus ``targets_one_hot`` when ``self.onehot`` is set).
    """
    item = self.items[index].copy()
    # (channel_index, filename) pairs, ordered by channel index.
    bunch = sorted(item.pop(self.features_key), key=itemgetter(0))
    channels = OrderedDict()
    if self.channels_mode == 'six':
        for i, filename in bunch:
            channels[f'chan_{i}'] = np.array(self.open_fn(filename))
    elif self.channels_mode == 'rgb':
        img = np.stack([self.open_fn(filename) for filename in bunch])
        img = rio.convert_tensor_to_rgb(img)
        for i in range(3):
            # Bug fix: `channes` was a NameError typo for `channels`,
            # so 'rgb' mode crashed before this line completed.
            channels[f'chan_{i}'] = img[i, :, :]
    y = item[self.targets_key]
    if self.drop_meta:
        sample = item
        sample['features'] = channels
        sample['targets'] = y
    else:
        sample = dict(features=channels, targets=y)
    if self.onehot:
        y_enc = get_one_hot(
            y, smoothing=self.label_smoothing, num_classes=self.num_classes)
        sample['targets_one_hot'] = y_enc
    return sample
def _wrap_with_meta(self, image, meta):
    """Pack an image and its record metadata into a sample dict.

    Args:
        image: the loaded/transformed image features.
        meta: record with ``id_code`` and, in train mode, ``sirna``.

    Returns:
        In train mode: dict with ``features``, ``targets``,
        ``targets_one_hot`` and ``id_code``; otherwise only
        ``features`` and ``id_code``.
    """
    if not self.train:
        # Inference-time samples carry no label information.
        return {'features': image, 'id_code': meta.id_code}
    target = int(meta.sirna)
    encoded = get_one_hot(
        target,
        num_classes=self.n_classes,
        smoothing=self.label_smoothing,
    )
    return {
        'features': image,
        'targets': target,
        'targets_one_hot': encoded,
        'id_code': meta.id_code,
    }
def __call__(self, row):
    """Read a single scalar value out of an annotation row.

    The value under ``self.input_key`` (falling back to
    ``self.default_value``) is cast to ``self.dtype`` and, when
    ``self.one_hot_classes`` is set, replaced by its one-hot encoding.

    Args:
        row: elem in your dataset.

    Returns:
        dict mapping ``self.output_key`` to the scalar (or its
        one-hot encoding).
    """
    value = self.dtype(row.get(self.input_key, self.default_value))
    if self.one_hot_classes is not None:
        value = get_one_hot(
            value, self.one_hot_classes, smoothing=self.smoothing)
    return {self.output_key: value}
def __getitem__(self, index):
    """Load one multi-channel example.

    Opens one image per channel, stacks them, and converts the result
    to a float32 channels-last (H, W, C) array.

    Args:
        index: sample index into ``self.records``.

    Returns:
        In train mode: dict with ``features``, ``targets`` and
        ``targets_one_hot``; otherwise ``features`` and ``id_code``.
    """
    channel_paths = [
        self._get_image_path(index, channel) for channel in self.channels
    ]
    stacked = np.stack([self.open_image(path) for path in channel_paths])
    # Channels-first stack -> channels-last layout, as float32.
    features = stacked.astype(np.float32).transpose(1, 2, 0)

    record = self.records[index]
    if not self.train:
        return {'features': features, 'id_code': record.id_code}

    target = int(record.sirna)
    return {
        'features': features,
        'targets': target,
        'targets_one_hot': get_one_hot(target, num_classes=self.n_classes),
    }
def __call__(self, element):
    """Extract one scalar value from an annotation element.

    Looks up ``self.input_key`` in ``element`` (using
    ``self.default_value`` when absent), casts it with ``self.dtype``,
    and one-hot encodes it when ``self.one_hot_classes`` is set.

    Args:
        element: elem in your dataset.

    Returns:
        dict mapping ``self.output_key`` to the resulting value.
    """
    raw = element.get(self.input_key, self.default_value)
    scalar = self.dtype(raw)
    if self.one_hot_classes is None:
        return {self.output_key: scalar}
    encoded = get_one_hot(
        scalar,
        self.one_hot_classes,
        smoothing=self.smoothing,
    )
    return {self.output_key: encoded}
def __getitem__(self, index):
    """Build one sample by concatenating per-channel images.

    Each ``(channel_index, filename)`` pair in the item's ``images``
    bunch is opened, optionally transformed, and concatenated along
    dim 0 into the ``features`` tensor.

    Args:
        index: sample index into ``self.items`` / ``self.targets``.

    Returns:
        dict with the item's metadata plus ``features`` and ``targets``
        (and ``targets_one_hot`` when ``self.onehot`` is set).
    """
    sample = self.items[index].copy()
    # Bug fix: the old code rebound `sample` (the metadata dict) to the
    # concatenated tensor, then called `sample.update(...)` on that
    # tensor with `features=sample` — an AttributeError and a
    # self-reference.  Keep the dict and the tensor in separate names.
    bunch = sample.pop('images')
    channels = []
    for i, filename in sorted(bunch, key=itemgetter(0)):
        img = self.open_fn(filename)
        img = img if self.tr is None else self.tr(img)
        channels.append(img)
    features = torch.cat(channels, dim=0)
    y = self.targets[index]
    sample.update(dict(features=features, targets=y))
    if self.onehot:
        y_enc = get_one_hot(
            y, smoothing=self.label_smoothing, num_classes=self.num_classes)
        sample['targets_one_hot'] = y_enc
    # Bug fix: removed the leftover `pdb.set_trace()` debugging hook,
    # which would halt every data-loading worker.
    return sample