def test_TensorKey_eq_norm(self):
    """Tensors that differ only in their norm must yield unequal keys."""
    tensor_a = torch.tensor((0.0, 0.5, 1.0))
    tensor_b = torch.tensor((0.0, 0.6, 1.0))
    self.assertFalse(pystiche.TensorKey(tensor_a) == pystiche.TensorKey(tensor_b))
def test_eq_norm(self):
    """Tensors that differ only in their norm must yield unequal keys."""
    tensor_a = torch.tensor((0.0, 0.5, 1.0))
    tensor_b = torch.tensor((0.0, 0.6, 1.0))
    assert pystiche.TensorKey(tensor_a) != pystiche.TensorKey(tensor_b)
def test_eq_precision(self):
    """Differences below the configured precision are ignored by equality."""
    fine = torch.tensor(1.0)
    almost = torch.tensor(1.0001)
    # With the default precision the tiny difference is still visible ...
    assert pystiche.TensorKey(fine) != pystiche.TensorKey(almost)
    # ... but rounding to three digits makes the keys collide.
    key_a = pystiche.TensorKey(fine, precision=3)
    key_b = pystiche.TensorKey(almost, precision=3)
    assert key_a == key_b
def test_TensorKey_eq_precision(self):
    """Differences below the configured precision are ignored by equality."""
    fine = torch.tensor(1.0)
    almost = torch.tensor(1.0001)
    # With the default precision the tiny difference is still visible ...
    self.assertFalse(pystiche.TensorKey(fine) == pystiche.TensorKey(almost))
    # ... but rounding to three digits makes the keys collide.
    key_a = pystiche.TensorKey(fine, precision=3)
    key_b = pystiche.TensorKey(almost, precision=3)
    self.assertTrue(key_a == key_b)
def test_eq_min(self):
    """Tensors that differ only in their minimum must yield unequal keys.

    ``y`` is constructed to share the max (1.0) and the norm of ``x`` while
    having a different minimum, so only the min component of the key differs.
    """
    x = torch.tensor((0.0, 0.5, 1.0))
    # Renamed from ``min`` to avoid shadowing the builtin.
    new_min = 0.1
    # Solve norm(y)**2 == new_min**2 + intermediate**2 + 1.0**2 for the
    # middle entry so that y has the same norm as x.
    intermediate = torch.sqrt(torch.norm(x) ** 2.0 - (1.0 + new_min ** 2.0)).item()
    y = torch.tensor((new_min, intermediate, 1.0))
    key1 = pystiche.TensorKey(x)
    key2 = pystiche.TensorKey(y)
    assert key1 != key2
def test_eq_max(self):
    """Tensors that differ only in their maximum must yield unequal keys.

    ``y`` is constructed to share the min (0.0) and the norm of ``x`` while
    having a different maximum, so only the max component of the key differs.
    """
    x = torch.tensor((0.0, 0.5, 1.0))
    # Renamed from ``max`` to avoid shadowing the builtin.
    new_max = 0.9
    # Solve norm(y)**2 == 0.0**2 + intermediate**2 + new_max**2 for the
    # middle entry so that y has the same norm as x.
    intermediate = torch.sqrt(torch.norm(x) ** 2.0 - new_max ** 2.0).item()
    y = torch.tensor((0.0, intermediate, new_max))
    key1 = pystiche.TensorKey(x)
    key2 = pystiche.TensorKey(y)
    assert key1 != key2
def test_vgg_multi_layer_encoder(subtests, vgg_archs, vgg_multi_layer_encoder_loaders, enc_asset_loader):
    """For every VGG architecture, compare per-layer encoding keys with the stored asset."""
    for arch, loader in zip(vgg_archs, vgg_multi_layer_encoder_loaders):
        with subtests.test(arch=arch):
            asset = enc_asset_loader(arch)
            multi_layer_encoder = loader(
                pretrained=True,
                weights="torch",
                preprocessing=False,
                allow_inplace=False,
            )
            layers = tuple(multi_layer_encoder.children_names())
            with torch.no_grad():
                encodings = multi_layer_encoder(asset.input.image, layers)
            precision = asset.params.precision
            actual = {
                layer: pystiche.TensorKey(encoding, precision=precision)
                for layer, encoding in zip(layers, encodings)
            }
            assert actual == asset.output.enc_keys
def _get_image_or_guide(self: loss.Loss, attr: str, comparison_only: bool = False) -> torch.Tensor:
    """Collect ``attr`` from the immediate child losses and return it.

    Children without the attribute (or with it set to ``None``) are skipped.
    Raises ``RuntimeError`` if no child provides the attribute or if the
    collected tensors do not all share the same key.
    """
    candidates: List[torch.Tensor] = []
    for child in self._losses():
        if comparison_only and not isinstance(child, loss.ComparisonLoss):
            continue
        # A missing attribute and an attribute set to None are treated alike.
        value = getattr(child, attr, None)
        if value is not None:
            candidates.append(value)

    if not candidates:
        raise RuntimeError(f"No immediate children has a {attr}.")

    reference = candidates[0]
    reference_key = pystiche.TensorKey(reference)
    for other in candidates[1:]:
        if not (reference_key == other):
            raise RuntimeError(f"The immediate children have non-matching {attr}")
    return reference
def test_VGGMultiLayerEncoder(self):
    """For every VGG architecture, compare per-layer encoding keys with the stored asset."""
    base_archs = ("vgg11", "vgg13", "vgg16", "vgg19")
    archs = base_archs + tuple(f"{arch}_bn" for arch in base_archs)
    for arch in archs:
        with self.subTest(arch=arch):
            asset = self.load_asset(path.join("enc", arch))
            # Look up the architecture-specific factory by name.
            builder = enc.__dict__[f"{arch}_multi_layer_encoder"]
            multi_layer_encoder = builder(
                weights="torch", preprocessing=False, allow_inplace=False
            )
            layers = tuple(multi_layer_encoder.children_names())
            with torch.no_grad():
                encodings = multi_layer_encoder(asset.input.image, layers)
            precision = asset.params.precision
            actual = {
                layer: pystiche.TensorKey(encoding, precision=precision)
                for layer, encoding in zip(layers, encodings)
            }
            self.assertDictEqual(actual, asset.output.enc_keys)
def forward(self, input: torch.Tensor, layers: Sequence[str], store: bool = False) -> Tuple[torch.Tensor, ...]:
    r"""Encode the input on the given layers in a single forward pass.

    Encodings already present in the storage for this input are reused
    instead of executing the forward pass again.

    Args:
        input: Input.
        layers: Layers.
        store: If ``True``, store the encodings.

    Returns:
        Tuple of encodings which order corresponds to ``layers``.
    """
    storage = copy(self._storage)
    input_key = pystiche.TensorKey(input)
    # Layers whose encodings of this exact input are already cached.
    cached_layers = {name for name, key in storage.keys() if key == input_key}
    missing_layers = set(layers) - cached_layers
    if missing_layers:
        deepest_layer = self.extract_deepest_layer(missing_layers)
        for name, module in self.named_children_to(deepest_layer, include_last=True):
            input = storage[(name, input_key)] = module(input)
    if store:
        self._storage = storage
    return tuple(storage[(name, input_key)] for name in layers)
def encode(self, input: torch.Tensor) -> None:
    """Encode ``input`` on all registered layers and cache the results."""
    if not self.registered_layers:
        return
    input_key = pystiche.TensorKey(input)
    encodings = self(input, layers=self.registered_layers, store=True)
    self._storage = {
        (layer, input_key): encoding
        for layer, encoding in zip(self.registered_layers, encodings)
    }
def _generate_sequential_enc_asset(file, model, image, precision=2):
    """Feed ``image`` through ``model`` layer by layer and store per-layer keys."""
    model.eval()
    original_image = image.clone()
    enc_keys = {}
    for name, child in model.named_children():
        image = child(image)
        enc_keys[name] = pystiche.TensorKey(image, precision=precision)
    store_asset(
        {"image": original_image},
        {"precision": precision},
        {"enc_keys": enc_keys},
        file,
    )
def encode(self, input: torch.Tensor) -> None:
    r"""Encode the given input and store the encodings of all
    :attr:`MultiLayerEncoder.registered_layers`.

    Args:
        input: Input.
    """
    if not self.registered_layers:
        return
    input_key = pystiche.TensorKey(input)
    encodings = self(input, layers=self.registered_layers, store=True)
    self._storage = {
        (layer, input_key): encoding
        for layer, encoding in zip(self.registered_layers, encodings)
    }
def test_AlexNetMultiLayerEncoder(self):
    """Compare AlexNet per-layer encoding keys with the stored asset."""
    asset = self.load_asset(path.join("enc", "alexnet"))
    multi_layer_encoder = enc.alexnet_multi_layer_encoder(
        weights="torch", preprocessing=False, allow_inplace=False
    )
    layers = tuple(multi_layer_encoder.children_names())
    with torch.no_grad():
        encodings = multi_layer_encoder(asset.input.image, layers)
    precision = asset.params.precision
    actual = {
        layer: pystiche.TensorKey(encoding, precision=precision)
        for layer, encoding in zip(layers, encodings)
    }
    self.assertDictEqual(actual, asset.output.enc_keys)
def test_alexnet_multi_layer_encoder(enc_asset_loader):
    """Compare AlexNet per-layer encoding keys with the stored asset."""
    asset = enc_asset_loader("alexnet")
    multi_layer_encoder = enc.alexnet_multi_layer_encoder(
        pretrained=True, weights="torch", preprocessing=False, allow_inplace=False
    )
    layers = tuple(multi_layer_encoder.children_names())
    with torch.no_grad():
        encodings = multi_layer_encoder(asset.input.image, layers)
    precision = asset.params.precision
    actual = {
        layer: pystiche.TensorKey(encoding, precision=precision)
        for layer, encoding in zip(layers, encodings)
    }
    assert actual == asset.output.enc_keys
def forward(self, input: torch.Tensor, layers: Sequence[str], store: bool = False) -> Tuple[torch.Tensor, ...]:
    """Encode ``input`` on ``layers``, reusing cached encodings where possible."""
    storage = copy(self._storage)
    input_key = pystiche.TensorKey(input)
    # Layers whose encodings of this exact input are already cached.
    cached_layers = {name for name, key in storage.keys() if key == input_key}
    missing_layers = set(layers) - cached_layers
    if missing_layers:
        deepest_layer = self.extract_deepest_layer(missing_layers)
        for name, module in self.named_children_to(deepest_layer, include_last=True):
            input = storage[(name, input_key)] = module(input)
    if store:
        self._storage = storage
    return tuple(storage[(name, input_key)] for name in layers)
def test_main(self, enc_asset_loader):
    """Compare AlexNet per-layer encoding keys with the stored asset."""
    asset = enc_asset_loader("alexnet")
    multi_layer_encoder = enc.alexnet_multi_layer_encoder(
        pretrained=True, weights="torch", preprocessing=False, allow_inplace=False
    )
    layers = tuple(multi_layer_encoder.children_names())
    with torch.no_grad():
        encodings = multi_layer_encoder(asset.input.image, layers)
    precision = asset.params.precision
    actual = {
        layer: pystiche.TensorKey(encoding, precision=precision)
        for layer, encoding in zip(layers, encodings)
    }
    assert actual == asset.output.enc_keys

@pytest.mark.parametrize(
    ("framework", "should_be_available"),
    [
        pytest.param(framework, should_be_available, id=framework)
        for framework, should_be_available in [
            ("torch", True),
            ("caffe", False),
        ]
    ],
)
def test_state_dict_url(self, framework, should_be_available):
    """Only frameworks with published weights expose a state-dict URL."""
    multi_layer_encoder = enc.alexnet_multi_layer_encoder(pretrained=False)
    if should_be_available:
        assert isinstance(multi_layer_encoder.state_dict_url(framework), str)
    else:
        with pytest.raises(RuntimeError):
            multi_layer_encoder.state_dict_url(framework)
def test_eq_tensor(self):
    """A key compares equal to the tensor it was created from."""
    tensor = torch.tensor((0.0, 0.5, 1.0))
    assert pystiche.TensorKey(tensor) == tensor
def test_TensorKey_eq(self):
    """A key equals itself and the key of a permutation of its tensor."""
    tensor = torch.tensor((0.0, 0.5, 1.0))
    key = pystiche.TensorKey(tensor)
    self.assertTrue(key == key)
    # Reversing the tensor preserves min, max, and norm, hence the key.
    self.assertTrue(key == pystiche.TensorKey(tensor.flip(0)))
def test_TensorKey_eq_tensor(self):
    """A key compares equal to the tensor it was created from."""
    tensor = torch.tensor((0.0, 0.5, 1.0))
    self.assertTrue(pystiche.TensorKey(tensor) == tensor)
def test_repr_smoke(self):
    """``repr`` of a key must produce a string without raising."""
    key = pystiche.TensorKey(torch.tensor((0.0, 0.5, 1.0)))
    assert isinstance(repr(key), str)
def test_TensorKey_eq_other(self):
    """Comparing a key against an unsupported type raises ``TypeError``.

    The previous ``self.assertTrue`` wrapper around the comparison was dead
    code: the comparison itself raises before ``assertTrue`` could ever run.
    """
    x = torch.tensor((0.0, 0.5, 1.0))
    key = pystiche.TensorKey(x)
    with self.assertRaises(TypeError):
        key == 1
def test_hash_smoke(self):
    """``hash`` of a key must produce an int without raising."""
    key = pystiche.TensorKey(torch.tensor((0.0, 0.5, 1.0)))
    assert isinstance(hash(key), int)
def test_TensorKey_eq_dtype(self):
    """Keys of tensors with different dtypes must not compare equal."""
    tensor = torch.tensor((0.0, 0.5, 1.0))
    single_key = pystiche.TensorKey(tensor.float())
    double_key = pystiche.TensorKey(tensor.double())
    self.assertFalse(single_key == double_key)
def test_TensorKey_eq_device(self):
    """Keys of tensors on different devices must not compare equal."""
    tensor = torch.tensor((0.0, 0.5, 1.0))
    cpu_key = pystiche.TensorKey(tensor.cpu())
    cuda_key = pystiche.TensorKey(tensor.cuda())
    self.assertFalse(cpu_key == cuda_key)
def test_eq_size(self):
    """Keys of tensors with different sizes must not compare equal."""
    tensor = torch.tensor((0.0, 0.5, 1.0))
    full_key = pystiche.TensorKey(tensor)
    truncated_key = pystiche.TensorKey(tensor[:-1])
    assert full_key != truncated_key
def test_eq_dtype(self):
    """Keys of tensors with different dtypes must not compare equal."""
    tensor = torch.tensor((0.0, 0.5, 1.0))
    single_key = pystiche.TensorKey(tensor.float())
    double_key = pystiche.TensorKey(tensor.double())
    assert single_key != double_key
def test_eq_device(self):
    """Keys of tensors on different devices must not compare equal."""
    tensor = torch.tensor((0.0, 0.5, 1.0))
    cpu_key = pystiche.TensorKey(tensor.cpu())
    cuda_key = pystiche.TensorKey(tensor.cuda())
    assert cpu_key != cuda_key
def test_TensorKey_eq_size(self):
    """Keys of tensors with different sizes must not compare equal."""
    tensor = torch.tensor((0.0, 0.5, 1.0))
    full_key = pystiche.TensorKey(tensor)
    truncated_key = pystiche.TensorKey(tensor[:-1])
    self.assertFalse(full_key == truncated_key)
def test_eq(self):
    """A key equals itself and the key of a permutation of its tensor."""
    tensor = torch.tensor((0.0, 0.5, 1.0))
    key = pystiche.TensorKey(tensor)
    assert key == key
    # Reversing the tensor preserves min, max, and norm, hence the key.
    assert key == pystiche.TensorKey(tensor.flip(0))