Example 1
import requests
import torch
from PIL import Image

from transformers import ViTMAEConfig, ViTMAEFeatureExtractor, ViTMAEForPreTraining


def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
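    # size the ViT-MAE config to match the checkpoint variant named in the URL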
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

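    # download the original MAE checkpoint; its weights are stored under the "model" key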
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    feature_extractor = ViTMAEFeatureExtractor(size=config.image_size)

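    # convert_state_dict (which remaps the original parameter names to the Transformers
    # naming scheme) is assumed to be defined elsewhere in the same conversion script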
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

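    # sanity-check the converted model on a sample image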
    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    inputs = feature_extractor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving feature extractor to {pytorch_dump_folder_path}")
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
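
A hypothetical invocation of the converter above might look like the following sketch; the checkpoint URL and the output folder are placeholders, not values taken from the example itself.

# Hypothetical driver for the converter above; the URL and output folder are placeholders.
if __name__ == "__main__":
    checkpoint_url = "https://example.com/checkpoints/mae_pretrain_vit_base.pth"  # placeholder
    pytorch_dump_folder_path = "./vit-mae-base-converted"  # placeholder
    convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path)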
Example 2
def create_and_check_for_pretraining(self, config, pixel_values, labels):
    # torch_device and to_2tuple are assumed to be provided by the surrounding test module
    model = ViTMAEForPreTraining(config)
    model.to(torch_device)
    model.eval()
    result = model(pixel_values)
    # expected sequence length = num_patches
    image_size = to_2tuple(self.image_size)
    patch_size = to_2tuple(self.patch_size)
    num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
    expected_seq_len = num_patches
    expected_num_channels = self.patch_size ** 2 * self.num_channels
    self.parent.assertEqual(result.logits.shape, (self.batch_size, expected_seq_len, expected_num_channels))
Example 3
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size)**2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(
            result.logits.shape,
            (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor(
            [self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(
            result.logits.shape,
            (self.batch_size, num_patches, expected_num_channels))
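
As a quick illustration of the shape arithmetic checked in the test above, here is a minimal sketch assuming the standard ViT-MAE base geometry (224x224 images, 16x16 patches, 3 channels); the concrete numbers are for illustration only.

# Minimal sketch of the shape arithmetic used above, assuming a 224x224 image,
# 16x16 patches and 3 channels.
image_size, patch_size, num_channels, batch_size = 224, 16, 3, 2

num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 patches
expected_num_channels = patch_size**2 * num_channels  # 16 * 16 * 3 = 768 values per patch

# ViTMAEForPreTraining reconstructs pixels per patch, so its logits are expected to have
# shape (batch_size, num_patches, patch_size**2 * num_channels).
print((batch_size, num_patches, expected_num_channels))  # (2, 196, 768)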