import json
from pathlib import Path

import torch
from datasets import load_dataset
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image

from transformers import (
    BeitConfig,
    BeitFeatureExtractor,
    BeitForImageClassification,
    BeitForMaskedImageModeling,
    BeitForSemanticSegmentation,
)


def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our BEiT structure.
    """
    # define default BEiT configuration
    has_lm_head = "rvlcdip" not in checkpoint_url
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "datasets/huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename)), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    feature_extractor = BeitFeatureExtractor(size=config.image_size, resample=Image.BILINEAR, do_center_crop=False)
    image = prepare_img()

    encoding = feature_extractor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving feature extractor to {pytorch_dump_folder_path}")
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        feature_extractor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add feature extractor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
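# The converters in this file rely on a handful of small helpers defined elsewhere
# in the original scripts (create_rename_keys, rename_key, read_in_q_k_v, prepare_img).
# Below is a minimal sketch of the two simplest ones, inferred from how they are
# called here; the exact bodies in the original scripts may differ in detail.


def rename_key(dct, old, new):
    # pop the value stored under the old key and re-insert it under the new key
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # fetch a test image to run a verification forward pass on; the COCO cats
    # image commonly used in HuggingFace conversion scripts is assumed here
    import requests

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im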
def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our BEiT structure.
    """
    # define default BEiT configuration
    config = BeitConfig()
    has_lm_head = False
    is_semantic = False
    repo_id = "datasets/huggingface/label-files"
    # set config parameters based on URL
    if checkpoint_url[-9:-4] == "pt22k":
        # masked image modeling
        config.use_shared_relative_position_bias = True
        config.use_mask_token = True
        has_lm_head = True
    elif checkpoint_url[-9:-4] == "ft22k":
        # intermediate fine-tuning on ImageNet-22k
        config.use_relative_position_bias = True
        config.num_labels = 21841
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        # this dataset contains 21843 labels but the model only has 21841
        # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
        del id2label[9205]
        del id2label[15027]
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    elif checkpoint_url[-8:-4] == "to1k":
        # fine-tuning on ImageNet-1k
        config.use_relative_position_bias = True
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        if "384" in checkpoint_url:
            config.image_size = 384
        if "512" in checkpoint_url:
            config.image_size = 512
    elif "ade20k" in checkpoint_url:
        # fine-tuning on ADE20k (semantic segmentation)
        config.use_relative_position_bias = True
        config.num_labels = 150
        filename = "ade20k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.image_size = 640
        is_semantic = True
    else:
        raise ValueError("Checkpoint not supported, URL should either end with 'pt22k', 'ft22k', 'to1k' or 'ade20k'")

    # size of the architecture
    if "base" in checkpoint_url:
        pass
    elif "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        if "ade20k" in checkpoint_url:
            config.image_size = 640
            config.out_indices = [7, 11, 15, 23]
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)
    state_dict = state_dict["model"] if "ade20k" not in checkpoint_url else state_dict["state_dict"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head, is_semantic=is_semantic)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head, is_semantic=is_semantic)
    if is_semantic:
        # add prefix to decoder keys
        for key, val in state_dict.copy().items():
            val = state_dict.pop(key)
            if key.startswith("backbone.fpn"):
                key = key.replace("backbone.fpn", "fpn")
            state_dict[key] = val

    # load HuggingFace model
    if checkpoint_url[-9:-4] == "pt22k":
        model = BeitForMaskedImageModeling(config)
    elif "ade20k" in checkpoint_url:
        model = BeitForSemanticSegmentation(config)
    else:
        model = BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    if is_semantic:
        feature_extractor = BeitFeatureExtractor(size=config.image_size, do_center_crop=False)
        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
    else:
        feature_extractor = BeitFeatureExtractor(size=config.image_size, resample=Image.BILINEAR, do_center_crop=False)
        image = prepare_img()

    encoding = feature_extractor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = torch.Size([1, 1000])
    if checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k"):
        expected_shape = torch.Size([1, 196, 8192])
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k"):
        expected_shape = torch.Size([1, 196, 8192])
    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22k"):
        expected_shape = torch.Size([1, 21841])
        expected_logits = torch.tensor([2.2288, 2.4671, 0.7395])
        expected_class_idx = 2397
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22k"):
        expected_shape = torch.Size([1, 21841])
        expected_logits = torch.tensor([1.6881, -0.2787, 0.5901])
        expected_class_idx = 2396
    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft1k"):
        expected_logits = torch.tensor([0.1241, 0.0798, -0.6569])
        expected_class_idx = 285
    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-1.2385, -1.0987, -1.0108])
        expected_class_idx = 281
    elif checkpoint_url[:-4].endswith("beit_base_patch16_384_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-1.5303, -0.9484, -0.3147])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft1k"):
        expected_logits = torch.tensor([0.4610, -0.0928, 0.2086])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-0.4804, 0.6257, -0.1837])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_384_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([[-0.5122, 0.5117, -0.2113]])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_512_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-0.3062, 0.7261, 0.4852])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_base_patch16_640_pt22k_ft22ktoade20k"):
        expected_shape = (1, 150, 160, 160)
        expected_logits = torch.tensor(
            [
                [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
            ]
        )
    elif checkpoint_url[:-4].endswith("beit_large_patch16_640_pt22k_ft22ktoade20k"):
        expected_shape = (1, 150, 160, 160)
        expected_logits = torch.tensor(
            [
                [[-4.3305, -2.3049, -3.0161], [-2.9591, -1.5305, -2.2251], [-3.4198, -1.8004, -2.9062]],
                [[-5.8922, -3.7435, -4.3978], [-4.2063, -2.7872, -3.4755], [-4.2791, -3.1874, -4.1681]],
                [[0.9895, 4.3467, 4.7663], [4.2476, 5.6830, 6.1518], [4.5550, 6.2495, 6.5154]],
            ]
        )
    else:
        raise ValueError("Can't verify logits as model is not supported")

    assert logits.shape == expected_shape, "Shape of logits not as expected"
    if not has_lm_head:
        if is_semantic:
            assert torch.allclose(
                logits[0, :3, :3, :3], expected_logits, atol=1e-3
            ), "First elements of logits not as expected"
        else:
            print("Predicted class idx:", logits.argmax(-1).item())
            assert torch.allclose(
                logits[0, :3], expected_logits, atol=1e-3
            ), "First elements of logits not as expected"
            assert logits.argmax(-1).item() == expected_class_idx, "Predicted class index not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving feature extractor to {pytorch_dump_folder_path}")
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
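# read_in_q_k_v (called above) splits the fused qkv projection of the original
# timm-style checkpoint into the separate query/key/value matrices that the
# HuggingFace BEiT implementation expects. This is a minimal sketch of its core,
# inferred from the checkpoint layout; exact key handling in the original script
# may differ (has_lm_head is accepted for API parity but unused here).


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    for i in range(config.num_hidden_layers):
        # the original checkpoint stores q, k and v as one fused matrix;
        # BEiT has no key bias, only a query bias and a value bias
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # layer scale parameters: gamma_1/gamma_2 map to lambda_1/lambda_2 in HF BEiT
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")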
def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our BEiT structure.
    """
    # define default BEiT configuration
    config = BeitConfig()
    has_lm_head = False
    repo_id = "datasets/huggingface/label-files"
    # set config parameters based on URL
    if checkpoint_url[-9:-4] == "pt22k":
        # masked image modeling
        config.use_shared_relative_position_bias = True
        config.use_mask_token = True
        has_lm_head = True
    elif checkpoint_url[-9:-4] == "ft22k":
        # intermediate fine-tuning on ImageNet-22k
        config.use_relative_position_bias = True
        config.num_labels = 21841
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename)), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        # this dataset contains 21843 labels but the model only has 21841
        # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
        del id2label[9205]
        del id2label[15027]
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    elif checkpoint_url[-8:-4] == "to1k":
        # fine-tuning on ImageNet-1k
        config.use_relative_position_bias = True
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename)), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        if "384" in checkpoint_url:
            config.image_size = 384
        if "512" in checkpoint_url:
            config.image_size = 512
    else:
        raise ValueError("Checkpoint not supported, URL should either end with 'pt22k', 'ft22k' or 'to1k'")

    # size of the architecture
    if "base" in checkpoint_url:
        pass
    elif "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    if checkpoint_url[-9:-4] == "pt22k":
        model = BeitForMaskedImageModeling(config)
    else:
        model = BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    feature_extractor = BeitFeatureExtractor(size=config.image_size, resample=Image.BILINEAR, do_center_crop=False)
    encoding = feature_extractor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = torch.Size([1, 1000])
    if checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k"):
        expected_shape = torch.Size([1, 196, 8192])
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k"):
        expected_shape = torch.Size([1, 196, 8192])
    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22k"):
        expected_shape = torch.Size([1, 21841])
        expected_logits = torch.tensor([2.2288, 2.4671, 0.7395])
        expected_class_idx = 2397
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22k"):
        expected_shape = torch.Size([1, 21841])
        expected_logits = torch.tensor([1.6881, -0.2787, 0.5901])
        expected_class_idx = 2396
    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft1k"):
        expected_logits = torch.tensor([0.1241, 0.0798, -0.6569])
        expected_class_idx = 285
    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-1.2385, -1.0987, -1.0108])
        expected_class_idx = 281
    elif checkpoint_url[:-4].endswith("beit_base_patch16_384_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-1.5303, -0.9484, -0.3147])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft1k"):
        expected_logits = torch.tensor([0.4610, -0.0928, 0.2086])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-0.4804, 0.6257, -0.1837])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_384_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([[-0.5122, 0.5117, -0.2113]])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_512_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-0.3062, 0.7261, 0.4852])
        expected_class_idx = 761
    else:
        raise ValueError("Can't verify logits as model is not supported")

    assert logits.shape == expected_shape, "Shape of logits not as expected"
    print("Shape of logits:", logits.shape)

    if not has_lm_head:
        print("Predicted class idx:", logits.argmax(-1).item())
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-3), "First elements of logits not as expected"
        assert logits.argmax(-1).item() == expected_class_idx, "Predicted class index not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving feature extractor to {pytorch_dump_folder_path}")
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
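# A minimal command-line entry point for the converter above; the original
# scripts wire this up with argparse in roughly this form (flag names assumed
# from the function signature).
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        type=str,
        required=True,
        help="URL of the original BEiT checkpoint (.pth) to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        type=str,
        required=True,
        help="Path to the folder where the converted model and feature extractor are saved.",
    )
    args = parser.parse_args()
    convert_beit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)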