示例#1
0
def change_max_pos_embd(args, new_mpe_size, n_classes):
    """Build a fresh Transformer with `new_mpe_size` position embeddings.

    Uses the module-level `device` to place the model. When a CNN front-end
    is enabled, the transformer's input width is matched to the CNN output.
    """
    cfg = TransformerConfig(
        size=args.transformer_size,
        max_position_embeddings=new_mpe_size,
    )
    if args.use_cnn:
        # CNN features feed the transformer, so align the input dimension.
        cfg.input_size = CnnConfig.output_dim
    return Transformer(config=cfg, n_classes=n_classes).to(device)
示例#2
0
    keypoints_dir=save_dir,
    max_frame_len=169,
)

# Batch size 1: samples are evaluated one at a time, in file order.
dataloader = data.DataLoader(
    dataset,
    batch_size=1,
    shuffle=False,
    num_workers=4,
    pin_memory=True,
)
# Invert the label map (label -> id becomes id -> label) so predicted
# class indices can be decoded back to human-readable labels.
label_map = dict(zip(label_map.values(), label_map.keys()))

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config = TransformerConfig(size="large", max_position_embeddings=256)
model = Transformer(config=config, n_classes=263)
model = model.to(device)

pretrained_model_name = "include_no_cnn_transformer_large.pth"
pretrained_model_links = load_json("pretrained_links.json")
# Download the pretrained checkpoint only on first run.
if not os.path.isfile(pretrained_model_name):
    link = pretrained_model_links[pretrained_model_name]
    torch.hub.download_url_to_file(link, pretrained_model_name, progress=True)

# map_location is required so a checkpoint saved on GPU still loads on a
# CPU-only machine (torch.load would otherwise try to restore CUDA tensors).
ckpt = torch.load(pretrained_model_name, map_location=device)
model.load_state_dict(ckpt["model"])
print("### Model loaded ###")

preds = inference(dataloader, model, device, label_map)
print(json.dumps(preds, indent=2))