def setUp(self):
    test_utils.setup_proxy()
    setup_imports()
    replace_with_jit()
    model_name = "visual_bert"
    args = test_utils.dummy_args(model=model_name)
    configuration = Configuration(args)
    config = configuration.get_config()
    model_config = config.model_config[model_name]
    model_config.model = model_name
    # With the default head config, build_model returns the pretraining variant
    self.pretrain_model = build_model(model_config)
def setUp(self):
    test_utils.setup_proxy()
    setup_imports()
    replace_with_jit()
    model_name = "visual_bert"
    args = test_utils.dummy_args(model=model_name)
    configuration = Configuration(args)
    config = configuration.get_config()
    model_config = config.model_config[model_name]
    # Switch to a two-label classification head for the finetuning variant
    model_config["training_head_type"] = "classification"
    model_config["num_labels"] = 2
    model_config.model = model_name
    self.finetune_model = build_model(model_config)
def setUp(self):
    setup_imports()
    replace_with_jit()
    model_name = "visual_bert"
    args = test_utils.dummy_args(model=model_name)
    configuration = Configuration(args)
    config = configuration.get_config()
    model_class = registry.get_model_class(model_name)
    self.pretrain_model = model_class(config.model_config[model_name])
    self.pretrain_model.build()
    # Rebuild with a two-label classification head for the finetuning variant
    config.model_config[model_name]["training_head_type"] = "classification"
    config.model_config[model_name]["num_labels"] = 2
    self.finetune_model = model_class(config.model_config[model_name])
    self.finetune_model.build()
def __init__(self, config, *args, **kwargs):
    super().__init__(config, *args, **kwargs)
    # Replace transformer layers with scriptable JIT layers
    replace_with_jit()
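# The point of patching at construction time is that the fully built model can
# then be compiled with TorchScript. A hypothetical usage sketch, not taken
# from the source: it assumes the patched model really is script-compatible
# end to end, and `model` stands in for any model built after
# replace_with_jit() has run.
import torch

scripted_model = torch.jit.script(model)  # compile the patched model to TorchScript
scripted_model.save("visual_bert_scripted.pt")  # serialize for Python-free deployment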
def test_undo_replace_with_jit(self):
    original_function = BertSelfAttention.forward
    replace_with_jit()
    undo_replace_with_jit()
    # After the round trip, the original forward must be restored exactly
    self.assertTrue(BertSelfAttention.forward is original_function)
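# For reference, a minimal sketch of the monkey-patching pattern the test
# above exercises. This is NOT the actual MMF implementation: the
# `_original_forwards` cache and `scriptable_self_attention_forward` are
# hypothetical stand-ins, assumed only for illustration. The import path for
# BertSelfAttention also varies across transformers versions (older releases
# use `from transformers.modeling_bert import BertSelfAttention`).
from transformers.models.bert.modeling_bert import BertSelfAttention

_original_forwards = {}


def scriptable_self_attention_forward(self, hidden_states, *args, **kwargs):
    # Stand-in for a TorchScript-compatible rewrite of the original forward.
    raise NotImplementedError


def replace_with_jit():
    # Cache the original function once, then swap in the scriptable version.
    _original_forwards.setdefault("forward", BertSelfAttention.forward)
    BertSelfAttention.forward = scriptable_self_attention_forward


def undo_replace_with_jit():
    # Restore the cached original, so `BertSelfAttention.forward is
    # original_function` holds again after the round trip.
    original = _original_forwards.pop("forward", None)
    if original is not None:
        BertSelfAttention.forward = original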