Example #1
0
    def __init__(self, args, max_seq_length, mode='x'):
        """Wrap an LXRT backbone and BERT tokenizer, optionally with a pretraining head.

        Args:
            args: run configuration; must expose ``from_scratch`` and support
                ``.get("load_pretrain_head", False)`` (dict-like — TODO confirm).
            max_seq_length: maximum token sequence length used downstream.
            mode: feature-extraction mode forwarded to the LXRT model.
        """
        super().__init__()
        self.max_seq_length = max_seq_length

        from lxrt.modeling import LXRTFeatureExtraction as VisualBertForLXRFeature, VISUAL_CONFIG

        set_visual_config(args, VISUAL_CONFIG)

        # Tokenizer matching the pretrained BERT checkpoint.
        self.tokenizer = BertTokenizer.from_pretrained(
            "bert-base-uncased", do_lower_case=True)

        # LXRT feature extractor initialized from the same checkpoint.
        self.model = VisualBertForLXRFeature.from_pretrained(
            "bert-base-uncased", mode=mode)

        if args.from_scratch:
            print("Re-initializing all the weights")
            self.model.apply(self.model.init_bert_weights)

        # Optionally attach the BERT pretraining head, tied to the word embeddings.
        self.load_pretrain_head = args.get("load_pretrain_head", False)
        if self.load_pretrain_head:
            from lxmert.src.lxrt.modeling import BertPreTrainingHeads
            embedding_weight = self.model.bert.embeddings.word_embeddings.weight
            self.pretrained_head = BertPreTrainingHeads(
                self.model.config, embedding_weight)
    def __init__(self, args, mode='x'):
        """Build the LXRT feature extractor from a local checkpoint directory.

        Args:
            args: run configuration; must expose ``from_scratch``.
            mode: feature-extraction mode forwarded to the LXRT model.
        """
        super(LXRTEncoder, self).__init__()
        set_visual_config(args)

        # Load model weights from the local user-data directory rather than
        # a hub checkpoint name.
        checkpoint_dir = "../user_data"
        self.model = VisualBertForLXRFeature.from_pretrained(
            checkpoint_dir, mode=mode)

        if args.from_scratch:
            print("initializing all the weights")
            self.model.apply(self.model.init_bert_weights)
Example #3
0
    def __init__(self, args, max_seq_length, mode='x'):
        """Set up the BERT tokenizer and LXRT model from bert-base-uncased.

        Args:
            args: run configuration; must expose ``from_scratch``.
            max_seq_length: maximum token sequence length used downstream.
            mode: feature-extraction mode forwarded to the LXRT model.
        """
        super().__init__()
        self.max_seq_length = max_seq_length
        set_visual_config(args)

        # WordPiece tokenizer matching the pretrained checkpoint.
        self.tokenizer = BertTokenizer.from_pretrained(
            "bert-base-uncased", do_lower_case=True)

        # Cross-modal LXRT backbone.
        self.model = VisualBertForLXRFeature.from_pretrained(
            "bert-base-uncased", mode=mode)

        if args.from_scratch:
            print("initializing all the weights")
            self.model.apply(self.model.init_bert_weights)
Example #4
0
    def __init__(self, args, max_seq_length, mode='x', attention=False):
        """Construct the tokenizer and LXRT model, with progress logging.

        Args:
            args: run configuration; must expose ``from_scratch``.
            max_seq_length: maximum token sequence length used downstream.
            mode: feature-extraction mode forwarded to the LXRT model.
            attention: forwarded to the model; presumably toggles returning
                attention maps — TODO confirm against LXRTFeatureExtraction.
        """
        super().__init__()
        print(f"Making {__name__}")
        self.max_seq_length = max_seq_length
        set_visual_config(args)

        # Tokenizer tied to the bert-base-uncased vocabulary.
        self.tokenizer = BertTokenizer.from_pretrained(
            "bert-base-uncased", do_lower_case=True)
        print("Made Tokenizer")

        # LXRT backbone, loaded from the same checkpoint name.
        self.model = VisualBertForLXRFeature.from_pretrained(
            "bert-base-uncased", mode=mode, attention=attention)
        print("Made VisualBertForLXRFeature")

        if args.from_scratch:
            print("initializing all the weights")
            self.model.apply(self.model.init_bert_weights)
        print(f"Done {__name__}")