Example 1
    def __init__(self, opt):
        super().__init__(opt)
        # initialize from vocab path
        warn_once(
            'WARNING: BERT uses a Hugging Face tokenizer; ParlAI dictionary args are ignored'
        )
        download(opt['datapath'])
        vocab_path = os.path.join(opt['datapath'], 'models', 'bert_models',
                                  VOCAB_PATH)
        self.tokenizer = BertTokenizer.from_pretrained(vocab_path)

        self.start_token = '[CLS]'
        self.end_token = '[SEP]'
        self.null_token = '[PAD]'
        self.start_idx = self.tokenizer.convert_tokens_to_ids(['[CLS]'])[0]  # should be 101
        self.end_idx = self.tokenizer.convert_tokens_to_ids(['[SEP]'])[0]  # should be 102
        self.pad_idx = self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0]  # should be 0
        # set tok2ind for special tokens
        self.tok2ind[self.start_token] = self.start_idx
        self.tok2ind[self.end_token] = self.end_idx
        self.tok2ind[self.null_token] = self.pad_idx
        # set ind2tok for special tokens
        self.ind2tok[self.start_idx] = self.start_token
        self.ind2tok[self.end_idx] = self.end_token
        self.ind2tok[self.pad_idx] = self.null_token
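
The three indices cached above are fixed by the bert-base-uncased vocabulary, so they are easy to sanity-check on their own. A minimal sketch, assuming the current Hugging Face transformers package (older ParlAI versions imported BertTokenizer from pytorch_pretrained_bert, which exposes the same convert_tokens_to_ids call):

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
print(tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]', '[PAD]']))
# [101, 102, 0], matching the "should be" comments above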
Example 2
    def __init__(self, opt, shared=None):
        # download pretrained models
        download(opt['datapath'])
        self.pretrained_path = os.path.join(
            opt['datapath'], 'models', 'bert_models', MODEL_PATH
        )
        opt['pretrained_path'] = self.pretrained_path
        super().__init__(opt, shared)
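
The pattern in this example recurs in the snippets below: download() fetches the pretrained archive into ParlAI's data directory, and the agent records the expected location in opt['pretrained_path'] before calling the parent constructor. A sketch of the path construction alone, using a hypothetical datapath and MODEL_PATH (the real constant is defined in the agent module):

import os

MODEL_PATH = 'bert-base-uncased.tar.gz'  # hypothetical stand-in for the module constant
opt = {'datapath': '/tmp/ParlAI/data'}   # hypothetical datapath

pretrained_path = os.path.join(opt['datapath'], 'models', 'bert_models', MODEL_PATH)
print(pretrained_path)  # /tmp/ParlAI/data/models/bert_models/bert-base-uncased.tar.gz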
Example 3
    def __init__(self, opt):
        super().__init__(opt)
        # initialize from vocab path
        download(opt['datapath'])
        vocab_path = os.path.join(opt['datapath'], 'models', 'bert_models', VOCAB_PATH)
        self.tokenizer = BertTokenizer.from_pretrained(vocab_path)

        self.start_token = '[CLS]'
        self.end_token = '[SEP]'
        self.null_token = '[PAD]'
        self.start_idx = self.tokenizer.convert_tokens_to_ids(['[CLS]'])[0]  # should be 101
        self.end_idx = self.tokenizer.convert_tokens_to_ids(['[SEP]'])[0]  # should be 102
        self.pad_idx = self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0]  # should be 0
        # set tok2ind for special tokens
        self.tok2ind[self.start_token] = self.start_idx
        self.tok2ind[self.end_token] = self.end_idx
        self.tok2ind[self.null_token] = self.pad_idx
        # set ind2tok for special tokens
        self.ind2tok[self.start_idx] = self.start_token
        self.ind2tok[self.end_idx] = self.end_token
        self.ind2tok[self.pad_idx] = self.null_token
Example 4
    def __init__(self, opt):
        super().__init__(opt)
        # initialize from vocab path
        warn_once(
            "WARNING: BERT uses a Hugging Face tokenizer; ParlAI dictionary args are ignored"
        )
        download(opt["datapath"])
        vocab_path = PathManager.get_local_path(
            os.path.join(opt["datapath"], "models", "bert_models", VOCAB_PATH)
        )
        self.tokenizer = BertTokenizer.from_pretrained(vocab_path)

        self.start_token = "[CLS]"
        self.end_token = "[SEP]"
        self.null_token = "[PAD]"
        self.start_idx = self.tokenizer.convert_tokens_to_ids(["[CLS]"])[0]  # should be 101
        self.end_idx = self.tokenizer.convert_tokens_to_ids(["[SEP]"])[0]  # should be 102
        self.pad_idx = self.tokenizer.convert_tokens_to_ids(["[PAD]"])[0]  # should be 0
        # set tok2ind for special tokens
        self.tok2ind[self.start_token] = self.start_idx
        self.tok2ind[self.end_token] = self.end_idx
        self.tok2ind[self.null_token] = self.pad_idx
        # set ind2tok for special tokens
        self.ind2tok[self.start_idx] = self.start_token
        self.ind2tok[self.end_idx] = self.end_token
        self.ind2tok[self.pad_idx] = self.null_token
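
The only substantive difference from Example 1 is the PathManager.get_local_path wrapper. Assuming ParlAI's iopath-backed PathManager, the call returns a local path unchanged but materializes a local copy when the path points at a remote or virtual filesystem, so the tokenizer can always read from disk:

from parlai.utils.io import PathManager  # assumption: ParlAI's iopath-backed path manager

# A plain local path comes back unchanged; a registered remote path is
# downloaded first and the local copy's path is returned instead.
local_vocab = PathManager.get_local_path('/tmp/ParlAI/data/models/bert_models/vocab.txt')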
Example 5
    def __init__(self, opt, shared=None):
        # download pretrained models
        download(opt['datapath'])
        self.pretrained_path = os.path.join(
            opt['datapath'], 'models', 'bert_models', MODEL_PATH
        )
        opt['pretrained_path'] = self.pretrained_path
        self.add_cls_token = opt.get('add_cls_token', True)
        self.sep_last_utt = opt.get('sep_last_utt', False)
        super().__init__(opt, shared)
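
add_cls_token and sep_last_utt are vectorization flags rather than model options. A hypothetical illustration of the standard BERT conventions they refer to (not the agent's actual vectorization code): add_cls_token prepends [CLS] to the input, and sep_last_utt inserts [SEP] between the dialogue context and the most recent utterance:

context = ['how', 'are', 'you', '?']
last_utt = ['fine', 'thanks']
add_cls_token, sep_last_utt = True, True

tokens = context + (['[SEP]'] if sep_last_utt else []) + last_utt
if add_cls_token:
    tokens = ['[CLS]'] + tokens
print(tokens)  # ['[CLS]', 'how', 'are', 'you', '?', '[SEP]', 'fine', 'thanks']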
Example 6
    def __init__(self, opt, shared=None):
        # download pretrained models
        download(opt["datapath"])
        self.pretrained_path = PathManager.get_local_path(
            os.path.join(opt["datapath"], "models", "bert_models", MODEL_PATH)
        )
        opt["pretrained_path"] = self.pretrained_path
        self.add_cls_token = opt.get("add_cls_token", True)
        self.sep_last_utt = opt.get("sep_last_utt", False)
        super().__init__(opt, shared)
Example 7
    def __init__(self, opt, shared=None):
        # download pretrained models
        download(opt['datapath'])
        self.pretrained_path = os.path.join(opt['datapath'], 'models',
                                            'bert_models', MODEL_PATH)

        super().__init__(opt, shared)
        self.clip = -1
        self.NULL_IDX = self.dict.pad_idx
        self.START_IDX = self.dict.start_idx
        self.END_IDX = self.dict.end_idx
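
Caching NULL_IDX, START_IDX, and END_IDX on the agent keeps the batching code independent of the tokenizer object. A small sketch of the typical use of the pad index, assuming pad_idx == 0 as in bert-base-uncased: right-pad variable-length id sequences into a single tensor:

import torch

pad_idx = 0  # [PAD] for bert-base-uncased
seqs = [torch.tensor([101, 7592, 102]), torch.tensor([101, 102])]
batch = torch.nn.utils.rnn.pad_sequence(seqs, batch_first=True, padding_value=pad_idx)
print(batch)  # tensor([[101, 7592, 102], [101, 102, 0]])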
Example 8
    def __init__(self, opt, shared=None):
        # download pretrained models
        download(opt['datapath'])
        self.pretrained_path = os.path.join(opt['datapath'], 'models',
                                            'bert_models', MODEL_PATH)
        opt['pretrained_path'] = self.pretrained_path

        self.clip = -1

        super().__init__(opt, shared)
        self.NULL_IDX = self.dict.pad_idx
        self.START_IDX = self.dict.start_idx
        self.END_IDX = self.dict.end_idx
        # mean-reduce the ranking loss over the batch
        self.rank_loss = torch.nn.CrossEntropyLoss(reduction='mean')
Example 9
    def __init__(self, opt, shared=None):
        # download pretrained models
        download(opt['datapath'])
        self.pretrained_path = os.path.join(opt['datapath'], 'models',
                                            'bert_models', MODEL_PATH)

        super().__init__(opt, shared)
        # use DataParallel for multi-GPU for now; distributed mode is rejected below
        self.data_parallel = opt.get('data_parallel') and self.use_cuda
        if self.data_parallel:
            self.model = torch.nn.DataParallel(self.model)
        if is_distributed():
            raise ValueError(
                'Cannot combine --data-parallel and distributed mode')
        self.clip = -1
        self.NULL_IDX = self.dict.pad_idx
        self.START_IDX = self.dict.start_idx
        self.END_IDX = self.dict.end_idx
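
The data_parallel branch wraps the whole model so each forward pass is split across the visible GPUs; it cannot be combined with ParlAI's distributed mode, hence the explicit check. A minimal sketch with a toy module standing in for the BERT ranker:

import torch

model = torch.nn.Linear(768, 2)  # toy stand-in for the agent's model
if torch.cuda.is_available() and torch.cuda.device_count() > 1:
    # replicates the module on every GPU, scatters each input batch
    # across them, and gathers the outputs back on device 0
    model = torch.nn.DataParallel(model)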
Example 10
    def __init__(self, opt, shared=None):
        # download pretrained models
        download(opt['datapath'])
        self.pretrained_path = os.path.join(
            opt['datapath'], 'models', 'bert_models', MODEL_PATH
        )
        opt['pretrained_path'] = self.pretrained_path

        self.clip = -1

        super().__init__(opt, shared)
        # use DataParallel for multi-GPU for now; distributed mode is rejected below
        self.data_parallel = opt.get('data_parallel') and self.use_cuda
        if self.data_parallel and shared is None:
            self.model = torch.nn.DataParallel(self.model)
        if is_distributed():
            raise ValueError('Cannot combine --data-parallel and distributed mode')
        self.NULL_IDX = self.dict.pad_idx
        self.START_IDX = self.dict.start_idx
        self.END_IDX = self.dict.end_idx
        # mean-reduce the ranking loss over the batch
        self.rank_loss = torch.nn.CrossEntropyLoss(reduction='mean')
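
A sketch of how the mean-reduced CrossEntropyLoss scores candidates, assuming the bi-encoder's usual in-batch-negatives setup, where each example's correct candidate sits on the diagonal of the score matrix:

import torch

rank_loss = torch.nn.CrossEntropyLoss(reduction='mean')
scores = torch.randn(4, 4)        # [batch, num_candidates]
labels = torch.arange(4)          # correct candidate index per example
loss = rank_loss(scores, labels)  # scalar, averaged over the batch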