# Example 1
    def __init__(self, opt: Opt, dictionary: DictionaryAgent):
        """
        Build the model: shared token embeddings, context and candidate
        encoders, a memory encoder, and a basic attention module.
        """
        super().__init__()
        self.opt = opt
        self.pad_idx = dictionary[dictionary.null_token]

        # Embedding table used by the context side (and by the candidate side
        # too, unless separate candidate embeddings are requested below).
        self.embeddings = create_embeddings(
            dictionary, opt['embedding_size'], self.pad_idx
        )

        self.share_word_embedding = opt.get('share_word_embeddings', True)
        if not self.share_word_embedding:
            # Candidates get their own, independently trained table.
            self.cand_embeddings = create_embeddings(
                dictionary, opt['embedding_size'], self.pad_idx
            )

        # Optionally freeze all embedding weights.
        if not opt.get('learn_embeddings'):
            self.embeddings.weight.requires_grad = False
            if not self.share_word_embedding:
                self.cand_embeddings.weight.requires_grad = False

        self.reduction_type = opt.get('reduction_type', 'mean')

        self.context_encoder = self.build_context_encoder(
            opt,
            dictionary,
            self.embeddings,
            self.pad_idx,
            reduction_type=self.reduction_type,
        )

        if opt.get('share_encoders'):
            # Reuse the context encoder for candidates, wrapped to map its
            # output through an extra response head.
            self.cand_encoder = TransformerResponseWrapper(
                self.context_encoder, self.context_encoder.out_dim
            )
        else:
            cand_embeddings = (
                self.embeddings
                if self.share_word_embedding
                else self.cand_embeddings
            )
            self.cand_encoder = self.build_candidate_encoder(
                opt,
                dictionary,
                cand_embeddings,
                self.pad_idx,
                reduction_type=self.reduction_type,
            )

        # Memory encoder: either a wrapped copy of the context encoder, or the
        # context encoder used directly.
        if opt.get('wrap_memory_encoder', False):
            self.memory_transformer = TransformerResponseWrapper(
                self.context_encoder, self.context_encoder.out_dim
            )
        else:
            self.memory_transformer = self.context_encoder

        self.attender = BasicAttention(
            dim=2, attn=opt['memory_attention'], residual=True
        )
# Example 2
    def __init__(self, opt: Opt, dictionary: DictionaryAgent):
        """
        Build a seq2seq transformer from options and a dictionary.
        """
        # Special-token indices are needed before the parent constructor runs.
        null_t = dictionary.null_token
        start_t = dictionary.start_token
        end_t = dictionary.end_token
        self.pad_idx = dictionary[null_t]
        self.start_idx = dictionary[start_t]
        self.end_idx = dictionary[end_t]
        super().__init__(self.pad_idx, self.start_idx, self.end_idx)

        # One embedding table, shared between encoder and decoder.
        self.embeddings = create_embeddings(
            dictionary, opt['embedding_size'], self.pad_idx
        )

        self.encoder = self.build_encoder(
            opt, dictionary, self.embeddings, self.pad_idx, reduction_type=None
        )
        self.decoder = self.build_decoder(opt, self.embeddings)
# Example 3
    def __init__(self, opt, dictionary, retriever_shared=None):
        """
        Build a RAG model: a retriever plus a seq2seq encoder/decoder.

        :param opt: options mapping
        :param dictionary: dictionary agent supplying special-token indices
        :param retriever_shared: optional shared state forwarded to the retriever
        """
        from parlai.agents.rag.rag import RAG_MODELS

        self.pad_idx = dictionary[dictionary.null_token]
        self.start_idx = dictionary[dictionary.start_token]
        self.end_idx = dictionary[dictionary.end_token]
        super().__init__(self.pad_idx, self.start_idx, self.end_idx)
        # fp16 only when CUDA is both permitted and actually available.
        self.fp16 = (
            not opt['no_cuda'] and torch.cuda.is_available() and opt.get('fp16', False)
        )
        self.dict = dictionary
        self.embeddings = create_embeddings(
            dictionary, opt['embedding_size'], self.pad_idx
        )

        # attrs
        self.rag_model_type = opt['rag_model_type']
        self._rag_model_interface = RAG_MODELS[self.rag_model_type](opt, self.pad_idx)
        self.generation_model = opt['generation_model']
        self.n_extra_positions = opt['n_extra_positions']
        self.n_positions = (
            get_n_positions_from_options(opt) + opt['n_extra_positions']
        )
        assert opt['n_extra_positions'] >= 0
        self.expanded_input_truncate = min(
            opt['text_truncate'] or opt['truncate'],
            get_n_positions_from_options(opt),
        )
        if self.n_extra_positions > 0:
            # Overloaded attribute: with no extra positions it truncates the
            # full expanded input; with extra positions it instead caps the
            # length of the knowledge tokens.
            self.expanded_input_truncate = self.n_extra_positions
        self.min_doc_token_length = opt['min_doc_token_length']

        # modules
        self.retriever = retriever_factory(opt, dictionary, shared=retriever_shared)
        self.seq2seq_encoder = self.build_encoder(
            opt,
            dictionary=dictionary,
            embedding=self.embeddings,
            padding_idx=self.pad_idx,
        )
        self.seq2seq_decoder = self.build_decoder(
            opt, embedding=self.embeddings, padding_idx=self.pad_idx
        )
# Example 4
    def __init__(self, opt: Opt, dictionary: DictionaryAgent, **kwargs):
        """
        Build encoder and decoder using the registered swappable classes.
        """
        # Resolve special-token indices before the parent constructor needs them.
        self.pad_idx = dictionary[dictionary.null_token]
        self.start_idx = dictionary[dictionary.start_token]
        self.end_idx = dictionary[dictionary.end_token]
        super().__init__(self.pad_idx, self.start_idx, self.end_idx, **kwargs)
        self.opt = opt
        self.embeddings = create_embeddings(
            dictionary, opt['embedding_size'], self.pad_idx
        )

        # Concrete encoder/decoder classes come from self.swappables, so
        # subclasses can substitute their own components.
        self.encoder = self.build_encoder(
            opt,
            dictionary,
            self.embeddings,
            self.pad_idx,
            reduction_type=None,
            encoder_class=self.swappables.encoder,  # type: ignore
        )
        self.decoder = self.build_decoder(
            opt,
            embedding=self.embeddings,
            decoder_class=self.swappables.decoder,  # type: ignore
        )