def __init__(self, cfg: DictConfig, trainer: Trainer = None):
    """Initializes the SGD dialogue state tracking model."""
    self.data_prepared = False
    # The tokenizer must be set up before the base class constructor runs.
    self.setup_tokenizer(cfg.tokenizer)
    super().__init__(cfg=cfg, trainer=trainer)
    # Pretrained language model backbone that produces the token representations.
    self.bert_model = get_lm_model(
        pretrained_model_name=cfg.language_model.pretrained_model_name,
        config_file=cfg.language_model.config_file,
        config_dict=OmegaConf.to_container(cfg.language_model.config) if cfg.language_model.config else None,
        checkpoint_file=cfg.language_model.lm_checkpoint,
    )
    # Encoder and decoder heads are sized to match the backbone's hidden dimension.
    self.encoder = SGDEncoder(hidden_size=self.bert_model.config.hidden_size, dropout=self._cfg.encoder.dropout)
    self.decoder = SGDDecoder(embedding_dim=self.bert_model.config.hidden_size)
    self.loss = SGDDialogueStateLoss(reduction="mean")
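# Usage sketch (assumptions: the constructor above belongs to a NeMo model class,
# called SGDQAModel here for illustration; the tokenizer sub-fields are what
# setup_tokenizer typically expects, not confirmed by this file). The config keys
# below mirror exactly the fields the constructor reads: cfg.tokenizer,
# cfg.language_model.{pretrained_model_name, config_file, config, lm_checkpoint},
# and cfg.encoder.dropout.
#
#     from omegaconf import OmegaConf
#
#     cfg = OmegaConf.create(
#         {
#             "tokenizer": {"tokenizer_name": "bert-base-cased"},  # assumed field name
#             "language_model": {
#                 "pretrained_model_name": "bert-base-cased",
#                 "config_file": None,    # optional HuggingFace config JSON
#                 "config": None,         # optional inline config dict
#                 "lm_checkpoint": None,  # optional LM checkpoint to restore
#             },
#             "encoder": {"dropout": 0.1},
#         }
#     )
#     model = SGDQAModel(cfg=cfg, trainer=None)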