def load_saved_model():
    if model_type == 'seq2seq':
        model = Seq2SeqIntentModel()
    else:
        model = MultiTaskIntentModel()
    model.load(args.model_path)
    return model
def load_model(self):
    if self.model_type == 'seq2seq':
        model = Seq2SeqIntentModel()
    else:
        model = MultiTaskIntentModel()
    model.load(self.pretrained_model)
    self.model = model
def load_model(self):
    # load the saved model metadata (model type and vocabularies)
    with open(IntentExtractionApi.pretrained_model_info, "rb") as fp:
        model_info = pickle.load(fp)
    self.model_type = model_info["type"]
    self.word_vocab = model_info["word_vocab"]
    # invert the tag vocabulary so predicted indices map back to tag strings
    self.tags_vocab = {v: k for k, v in model_info["tags_vocab"].items()}
    if self.model_type == "mtl":
        self.char_vocab = model_info["char_vocab"]
        self.intent_vocab = {
            v: k for k, v in model_info["intent_vocab"].items()
        }
        model = MultiTaskIntentModel()
    else:
        model = Seq2SeqIntentModel()
    model.load(self.pretrained_model)
    self.model = model
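# A minimal decoding sketch (an illustration, not part of the original module), assuming the
# model's per-token tag predictions and per-sentence intent prediction come back as integer
# indices; the inverted tags_vocab and intent_vocab built in load_model() above map those
# indices back to their label strings.
def decode_tags(tag_indices, tags_vocab):
    # tags_vocab: index -> tag string, as inverted in load_model()
    return [tags_vocab.get(int(i), 'O') for i in tag_indices]


def decode_intent(intent_index, intent_vocab):
    # intent_vocab: index -> intent name, built only for the 'mtl' model type
    return intent_vocab.get(int(intent_index), 'unknown')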
parser.add_argument('--model_info_path', type=str, default='model_info.dat',
                    help='Path for saving model topology')
args = parser.parse_args()
validate_input_args()

# load dataset and one-hot encode the tag labels
dataset = SNIPS(path=args.dataset_path, sentence_length=args.sentence_length)
train_x, _, train_i, train_y = dataset.train_set
test_x, _, test_i, test_y = dataset.test_set
test_y = to_categorical(test_y, dataset.label_vocab_size)
train_y = to_categorical(train_y, dataset.label_vocab_size)

# build the encoder-decoder tagging model
model = Seq2SeqIntentModel()
model.build(dataset.word_vocab_size,
            dataset.label_vocab_size,
            args.token_emb_size,
            args.encoder_depth,
            args.decoder_depth,
            args.lstm_hidden_size,
            args.encoder_dropout,
            args.decoder_dropout)

# CONLL evaluation on the test set after each epoch
conll_cb = ConllCallback(test_x, test_y, dataset.tags_vocab.vocab, batch_size=args.b)

# train model
model.fit(x=train_x, y=train_y,
          batch_size=args.b, epochs=args.e,
          callbacks=[conll_cb])  # assumption: conll_cb is passed to fit as a callback
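# A sketch (an assumption, not shown in the snippet above) of persisting the trained model
# together with the metadata that load_model() above expects in the pickled model-info file.
# dataset.word_vocab and model.save() are assumed counterparts of the word_vocab_size
# attribute and the model.load() calls used elsewhere in these snippets.
import pickle

with open(args.model_info_path, 'wb') as fp:
    pickle.dump({'type': 'seq2seq',
                 'word_vocab': dataset.word_vocab,
                 'tags_vocab': dataset.tags_vocab.vocab}, fp)
model.save(args.model_path)  # assumed save() counterpart to load()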