def caffe2_export(self, tensorizers, tensor_dict, path, export_onnx_path=None):
    """Export this model to a Caffe2 predictor net, deriving the input/output
    names and the export vocab from the model's tensorizers."""
    exporter = ModelExporter(
        ModelExporter.Config(),
        self.get_export_input_names(tensorizers),
        self.arrange_model_inputs(tensor_dict),
        self.vocab_to_export(tensorizers),
        self.get_export_output_names(tensorizers),
    )
    return exporter.export_to_caffe2(self, path, export_onnx_path=export_onnx_path)
def caffe2_export(self, tensorizers, tensor_dict, path, export_onnx_path=None):
    """Variant for models with a fixed interface: inputs are always
    "tokens"/"tokens_lens" and the single output is "scores"."""
    exporter = ModelExporter(
        ModelExporter.Config(),
        ["tokens", "tokens_lens"],
        self.arrange_model_inputs(tensor_dict),
        {"tokens": list(tensorizers["tokens"].vocab)},
        ["scores"],
    )
    return exporter.export_to_caffe2(self, path, export_onnx_path=export_onnx_path)
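# For reference, a minimal sketch of consuming a file produced by the export
# entry points above with the Caffe2 runtime, mirroring what the tests below
# do. The "minidb" db type and the single "scores" output blob are
# illustrative assumptions (the latter matches only the hard-coded variant
# directly above), not guarantees of this module's API.
from caffe2.python import workspace
from caffe2.python.predictor import predictor_exporter as pe


def run_exported_predictor(predictor_path, feeds, db_type="minidb"):
    """Load an exported predictor net and run it once on the given feeds.

    feeds: dict mapping input blob name -> numpy array.
    """
    workspace.ResetWorkspace()
    pred_net = pe.prepare_prediction_net(predictor_path, db_type)
    for name, value in feeds.items():
        workspace.FeedBlob(name, value)
    workspace.RunNetOnce(pred_net)
    # Fetch the output blob the exporter declared; "scores" assumes the
    # fixed-interface caffe2_export above.
    return workspace.FetchBlob("scores")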
def test_wordblstm_export_to_caffe2(
    self, export_num_words, num_word_classes, test_num_words, num_predictions
):
    for WORD_CONFIG in WORD_CONFIGS:
        config = self._get_config(WordTaggingTask.Config, WORD_CONFIG)
        tensorizers, data = _NewTask._init_tensorizers(config)
        word_labels = [SpecialTokens.PAD, SpecialTokens.UNK, "NoLabel", "person"]
        tensorizers["labels"].vocab = Vocabulary(word_labels)
        tensorizers["tokens"].vocab = Vocabulary(WORD_VOCAB)
        py_model = _NewTask._init_model(config.model, tensorizers)
        dummy_test_input = self._get_rand_input_intent_slot(
            BATCH_SIZE, W_VOCAB_SIZE, test_num_words
        )
        exporter = ModelExporter(
            ModelExporter.Config(),
            py_model.get_export_input_names(tensorizers),
            dummy_test_input,
            py_model.vocab_to_export(tensorizers),
            py_model.get_export_output_names(tensorizers),
        )
        with tempfile.NamedTemporaryFile(
            delete=False, suffix=".predictor"
        ) as pred_file:
            exporter.export_to_caffe2(py_model, pred_file.name)
            workspace.ResetWorkspace()
        pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)
        for _i in range(num_predictions):
            test_inputs = self._get_rand_input_intent_slot(
                BATCH_SIZE, W_VOCAB_SIZE, test_num_words
            )
            self._feed_c2_input(
                workspace, test_inputs, exporter.input_names, exporter.vocab_map
            )
            workspace.RunNetOnce(pred_net)
            # The exporter emits one word-score blob per label class.
            word_output_names = [
                "{}:{}".format("word_scores", class_name)
                for class_name in word_labels
            ]
            py_model.eval()
            py_outs = py_model(*test_inputs)
            context = {"seq_lens": test_inputs[-1]}
            target = None
            pred, score = py_model.get_pred(py_outs, target, context)
            c2_word_out = []
            for o_name in word_output_names:
                c2_word_out.extend(list(workspace.FetchBlob(o_name)))
            # Transpose to a per-class layout so the flattened PyTorch scores
            # line up with the per-class Caffe2 blobs concatenated above.
            np.testing.assert_array_almost_equal(
                torch.transpose(score, 1, 2).contiguous().view(-1).detach().numpy(),
                np.array(c2_word_out).flatten(),
            )
def test_seq_nn_export_to_caffe2(
    self,
    export_num_words,
    num_doc_classes,
    test_num_words,
    num_predictions,
    test_num_seq,
):
    config = self._get_config(SeqNNTask.Config, SEQ_NN_CONFIG)
    tensorizers, data = _NewTask._init_tensorizers(config)
    doc_labels = [SpecialTokens.UNK, "cu:other", "cu:address_Person"]
    tensorizers["labels"].vocab = Vocabulary(doc_labels)
    tensorizers["tokens"].vocab = Vocabulary(WORD_VOCAB)
    py_model = _NewTask._init_model(config.model, tensorizers)
    dummy_test_input = self._get_seq_nn_rand_input(
        BATCH_SIZE, W_VOCAB_SIZE, test_num_words, test_num_seq
    )
    exporter = ModelExporter(
        ModelExporter.Config(),
        py_model.get_export_input_names(tensorizers),
        dummy_test_input,
        py_model.vocab_to_export(tensorizers),
        py_model.get_export_output_names(tensorizers),
    )
    with tempfile.NamedTemporaryFile(
        delete=False, suffix=".predictor"
    ) as pred_file:
        output_names = exporter.export_to_caffe2(py_model, pred_file.name)
        workspace.ResetWorkspace()
    pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)
    for _i in range(num_predictions):
        test_inputs = self._get_seq_nn_rand_input(
            BATCH_SIZE, W_VOCAB_SIZE, test_num_words, test_num_seq
        )
        self._feed_c2_input(
            workspace, test_inputs, exporter.input_names, exporter.vocab_map
        )
        workspace.RunNetOnce(pred_net)
        c2_out = [list(workspace.FetchBlob(o_name)) for o_name in output_names]
        py_model.eval()
        py_outs = py_model(*test_inputs)
        # Apply log_softmax to match the exported predictor net, which has
        # the same op appended before its outputs.
        py_outs = F.log_softmax(py_outs, 1)
        np.testing.assert_array_almost_equal(
            py_outs.view(-1).detach().numpy(), np.array(c2_out).flatten()
        )
def test_contextual_intent_slot_export_to_caffe2(
    self, test_num_words, num_predictions, test_num_seq
):
    config = self._get_config(IntentSlotTask.Config, CONTEXTUAL_INTENT_SLOT_CONFIG)
    tensorizers, data = _NewTask._init_tensorizers(config)
    doc_labels = ["__UNKNOWN__", "cu:other", "cu:address_Person"]
    word_labels = ["__UNKNOWN__", "NoLabel", "person"]
    tensorizers["word_labels"].vocab = Vocabulary(word_labels)
    tensorizers["doc_labels"].vocab = Vocabulary(doc_labels)
    tensorizers["tokens"].vocab = Vocabulary(WORD_VOCAB)
    tensorizers["seq_tokens"].vocab = Vocabulary(WORD_VOCAB)
    py_model = _NewTask._init_model(config.model, tensorizers)
    dummy_test_input = self._get_rand_input_intent_slot(
        BATCH_SIZE, W_VOCAB_SIZE, test_num_words, test_num_seq
    )
    exporter = ModelExporter(
        ModelExporter.Config(),
        py_model.get_export_input_names(tensorizers),
        dummy_test_input,
        py_model.vocab_to_export(tensorizers),
        py_model.get_export_output_names(tensorizers),
    )
    with tempfile.NamedTemporaryFile(
        delete=False, suffix=".predictor"
    ) as pred_file:
        exporter.export_to_caffe2(py_model, pred_file.name)
        workspace.ResetWorkspace()
    pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)
    for _i in range(num_predictions):
        test_inputs = self._get_rand_input_intent_slot(
            BATCH_SIZE, W_VOCAB_SIZE, test_num_words, test_num_seq
        )
        self._feed_c2_input(
            workspace, test_inputs, exporter.input_names, exporter.vocab_map
        )
        workspace.RunNetOnce(pred_net)
        # One output blob per document class and per word class.
        doc_output_names = [
            "{}:{}".format("doc_scores", class_name) for class_name in doc_labels
        ]
        word_output_names = [
            "{}:{}".format("word_scores", class_name) for class_name in word_labels
        ]
        py_model.eval()
        logits = py_model(*test_inputs)
        context = {SEQ_LENS: test_inputs[-1]}
        target = None
        (d_pred, w_pred), (d_score, w_score) = py_model.get_pred(
            logits, target, context
        )
        c2_doc_out = []
        for o_name in doc_output_names:
            c2_doc_out.extend(list(workspace.FetchBlob(o_name)))
        c2_word_out = []
        for o_name in word_output_names:
            c2_word_out.extend(list(workspace.FetchBlob(o_name)))
        np.testing.assert_array_almost_equal(
            d_score.view(-1).detach().numpy(), np.array(c2_doc_out).flatten()
        )
        # Transpose word scores to a per-class layout to match the
        # concatenated per-class Caffe2 blobs.
        np.testing.assert_array_almost_equal(
            torch.transpose(w_score, 1, 2).contiguous().view(-1).detach().numpy(),
            np.array(c2_word_out).flatten(),
        )
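# The tests above call a _feed_c2_input helper that is not defined in this
# section. A minimal sketch of what such a helper plausibly does, assuming
# py_inputs are torch tensors aligned with input_names and vocab_map maps a
# token-id input name to its string vocab; the mapping logic here is an
# assumption for illustration, not the actual helper:
import numpy as np


def _feed_c2_input_sketch(workspace, py_inputs, input_names, vocab_map=None):
    for name, tensor in zip(input_names, py_inputs):
        blob = tensor.numpy()
        if vocab_map and name in vocab_map:
            # Token-id tensors are fed in string form so the exported net's
            # own string->id lookup (built from vocab_map at export time) is
            # exercised by the test.
            vocab = vocab_map[name]
            blob = np.array(
                [[vocab[idx] for idx in row] for row in blob], dtype=str
            )
        workspace.FeedBlob(name, blob)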