def test_TFOpenAIGPTModel(self):
    from transformers import OpenAIGPTConfig, TFOpenAIGPTModel
    keras.backend.clear_session()
    # pretrained_weights = 'openai-gpt'
    tokenizer_file = 'openai_openai-gpt.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    # Build a randomly initialized model from the default config and run it in Keras.
    config = OpenAIGPTConfig()
    model = TFOpenAIGPTModel(config)
    predictions = model.predict(inputs)
    # Convert to ONNX and check that ONNX Runtime reproduces the Keras predictions.
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
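The `_prepare_inputs` helper used above is not shown in this section. A minimal, hypothetical sketch of what such a helper could return, assuming a transformers tokenizer that supports `return_tensors='tf'` (the sample sentence is illustrative only):

def _prepare_inputs(self, tokenizer):
    # Hypothetical sketch: encode one sample sentence twice, once as TF tensors
    # for the Keras model and once as numpy feeds for ONNX Runtime.
    text = 'The quick brown fox jumps over the lazy dog.'
    inputs = dict(tokenizer(text, return_tensors='tf'))
    inputs_onnx = {name: tensor.numpy() for name, tensor in inputs.items()}
    return text, inputs, inputs_onnx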
def __init__(
        self,
        reduce_output='sum',
        pretrained_model_name_or_path='openai-gpt',
        trainable=True,
        num_tokens=None,
        **kwargs
):
    super(GPTEncoder, self).__init__()
    try:
        from transformers import TFOpenAIGPTModel
    except ModuleNotFoundError:
        logger.error(
            ' transformers is not installed. '
            'In order to install all text feature dependencies run '
            'pip install ludwig[text]'
        )
        sys.exit(-1)
    # Load the pretrained GPT weights and wrap them in the encoder.
    self.transformer = TFOpenAIGPTModel.from_pretrained(
        pretrained_model_name_or_path
    )
    self.reduce_output = reduce_output
    self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
    self.transformer.trainable = trainable
    # Resize the embedding matrix to the feature's vocabulary size.
    self.transformer.resize_token_embeddings(num_tokens)
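The constructor above only wires up the transformer and the sequence reducer; the encoder's forward pass is not shown in this section. A minimal sketch of how it could look, assuming a `SequenceReducer` instance is callable on a `[batch, sequence, hidden]` tensor:

def call(self, inputs, training=None, mask=None):
    # Hypothetical sketch: run token ids through GPT and reduce the
    # per-token hidden states to a single encoder output.
    transformer_outputs = self.transformer(inputs, training=training)
    hidden = transformer_outputs[0]          # last hidden state: [batch, sequence, hidden_size]
    hidden = self.reduce_sequence(hidden)    # e.g. sum over the sequence axis when reduce_output='sum'
    return {'encoder_output': hidden}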
def test_TFOpenAIGPTModel(self):
    from transformers import OpenAIGPTTokenizer, TFOpenAIGPTModel
    pretrained_weights = 'openai-gpt'
    tokenizer = OpenAIGPTTokenizer.from_pretrained(pretrained_weights)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    model = TFOpenAIGPTModel.from_pretrained(pretrained_weights)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))