def testListModels(self):
    """Models registered via the decorator show up in registry.list_models()."""

    @registry.register_model
    def m1():
        pass

    @registry.register_model
    def m2():
        pass

    # Compare as sets: registration order is not part of the contract.
    self.assertSetEqual(set(["m1", "m2"]), set(registry.list_models()))
# -*- coding: utf-8 -*-
"""
@author: Code Doctor Studio
@WeChat public account: xiangyuejiqiren (more articles and study material inside)
@source: companion code for "Deep Learning with TensorFlow: Engineering
         Projects in Practice" (700+ pages)
@code support forum: bbs.aianaconda.com (questions answered)
"""
# 6-19
import tensorflow as tf
from tensor2tensor import models
from tensor2tensor.utils import t2t_model
from tensor2tensor.utils import registry

# Enumerate every registered model, then look one up by name.
print(len(registry.list_models()), registry.list_models())
print(registry.model('transformer'))

# Enumerate the registered hparams sets, then fetch a specific one.
print(len(registry.list_hparams()), registry.list_hparams('transformer'))
print(registry.hparams('transformer_base_v1'))
# # Example inputs as int-tensor.
# print("Inputs, encoded:")
# print(inputs)
# print("Inputs, decoded:")
# # Example inputs as a sentence.
# print(decode(inputs))
# # Example targets as int-tensor.
# print("Targets, encoded:")
# print(targets)
# # Example targets as a sentence.
# print("Targets, decoded:")
# print(decode(targets))

# There are many models available in Tensor2Tensor.
registry.list_models()

# Create hparams and the model.
model_name = "transformer"
hparams_set = "transformer_base"

hparams = trainer_lib.create_hparams(
    hparams_set, data_dir=data_dir, problem_name="translate_ende_wmt32k")

# NOTE: Only create the model once when restoring from a checkpoint; it's a
# Layer and so subsequent instantiations will have different variable scopes
# that will not match the checkpoint.
translate_model = registry.model(model_name)(hparams, Modes.EVAL)

# Copy the pretrained checkpoint locally
def testModelsImported(self):
    """Importing the models package should register lstm_seq2seq."""
    models = registry.list_models()
    # assertIn reports both the missing item and the container on failure,
    # unlike assertTrue(x in y) which only says "False is not true".
    self.assertIn("lstm_seq2seq", models)
def testModelsImported(self):
    """Importing the models package should register baseline_lstm_seq2seq."""
    models = registry.list_models()
    # assertIn reports both the missing item and the container on failure,
    # unlike assertTrue(x in y) which only says "False is not true".
    self.assertIn("baseline_lstm_seq2seq", models)
# In[ ]:

# Problem: English-to-French translation with a 32,768-token vocabulary.
PROBLEM = "translate_enfr_wmt32k"
MODEL = "transformer"        # Our model.
HPARAMS = "transformer_big"  # Default hyperparameters for the model.
                             # On a single GPU, use transformer_big_single_gpu.

# In[ ]:

# Show all problems and models.
from tensor2tensor.utils import registry
from tensor2tensor import problems

problems.available()    # Show all problems.
registry.list_models()  # Show all registered models.

# or
##
# Command line
# get_ipython().system('t2t-trainer --registry_help #Show all problems')
# get_ipython().system('t2t-trainer --problems_help #Show all models')

# # 2. Data generation
#
# Generate the data (download the dataset and generate the data).
#
# ---
#
# You can choose between command line or code.
# Setup helper functions for encoding and decoding.

def encode(input_str, output_str=None):
    """Input str to features dict, ready for inference."""
    token_ids = encoders["inputs"].encode(input_str) + [1]  # Append the EOS id.
    batch_inputs = tf.reshape(token_ids, [1, -1, 1])        # Make it 3D.
    return {"inputs": batch_inputs}


def decode(integers):
    """List of ints to str."""
    ids = list(np.squeeze(integers))
    if 1 in ids:
        # Truncate everything from the first EOS id onward.
        ids = ids[:ids.index(1)]
    return encoders["inputs"].decode(np.squeeze(ids))


# There are many models available in Tensor2Tensor.
print(registry.list_models())

# Create hparams and the model.
model_name = "invertible_ut"
hparams_set = "universal_transformer_tiny"

hparams = trainer_lib.create_hparams(hparams_set)

############
## CHANGE ##
############
# Add a parameter that decides whether the two layers share weights
# (needed for invertibility) or not.
hparams.add_hparam("invertible_share_layer_weights", False)

FLAGS.problems = problem_name
FLAGS.model = model_name