Example #1
 def register_subcommand(parser: ArgumentParser):
     """
     Register this command to argparse so it's available for the transformer-cli
     :param parser: Root parser to register command-specific arguments
     :return:
     """
     serve_parser = parser.add_parser(
         "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
     )
     serve_parser.add_argument(
         "--task", type=str, choices=SUPPORTED_TASKS.keys(), help="The task to run the pipeline on"
     )
     serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
     serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
     serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
     serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
     serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
     serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
     serve_parser.add_argument(
         "--device",
         type=int,
         default=-1,
         help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
     )
     serve_parser.set_defaults(func=serve_command_factory)
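
For context, each register_subcommand hangs off the sub-parser group of the root transformers-cli parser (despite the ArgumentParser annotation, the argument passed in is the object returned by add_subparsers()), and set_defaults(func=...) is what lets the entry point dispatch to the chosen command. Below is a minimal sketch of that dispatch pattern, assuming the register_subcommand above is in scope; SUPPORTED_TASKS and serve_command_factory here are simplified stand-ins, not the library's actual objects:

 from argparse import ArgumentParser

 SUPPORTED_TASKS = {"sentiment-analysis": None, "ner": None}  # stand-in for the real task registry

 def serve_command_factory(args):
     # hypothetical stand-in; the real factory builds a serving command from args
     print(f"would serve task={args.task} on {args.host}:{args.port}")

 root = ArgumentParser(prog="transformers-cli")
 commands = root.add_subparsers(help="transformers-cli command helpers")
 register_subcommand(commands)  # the function shown above

 args = root.parse_args(["serve", "--task", "ner", "--port", "8000"])
 args.func(args)  # set_defaults(func=...) routes to serve_command_factory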
Example #2
 def register_subcommand(parser: ArgumentParser):
     run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
     run_parser.add_argument("--task", choices=SUPPORTED_TASKS.keys(), help="Task to run")
     run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
     run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
     run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
     run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
     run_parser.add_argument(
         "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
     )
     run_parser.add_argument(
         "--column",
         type=str,
         help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
     )
     run_parser.add_argument(
         "--format",
         type=str,
         default="infer",
         choices=PipelineDataFormat.SUPPORTED_FORMATS,
         help="Input format to read from",
     )
     run_parser.add_argument(
         "--device",
         type=int,
         default=-1,
         help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
     )
     run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
     run_parser.set_defaults(func=run_command_factory)
Example #3
 def register_subcommand(parser: ArgumentParser):
     run_parser = parser.add_parser('run', help="Run a pipeline through the CLI")
     run_parser.add_argument('--device', type=int, default=-1, help='Device to run on: -1 for CPU, >= 0 for the corresponding GPU (default: -1)')
     run_parser.add_argument('--task', choices=SUPPORTED_TASKS.keys(), help='Task to run')
     run_parser.add_argument('--model', type=str, required=True, help='Name or path to the model to instantiate.')
     run_parser.add_argument('--tokenizer', type=str, help='Name of the tokenizer to use. (default: same as the model name)')
     run_parser.add_argument('--column', type=str, required=True, help='Name of the column to use as input. (For multi-column input such as QA, use column1,column2.)')
     run_parser.add_argument('--format', type=str, default='infer', choices=PipelineDataFormat.SUPPORTED_FORMATS, help='Input format to read from')
     run_parser.add_argument('--input', type=str, required=True, help='Path to the file to use for inference')
     run_parser.add_argument('--output', type=str, required=True, help='Path to the file where results will be written.')
     run_parser.add_argument('kwargs', nargs='*', help='Arguments to forward to the file format reader')
     run_parser.set_defaults(func=run_command_factory)
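
The kwargs positional in this variant uses nargs='*' to sweep up any leftover command-line tokens so they can be forwarded to the data-format reader. A small standalone illustration of that argparse behavior (not the transformers CLI itself):

 from argparse import ArgumentParser

 p = ArgumentParser()
 p.add_argument("--format", default="infer")
 p.add_argument("kwargs", nargs="*", help="extra tokens forwarded to the format reader")

 ns = p.parse_args(["--format", "csv", "delimiter=;", "skip_header=1"])
 print(ns.format)  # csv
 print(ns.kwargs)  # ['delimiter=;', 'skip_header=1']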
Example #4
    def test_load_default_pipelines_tf(self):
        import tensorflow as tf

        from transformers.pipelines import SUPPORTED_TASKS

        set_seed_fn = lambda: tf.random.set_seed(0)  # noqa: E731
        for task in SUPPORTED_TASKS.keys():
            if task == "table-question-answering":
                # table QA is tested in a separate test due to its extra dependencies
                continue

            self.check_default_pipeline(task, "tf", set_seed_fn,
                                        self.check_models_equal_tf)
Example #5
    def test_load_default_pipelines_pt(self):
        import torch

        from transformers.pipelines import SUPPORTED_TASKS

        set_seed_fn = lambda: torch.manual_seed(0)  # noqa: E731
        for task in SUPPORTED_TASKS.keys():
            if task == "table-question-answering":
                # table QA is tested in a separate test due to its extra dependencies
                continue

            self.check_default_pipeline(task, "pt", set_seed_fn,
                                        self.check_models_equal_pt)
Example #6
class PipelineCommonTests(unittest.TestCase):

    pipelines = SUPPORTED_TASKS.keys()

    @slow
    @require_tf
    def test_tf_defaults(self):
        # Test that pipelines can be correctly loaded without any argument
        for task in self.pipelines:
            with self.subTest(msg="Testing TF defaults with TF and {}".format(task)):
                pipeline(task, framework="tf")

    @slow
    @require_torch
    def test_pt_defaults(self):
        # Test that pipelines can be correctly loaded without any argument
        for task in self.pipelines:
            with self.subTest(msg="Testing Torch defaults with PyTorch and {}".format(task)):
                pipeline(task, framework="pt")
Example #7
 def register_subcommand(parser: ArgumentParser):
     """
     Register this command to argparse so it's available for the transformer-cli
     :param parser: Root parser to register command-specific arguments
     :return:
     """
     serve_parser = parser.add_parser(
         'serve',
         help='CLI tool to run inference requests through REST and GraphQL endpoints.'
     )
     serve_parser.add_argument('--task',
                               type=str,
                               choices=SUPPORTED_TASKS.keys(),
                               help='The task to run the pipeline on')
     serve_parser.add_argument('--host',
                               type=str,
                               default='localhost',
                               help='Interface the server will listen on.')
     serve_parser.add_argument('--port',
                               type=int,
                               default=8888,
                               help='Port the server will listen on.')
     serve_parser.add_argument(
         '--model', type=str, help='Model\'s name or path to stored model.')
     serve_parser.add_argument(
         '--config',
         type=str,
         help='Model\'s config name or path to a stored config.')
     serve_parser.add_argument('--tokenizer',
                               type=str,
                               help='Tokenizer name to use.')
     serve_parser.add_argument(
         '--device',
         type=int,
         default=-1,
         help='Device to run on: -1 for CPU, >= 0 for the corresponding GPU (default: -1)'
     )
     serve_parser.set_defaults(func=serve_command_factory)