Example #1
    def test_run_clm(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        if torch.cuda.device_count() > 1:
            # Skipping on multi-GPU: there are not enough batches to train the model,
            # and it would need drop_last to work.
            return

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            run_clm.main()
            result = get_results(tmp_dir)
            self.assertLess(result["perplexity"], 100)
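
For reference, `get_results` (like `logger`, `torch_device`, and the imported `run_clm` module) is defined in the surrounding test module rather than shown in the snippet above. A minimal sketch of what such a helper can look like, assuming the example script saves its metrics to the standard `all_results.json` file in the output directory:

import json
import os


def get_results(output_dir):
    # Read the metrics (e.g. "perplexity", "eval_loss") that run_clm.py saves
    # at the end of training/evaluation via trainer.save_metrics().
    results_file = os.path.join(output_dir, "all_results.json")
    if not os.path.exists(results_file):
        raise ValueError(f"can't find {results_file}")
    with open(results_file) as f:
        return json.load(f)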

    def test_run_clm_config_overrides(self):
        # Test that --config_overrides is applied, despite the misleading dump of the
        # default, un-updated config emitted while the tokenizer is loaded.

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm.py
            --model_type gpt2
            --tokenizer_name gpt2
            --train_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --config_overrides n_embd=10,n_head=2
            """.split()

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        logger = run_clm.logger
        with patch.object(sys, "argv", testargs):
            with CaptureLogger(logger) as cl:
                run_clm.main()

        self.assertIn('"n_embd": 10', cl.out)
        self.assertIn('"n_head": 2', cl.out)
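
The assertions above work because run_clm.py applies the `--config_overrides` string to the newly created config (via `PretrainedConfig.update_from_string` in recent versions of Transformers) and then logs the updated config as JSON. A minimal standalone sketch of that mechanism, assuming a local Transformers install:

from transformers import GPT2Config

config = GPT2Config()
# Apply the same override string that the test passes on the command line.
config.update_from_string("n_embd=10,n_head=2")
assert config.n_embd == 10
assert config.n_head == 2
# The JSON dump is what the test searches for in the captured log output.
print(config.to_json_string())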