def test_mnist_all_data(self):
    """Run training over all the available data (10,000 samples)."""
    py_args = {
        **self.generic_arguments,
        "--epochs": 2,
        "--batch-size": 10,
        "--batches-per-step": 1000,
    }
    test_util.run_test_helper(run_popart_mnist_training, **py_args)
def test_mnist_log_graph_trace(self):
    """Single-epoch run exercising the --log-graph-trace argument."""
    py_args = dict(self.generic_arguments)
    py_args.update({"--log-graph-trace": "", "--epochs": 1})
    run_test_helper(run_popart_mnist_training, **py_args)
def test_mnist_simulation(self):
    """One-epoch run with basic arguments in simulation mode."""
    py_args = {**self.generic_arguments, "--simulation": "", "--epochs": 1}
    run_test_helper(run_popart_mnist_training, **py_args)
def test_mnist_conv_simulation(self):
    """Simulation run with minimal batch settings.

    This simulation takes around 838s (~14 minutes) to complete.
    """
    py_args = dict(self.generic_arguments)
    py_args.update({
        "--simulation": "",
        "--batch-size": 1,
        "--batches-per-step": 1,
        "--epochs": 1,
    })
    test_util.run_test_helper(run_popart_mnist_training, **py_args)
def test_mnist_train(self):
    """Train with default arguments and verify the reported accuracy."""
    py_args = self.generic_arguments.copy()
    out = test_util.run_test_helper(run_popart_mnist_training, **py_args)
    # Single expected value: only one accuracy figure is checked.
    test_util.parse_results_for_accuracy(
        out, [98.41], self.accuracy_tolerances
    )
def test_output(self):
    """Check the probability and logits-grad means printed by the sparse
    attention run against expected values within tolerances.

    Raises AssertionError if either metric line is missing from the
    output or its value falls outside the allowed tolerance.
    """
    out = run_test_helper(run_sparse_attention)

    # Shared float capture. The decimal point must be escaped (the
    # original pattern used a bare '.', which matches any character and
    # would let a malformed token like '0a5' reach float()).
    float_re = r"(-?\d+(?:\.\d+)?(?:e[+-]?\d+)?)"
    prob_pattern = re.compile(r"Probability mean " + float_re)
    grad_pattern = re.compile(r"Logits grad mean " + float_re)

    prob = None
    grad = None
    prob_expected, prob_tol = 0.00098, 0.00001
    grad_expected, grad_tol = 2e-11, 1e-11

    for line in out.split("\n"):
        # Compile once and match once per line (the original matched
        # each pattern twice: once to test, once to extract).
        m = prob_pattern.match(line)
        if m:
            prob = float(m.group(1))
        else:
            m = grad_pattern.match(line)
            if m:
                grad = float(m.group(1))

    assert prob is not None, \
        "'Probability mean ...' pattern was not found in the output"
    assert abs(prob_expected - prob) < prob_tol
    assert grad is not None, \
        "'Logits grad mean ...' pattern was not found in the output"
    assert abs(grad_expected - grad) < grad_tol
def test_train_32_100_10(self):
    """Generic functional training run checked per-epoch."""
    out = run_test_helper(run_pytorch_mnist, **dict(self.generic_arguments))
    expected = [
        87.65, 88.06, 88.4, 88.43, 88.68,
        88.71, 88.69, 88.89, 88.85, 88.61,
    ]
    parse_results_for_accuracy(out, expected, 2.5)
def test_mnist_train(self):
    """Default-argument training run with per-epoch accuracy checks."""
    args = dict(self.generic_arguments)
    out = run_test_helper(run_popart_mnist_training, **args)
    expected = [
        88.88, 89.63, 89.83, 90.01, 90.12,
        90.22, 90.40, 90.59, 90.65, 90.70,
    ]
    parse_results_for_accuracy(out, expected, self.accuracy_tolerances)
def test_mnist_train(self):
    """Default-argument training run with per-epoch accuracy checks."""
    args = dict(self.generic_arguments)
    out = run_test_helper(run_mnist_training, **args)
    expected = [
        50.32, 58.67, 68.09, 72.81, 75.30,
        77.14, 78.53, 79.75, 80.62, 81.37,
    ]
    parse_results_for_accuracy(out, expected, self.accuracy_tolerances)
def test_mnist_train_sharded(self):
    """Training with default arguments sharded over 2 IPUs."""
    args = {**self.generic_arguments, "--num-ipus": 2}
    out = run_test_helper(run_popart_mnist_training, **args)
    # A single expected value: only one accuracy figure is checked.
    parse_results_for_accuracy(out, [90.70], self.accuracy_tolerances)
def test_mnist_train_sharded_pipelined(self):
    """Training sharded over 2 IPUs with pipelining enabled."""
    args = {**self.generic_arguments, "--num-ipus": 2, "--pipeline": ""}
    out = run_test_helper(run_popart_mnist_training, **args)
    expected = [
        88.11, 88.69, 88.91, 88.94, 88.92,
        88.98, 89.05, 89.14, 89.18, 89.25,
    ]
    parse_results_for_accuracy(out, expected, self.accuracy_tolerances)
def test_mnist_train_replicated(self):
    """Training over 2 IPUs with a replication factor of 2."""
    args = {
        **self.generic_arguments,
        "--num-ipus": 2,
        "--replication-factor": 2,
    }
    out = run_test_helper(run_popart_mnist_training, **args)
    expected = [
        88.88, 89.63, 89.83, 90.01, 90.12,
        90.22, 90.40, 90.59, 90.65, 90.70,
    ]
    parse_results_for_accuracy(out, expected, self.accuracy_tolerances)
def test_mnist_complete(self):
    """Check the model trains and a pvti profile file is produced."""
    args = dict(self.generic_arguments)
    out = run_test_helper(run_popart_mnist_complete, **args)
    parse_results_for_accuracy(out, [88.88], self.accuracy_tolerances)
    # The complete example is expected to emit a .pvti file next to
    # this test module; fail if none appears.
    test_dir = os.path.dirname(__file__)
    pvti_files = [f for f in os.listdir(test_dir) if f.endswith(".pvti")]
    if not pvti_files:
        raise AssertionError("No pvti file found")
def test_mnist_train_replicated_pipelined(self):
    """Training over 4 IPUs, replication factor 2, with pipelining."""
    args = {
        **self.generic_arguments,
        "--num-ipus": 4,
        "--replication-factor": 2,
        "--pipeline": "",
    }
    out = run_test_helper(run_popart_mnist_training, **args)
    # A single expected value: only one accuracy figure is checked.
    parse_results_for_accuracy(out, [89.25], self.accuracy_tolerances)
def test_train_32_100_1_simulation(self):
    """One-epoch run in simulation mode."""
    py_args = dict(self.generic_arguments)
    py_args.update({"--simulation": "", "--epochs": 1})
    run_test_helper(run_pytorch_mnist, **py_args)
def test_log_graph_trace_arg(self):
    """One-epoch run exercising the log-graph-trace argument."""
    py_args = {
        **self.generic_arguments,
        "--log-graph-trace": "",
        "--epochs": 1,
    }
    run_test_helper(run_pytorch_mnist, **py_args)
def test_output(self):
    """Smoke-test the sparse softmax subblock example with defaults.

    Only checks that the helper runs to completion; the captured
    output is not inspected, so the previous unused `out` local has
    been dropped.
    """
    run_test_helper(run_sparse_softmax_subblock)