Example #1
    def testNew(self):
        A = CuMatrix()
        self.assertIsNotNone(A)
        self.assertEqual(0, A.num_rows())
        self.assertEqual(0, A.num_cols())

        dim = A.dim()
        self.assertEqual(0, dim.rows)
        self.assertEqual(0, dim.cols)

        A = CuMatrix.new_from_size(10, 10)
        self.assertIsNotNone(A)
        self.assertEqual(10, A.num_rows())
        self.assertEqual(10, A.num_cols())

        dim = A.dim()
        self.assertEqual(10, dim.rows)
        self.assertEqual(10, dim.cols)

        A = CuMatrix.new_from_matrix(Matrix([[2, 3], [5, 7]]))
        self.assertIsNotNone(A)
        self.assertEqual(2, A.num_rows())
        self.assertEqual(2, A.num_cols())

        B = CuMatrix.new_from_other(A)
        self.assertIsNotNone(B)
        self.assertEqual(2, B.num_rows())
        self.assertEqual(2, B.num_cols())
Example #2
    def test__getitem(self):
        A = CuMatrix.new_from_matrix(Matrix.new(np.arange(10).reshape((5, 2))))
        self.assertEqual(0.0, A.__getitem(0, 0))
        self.assertEqual(1.0, A.__getitem(0, 1))
        self.assertEqual(2.0, A.__getitem(1, 0))
        self.assertEqual(3.0, A.__getitem(1, 1))
        self.assertEqual(4.0, A.__getitem(2, 0))

        # Out-of-range access should raise an IndexError
        with self.assertRaises(IndexError):
            A.__getitem(0, 2)
Example #3
    def testSwap(self):
        for i in range(10):
            dim = (10 * i, 4 * i)
            M = Matrix(np.random.random(dim))
            A = CuMatrix.new_from_matrix(M)
            B = CuMatrix.new_from_size(A.num_rows(), A.num_cols())
            B.Swap(A)
            self.assertAlmostEqual(A.sum(), B.sum(), places=4)  # Kaldi's precision is awful
            self.assertAlmostEqual(M.sum(), B.sum(), places=4)  # Kaldi's precision is awful

            C = CuMatrix.new_from_size(M.shape[0], M.shape[1])
            C.SwapWithMatrix(M)
            self.assertAlmostEqual(B.sum(), C.sum(), places=4)  # Kaldi's precision is awful
Example #4
    def test_nnet_compute(self):
        gen_config = NnetGenerationOptions()
        test_collapse_model = random.choice([True, False])

        configs = generate_config_sequence(gen_config)
        nnet = Nnet()
        for j, config in enumerate(configs):
            print("Input config[{}]:".format(j))
            print(config)
            istrm = istringstream.from_str(config)
            nnet.read_config(istrm)

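        # Build an example computation request and corresponding input matrices.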
        request = ComputationRequest()
        inputs = compute_example_computation_request_simple(nnet, request)
        if test_collapse_model:
            set_batchnorm_test_mode(True, nnet)
            set_dropout_test_mode(True, nnet)

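        # Compile the request into an NnetComputation.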
        compiler = Compiler(request, nnet)
        opts = CompilerOptions()
        computation = compiler.create_computation(opts)

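        # Optionally collapse the model and compile the collapsed copy separately.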
        nnet_collapsed = Nnet.new_from_other(nnet)
        if test_collapse_model:
            collapse_config = CollapseModelConfig()
            collapse_model(collapse_config, nnet_collapsed)
            compiler_collapsed = Compiler(request, nnet_collapsed)
            computation_collapsed = compiler_collapsed.create_computation(opts)
            computation_collapsed.compute_cuda_indexes()

        ostrm = ostringstream()
        computation.print_computation(ostrm, nnet)
        print("Generated computation:")
        print(ostrm.to_str())

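        # Check the compiled computation for consistency.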
        check_config = CheckComputationOptions()
        check_config.check_rewrite = True
        checker = ComputationChecker(check_config, nnet, computation)
        checker.check()

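        # With 50% probability, also optimize the computation and print the result.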
        if random.choice([True, False]):
            opt_config = NnetOptimizeOptions()
            optimize(opt_config, nnet, max_output_time_in_request(request),
                     computation)
            ostrm = ostringstream()
            computation.print_computation(ostrm, nnet)
            print("Optimized computation:")
            print(ostrm.to_str())

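        # Run the forward computation on the generated inputs.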
        compute_opts = NnetComputeOptions()
        compute_opts.debug = random.choice([True, False])
        computation.compute_cuda_indexes()
        computer = NnetComputer(compute_opts, computation, nnet, nnet)

        for i, ispec in enumerate(request.inputs):
            temp = CuMatrix.new_from_matrix(inputs[i])
            print("Input sum:", temp.sum())
            computer.accept_input(ispec.name, temp)
        computer.run()

        output = computer.get_output_destructive("output")
        print("Output sum:", output.sum())

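        # Run the collapsed computation and verify its output matches the original.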
        if test_collapse_model:
            computer_collapsed = NnetComputer(compute_opts,
                                              computation_collapsed,
                                              nnet_collapsed, nnet_collapsed)
            for i, ispec in enumerate(request.inputs):
                temp = CuMatrix.new_from_matrix(inputs[i])
                print("Input sum:", temp.sum())
                computer_collapsed.accept_input(ispec.name, temp)
            computer_collapsed.run()
            output_collapsed = computer_collapsed.get_output_destructive("output")
            print("Output sum [collapsed]:", output_collapsed.sum())
            self.assertTrue(approx_equal_cu_matrix(output, output_collapsed),
                            "Regular and collapsed computation outputs differ.")

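        # If derivatives were requested, feed a random output derivative and run the backward pass.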
        output_deriv = CuMatrix.new_from_size(output.num_rows(),
                                              output.num_cols())
        output_deriv.set_randn()
        if request.outputs[0].has_deriv:
            computer.accept_input("output", output_deriv)
            computer.run()
            for i, ispec in enumerate(request.inputs):
                if ispec.has_deriv:
                    in_deriv = computer.get_output_destructive(ispec.name)
                    print("Input-deriv sum for input {} is:".format(ispec.name),
                          in_deriv.sum())