Exemplo n.º 1
0
 def check_stddev(self, is_gpu):
     """Compare the distribution's stddev property with scipy's std()."""
     target = self.gpu_dist if is_gpu else self.cpu_dist
     actual = target.stddev.data
     expected = self.scipy_dist.std(**self.scipy_params)
     array.assert_allclose(actual, expected)
    def check_sample(self, is_gpu):
        """Compare empirical moments of sample() against scipy-drawn samples.

        Draws 100000 samples from the distribution under test and from the
        scipy reference (one parameter element at a time), then checks that
        their means and standard deviations agree within 3e-2.
        """
        if is_gpu:
            smp1 = self.gpu_dist.sample(sample_shape=(100000, ) +
                                        self.sample_shape).data
        else:
            smp1 = self.cpu_dist.sample(sample_shape=(100000, ) +
                                        self.sample_shape).data

        # scipy cannot broadcast over the batch of parameters, so draw
        # samples separately for each parameter element.
        smp2 = []
        for one_params in self.scipy_onebyone_params_iter():
            smp2.append(
                self.scipy_dist.rvs(size=(100000, ) + self.sample_shape,
                                    **one_params))
        smp2 = numpy.vstack(smp2)
        # Presumably maps one-hot draws over 3 categories to integer
        # labels -- TODO confirm against the distribution under test.
        smp2 = smp2.dot(numpy.arange(3))
        # Move the per-parameter axis back into the batch position so the
        # layout matches smp1:
        # (n_samples,) + sample_shape + shape + event_shape.
        smp2 = smp2.reshape((numpy.prod(self.shape), 100000) +
                            self.sample_shape + self.cpu_dist.event_shape)
        smp2 = numpy.rollaxis(smp2, 0,
                              smp2.ndim - len(self.cpu_dist.event_shape))
        smp2 = smp2.reshape((100000, ) + self.sample_shape + self.shape +
                            self.cpu_dist.event_shape)
        # Loose tolerances: these are Monte-Carlo estimates of mean/std.
        array.assert_allclose(smp1.mean(axis=0),
                              smp2.mean(axis=0),
                              atol=3e-2,
                              rtol=3e-2)
        array.assert_allclose(smp1.std(axis=0),
                              smp2.std(axis=0),
                              atol=3e-2,
                              rtol=3e-2)
Exemplo n.º 3
0
    def check_log_prob(self, is_gpu):
        """Compare log_prob() with scipy's logpdf/logpmf on a test sample."""
        smp = self.sample_for_test()
        if is_gpu:
            log_prob1 = self.gpu_dist.log_prob(cuda.to_gpu(smp)).data
        else:
            log_prob1 = self.cpu_dist.log_prob(smp).data

        # Continuous distributions expose logpdf, discrete ones logpmf.
        if self.continuous:
            scipy_prob = self.scipy_dist.logpdf
        else:
            scipy_prob = self.scipy_dist.logpmf

        if self.scipy_onebyone:
            # scipy cannot broadcast over the parameter batch: flatten the
            # sample to (sample, batch, event), bring the batch axis first,
            # and evaluate one parameter element at a time.
            onebyone_smp = smp.reshape(*[
                int(numpy.prod(sh))
                for sh in [self.sample_shape, self.shape, self.event_shape]
            ])
            onebyone_smp = numpy.swapaxes(onebyone_smp, 0, 1)
            onebyone_smp = onebyone_smp.reshape((-1, ) + self.sample_shape +
                                                self.event_shape)
            log_prob2 = []
            for one_params, one_smp in zip(self.scipy_onebyone_params_iter(),
                                           onebyone_smp):
                log_prob2.append(scipy_prob(one_smp, **one_params))
            log_prob2 = numpy.vstack(log_prob2)
            # Restore the (sample_shape + shape) layout of log_prob1.
            log_prob2 = log_prob2.reshape(int(numpy.prod(self.shape)), -1).T
            log_prob2 = log_prob2.reshape(self.sample_shape + self.shape)
        else:
            log_prob2 = scipy_prob(smp, **self.scipy_params)
        array.assert_allclose(log_prob1, log_prob2)
Exemplo n.º 4
0
 def check_stddev(self, is_gpu):
     """Check that the stddev property matches the scipy reference value."""
     if is_gpu:
         dist = self.gpu_dist
     else:
         dist = self.cpu_dist
     array.assert_allclose(
         dist.stddev.data, self.scipy_dist.std(**self.scipy_params))
Exemplo n.º 5
0
    def check_log_prob(self, is_gpu):
        """Compare log_prob() with scipy's logpdf/logpmf on a test sample."""
        smp = self.sample_for_test()
        if is_gpu:
            log_prob1 = self.gpu_dist.log_prob(cuda.to_gpu(smp)).data
        else:
            log_prob1 = self.cpu_dist.log_prob(smp).data

        # Continuous distributions expose logpdf, discrete ones logpmf.
        if self.continuous:
            scipy_prob = self.scipy_dist.logpdf
        else:
            scipy_prob = self.scipy_dist.logpmf

        if self.scipy_onebyone:
            # scipy cannot broadcast over the parameter batch: flatten the
            # sample to (sample, batch, event), bring the batch axis first,
            # and evaluate one parameter element at a time.
            onebyone_smp = smp.reshape(
                (int(numpy.prod(self.sample_shape)),
                 numpy.prod(self.shape),
                 int(numpy.prod(self.event_shape))))
            onebyone_smp = numpy.swapaxes(onebyone_smp, 0, 1)
            onebyone_smp = onebyone_smp.reshape((-1,) + self.sample_shape
                                                + self.event_shape)
            log_prob2 = []
            for one_params, one_smp in zip(
                    self.scipy_onebyone_params_iter(), onebyone_smp):
                log_prob2.append(scipy_prob(one_smp, **one_params))
            log_prob2 = numpy.vstack(log_prob2)
            # Restore the (sample_shape + shape) layout of log_prob1.
            log_prob2 = log_prob2.reshape(numpy.prod(self.shape), -1).T
            log_prob2 = log_prob2.reshape(self.sample_shape + self.shape)
        else:
            log_prob2 = scipy_prob(smp, **self.scipy_params)
        array.assert_allclose(log_prob1, log_prob2)
Exemplo n.º 6
0
def _check_arrays_equal(
        actual_arrays, expected_arrays, test_error_cls, **opts):
    """Check that ``actual_arrays`` match ``expected_arrays``.

    ``opts`` is passed through to ``array_module.assert_allclose``.
    Array counts, dtypes and shapes are compared before values; on any
    mismatch a single summarizing message (plus per-output error details
    for value mismatches) is raised through ``test_error_cls.fail``.
    """
    assert issubclass(test_error_cls, _TestError)

    message = None
    detail_message = None
    # ``while True`` + ``break`` funnels every failure path into the
    # single message-formatting step at the bottom.
    while True:
        # Check number of arrays
        if len(actual_arrays) != len(expected_arrays):
            message = (
                'Number of outputs ({}, {}) does not match'.format(
                    len(actual_arrays), len(expected_arrays)))
            break

        # Check dtypes and shapes (generator form: no throwaway lists)
        dtypes_match = all(
            y.dtype == ye.dtype
            for y, ye in zip(actual_arrays, expected_arrays))
        shapes_match = all(
            y.shape == ye.shape
            for y, ye in zip(actual_arrays, expected_arrays))
        if not (shapes_match and dtypes_match):
            message = 'Shapes and/or dtypes do not match'
            break

        # Check values; collect every failing index so the report covers
        # all mismatching outputs, not just the first one.
        errors = []
        for i, (actual, expected) in (
                enumerate(zip(actual_arrays, expected_arrays))):
            try:
                array_module.assert_allclose(actual, expected, **opts)
            except AssertionError as e:
                errors.append((i, e))
        if errors:
            message = (
                'Outputs do not match the expected values.\n'
                'Indices of outputs that do not match: {}'.format(
                    ', '.join(str(i) for i, e in errors)))
            f = six.StringIO()
            for i, e in errors:
                f.write('Error details of output [{}]:\n'.format(i))
                f.write(str(e))
                f.write('\n')
            detail_message = f.getvalue()
            break
        break

    if message is not None:
        msg = (
            '{}\n'
            'Expected shapes and dtypes: {}\n'
            'Actual shapes and dtypes:   {}\n'.format(
                message,
                utils._format_array_props(expected_arrays),
                utils._format_array_props(actual_arrays)))
        if detail_message is not None:
            msg += '\n\n' + detail_message
        test_error_cls.fail(msg)
Exemplo n.º 7
0
 def check_covariance(self, is_gpu):
     """Compare the covariance property with the 'cov' entry of self.params."""
     dist = self.gpu_dist if is_gpu else self.cpu_dist
     array.assert_allclose(dist.covariance.array, self.params['cov'])
Exemplo n.º 8
0
 def check_covariance(self, is_gpu):
     """Check that the covariance property equals the 'cov' parameter."""
     if is_gpu:
         actual = self.gpu_dist.covariance.array
     else:
         actual = self.cpu_dist.covariance.array
     array.assert_allclose(actual, self.params['cov'])
    def check_log_prob(self, is_gpu):
        """Compare log_prob() with scipy's logpmf on a test sample."""
        smp = self.sample_for_test()
        if is_gpu:
            log_prob1 = self.gpu_dist.log_prob(cuda.to_gpu(smp)).data
        else:
            log_prob1 = self.cpu_dist.log_prob(smp).data

        scipy_prob = self.scipy_dist.logpmf

        # scipy cannot broadcast over the parameter batch: flatten the
        # sample to (sample, batch, event), bring the batch axis first,
        # and evaluate one parameter element at a time.
        onebyone_smp = smp.reshape(
            (int(numpy.prod(self.sample_shape)), numpy.prod(self.shape),
             int(numpy.prod(self.event_shape))))
        onebyone_smp = numpy.swapaxes(onebyone_smp, 0, 1)
        onebyone_smp = onebyone_smp.reshape((-1, ) + self.sample_shape +
                                            self.event_shape)

        log_prob2 = []
        for one_params, one_smp in zip(self.scipy_onebyone_params_iter(),
                                       onebyone_smp):
            # Presumably converts integer class labels into one-hot rows
            # over 3 categories for scipy -- TODO confirm against the
            # distribution under test.
            one_smp = numpy.eye(3)[one_smp]
            log_prob2.append(scipy_prob(one_smp, **one_params))
        log_prob2 = numpy.vstack(log_prob2)
        # Restore the (sample_shape + shape) layout of log_prob1.
        log_prob2 = log_prob2.reshape(numpy.prod(self.shape), -1).T
        log_prob2 = log_prob2.reshape(self.sample_shape + self.shape)
        array.assert_allclose(log_prob1, log_prob2)
Exemplo n.º 10
0
    def check_sample(self, is_gpu):
        """Compare empirical moments of sample() against scipy-drawn samples.

        Draws 100000 samples from both the distribution under test and the
        scipy reference and checks mean/std agreement within 3e-2.
        """
        if is_gpu:
            smp1 = self.gpu_dist.sample(
                sample_shape=(100000,)+self.sample_shape).data
        else:
            smp1 = self.cpu_dist.sample(
                sample_shape=(100000,)+self.sample_shape).data

        if self.scipy_onebyone:
            # scipy cannot broadcast over the parameter batch; sample per
            # parameter element and reassemble the batch axis afterwards.
            smp2 = []
            for one_params in self.scipy_onebyone_params_iter():
                smp2.append(self.scipy_dist.rvs(
                    size=(100000,)+self.sample_shape, **one_params))
            smp2 = numpy.vstack(smp2)
            smp2 = smp2.reshape((numpy.prod(self.shape), 100000)
                                + self.sample_shape
                                + self.cpu_dist.event_shape)
            # Move the batch axis into place so the layout matches smp1:
            # (n_samples,) + sample_shape + shape + event_shape.
            smp2 = numpy.rollaxis(
                smp2, 0, smp2.ndim-len(self.cpu_dist.event_shape))
            smp2 = smp2.reshape((100000,) + self.sample_shape + self.shape
                                + self.cpu_dist.event_shape)
        else:
            smp2 = self.scipy_dist.rvs(
                size=(100000,) + self.sample_shape + self.shape,
                **self.scipy_params)
        # Loose tolerances: these are Monte-Carlo estimates.
        array.assert_allclose(smp1.mean(axis=0), smp2.mean(axis=0),
                              atol=3e-2, rtol=3e-2)
        array.assert_allclose(smp1.std(axis=0), smp2.std(axis=0),
                              atol=3e-2, rtol=3e-2)
Exemplo n.º 11
0
    def check_log_prob(self, is_gpu):
        """Compare log_prob() with scipy's logpdf/logpmf on a test sample."""
        smp = self.sample_for_test()
        if is_gpu:
            log_prob1 = self.gpu_dist.log_prob(cuda.to_gpu(smp)).data
        else:
            log_prob1 = self.cpu_dist.log_prob(smp).data

        # Continuous distributions expose logpdf, discrete ones logpmf.
        if self.continuous:
            scipy_prob = self.scipy_dist.logpdf
        else:
            scipy_prob = self.scipy_dist.logpmf

        if self.scipy_onebyone:
            # scipy cannot broadcast over the parameter batch: flatten the
            # sample to (sample, batch, event), bring the batch axis first,
            # and evaluate one parameter element at a time.
            onebyone_smp = smp.reshape(
                (int(numpy.prod(self.sample_shape)), numpy.prod(self.shape),
                 int(numpy.prod(self.event_shape))))
            onebyone_smp = numpy.swapaxes(onebyone_smp, 0, 1)
            onebyone_smp = onebyone_smp.reshape((-1, ) + self.sample_shape +
                                                self.event_shape)
            log_prob2 = []
            for i in range(numpy.prod(self.shape)):
                # Index into the flattened one-by-one parameter arrays.
                one_params = {
                    k: v[i]
                    for k, v in self.scipy_onebyone_params.items()
                }
                one_smp = onebyone_smp[i]
                log_prob2.append(scipy_prob(one_smp, **one_params))
            log_prob2 = numpy.vstack(log_prob2)
            # Restore the (sample_shape + shape) layout of log_prob1.
            log_prob2 = log_prob2.reshape(numpy.prod(self.shape), -1).T
            log_prob2 = log_prob2.reshape(self.sample_shape + self.shape)
        else:
            log_prob2 = scipy_prob(smp, **self.scipy_params)
        array.assert_allclose(log_prob1, log_prob2)
Exemplo n.º 12
0
 def check_cdf(self, is_gpu):
     """Compare cdf() with the scipy reference on a test sample."""
     smp = self.sample_for_test()
     dist = self.gpu_dist if is_gpu else self.cpu_dist
     inputs = cuda.to_gpu(smp) if is_gpu else smp
     actual = dist.cdf(inputs).data
     expected = self.scipy_dist.cdf(smp, **self.scipy_params)
     array.assert_allclose(actual, expected)
Exemplo n.º 13
0
 def check_survival(self, is_gpu):
     """Compare survival_function() with scipy's sf on a test sample."""
     smp = self.sample_for_test()
     dist = self.gpu_dist if is_gpu else self.cpu_dist
     inputs = cuda.to_gpu(smp) if is_gpu else smp
     actual = dist.survival_function(inputs).data
     expected = self.scipy_dist.sf(smp, **self.scipy_params)
     array.assert_allclose(actual, expected)
Exemplo n.º 14
0
 def check_cdf(self, is_gpu):
     """Check cdf() against scipy's cdf for the same sample."""
     smp = self.sample_for_test()
     if is_gpu:
         actual = self.gpu_dist.cdf(cuda.to_gpu(smp)).data
     else:
         actual = self.cpu_dist.cdf(smp).data
     array.assert_allclose(
         actual, self.scipy_dist.cdf(smp, **self.scipy_params))
    def run_test_forward(self, backend_config):
        # Runs the forward test.
        #
        # Generates inputs on CPU, computes expected outputs, runs the
        # forward pass on the configured backend, verifies the inputs were
        # not modified in place, and finally compares actual outputs with
        # the expected ones via check_forward_outputs().

        if self.skip_forward_test:
            raise unittest.SkipTest('skip_forward_test is set')

        self.backend_config = backend_config
        self.test_name = 'test_forward'
        self.before_test(self.test_name)

        cpu_inputs = self._generate_inputs()
        cpu_inputs = self._to_noncontiguous_as_needed(cpu_inputs)
        # Keep pristine copies to detect in-place modification below.
        inputs_copied = [a.copy() for a in cpu_inputs]

        # Compute expected outputs
        cpu_expected = self._forward_expected(cpu_inputs)

        # Compute actual outputs
        inputs = backend_config.get_array(cpu_inputs)
        inputs = self._to_noncontiguous_as_needed(inputs)
        outputs = self._forward(
            tuple([
                chainer.Variable(a, requires_grad=a.dtype.kind == 'f')
                for a in inputs
            ]), backend_config)

        # Check inputs has not changed
        indices = []
        for i in range(len(inputs)):
            try:
                # atol=0 / rtol=0 makes this an exact equality check.
                array_module.assert_allclose(inputs_copied[i],
                                             inputs[i],
                                             atol=0,
                                             rtol=0)
            except AssertionError:
                indices.append(i)

        if indices:
            # Build a detailed report showing each modified input before
            # and after the forward pass.
            f = six.StringIO()
            f.write('Input arrays have been modified during forward.\n'
                    'Indices of modified inputs: {}\n'
                    'Input array shapes and dtypes: {}\n'.format(
                        ', '.join(str(i) for i in indices),
                        utils._format_array_props(inputs)))
            for i in indices:
                f.write('\n')
                f.write('Input[{}]:\n'.format(i))
                f.write('Original:\n')
                f.write(str(inputs_copied[i]))
                f.write('\n')
                f.write('After forward:\n')
                f.write(str(inputs[i]))
                f.write('\n')
            FunctionTestError.fail(f.getvalue())

        self.check_forward_outputs(tuple([var.array for var in outputs]),
                                   cpu_expected)
Exemplo n.º 16
0
 def check_survival(self, is_gpu):
     """Check survival_function() against scipy's sf for the same sample."""
     smp = self.sample_for_test()
     if is_gpu:
         actual = self.gpu_dist.survival_function(
             cuda.to_gpu(smp)).data
     else:
         actual = self.cpu_dist.survival_function(smp).data
     array.assert_allclose(
         actual, self.scipy_dist.sf(smp, **self.scipy_params))
Exemplo n.º 17
0
 def check_icdf(self, is_gpu):
     """Compare icdf() with scipy's ppf on uniform probabilities."""
     # Stay strictly inside (0, 1) so the inverse CDF is finite.
     probs = numpy.random.uniform(
         1e-5, 1 - 1e-5,
         self.sample_shape + self.shape).astype(numpy.float32)
     dist = self.gpu_dist if is_gpu else self.cpu_dist
     inputs = cuda.to_gpu(probs) if is_gpu else probs
     actual = dist.icdf(inputs).data
     expected = self.scipy_dist.ppf(probs, **self.scipy_params)
     array.assert_allclose(actual, expected)
Exemplo n.º 18
0
 def check_icdf(self, is_gpu):
     """Check icdf() against scipy's ppf on uniform probabilities."""
     # Stay strictly inside (0, 1) so the inverse CDF is finite.
     probs = numpy.random.uniform(
         1e-5, 1 - 1e-5, self.sample_shape + self.shape
     ).astype(numpy.float32)
     if is_gpu:
         actual = self.gpu_dist.icdf(cuda.to_gpu(probs)).data
     else:
         actual = self.cpu_dist.icdf(probs).data
     array.assert_allclose(
         actual, self.scipy_dist.ppf(probs, **self.scipy_params))
Exemplo n.º 19
0
 def check_prob(self, is_gpu):
     """Compare prob() with scipy's pdf (continuous) or pmf (discrete)."""
     smp = self.sample_for_test()
     dist = self.gpu_dist if is_gpu else self.cpu_dist
     inputs = cuda.to_gpu(smp) if is_gpu else smp
     actual = dist.prob(inputs).data
     scipy_fn = self.scipy_dist.pdf if self.continuous else self.scipy_dist.pmf
     expected = scipy_fn(smp, **self.scipy_params)
     array.assert_allclose(actual, expected)
Exemplo n.º 20
0
 def check_prob(self, is_gpu):
     """Check prob() against the scipy pdf (continuous) or pmf (discrete)."""
     smp = self.sample_for_test()
     if is_gpu:
         actual = self.gpu_dist.prob(cuda.to_gpu(smp)).data
     else:
         actual = self.cpu_dist.prob(smp).data
     reference = (
         self.scipy_dist.pdf if self.continuous else self.scipy_dist.pmf)
     array.assert_allclose(actual, reference(smp, **self.scipy_params))
Exemplo n.º 21
0
 def check_entropy(self, is_gpu):
     """Compare the entropy property with the scipy reference value."""
     dist = self.gpu_dist if is_gpu else self.cpu_dist
     actual = dist.entropy.data
     if self.scipy_onebyone:
         # Evaluate per parameter element; scipy cannot broadcast here.
         pieces = [self.scipy_dist.entropy(**p)
                   for p in self.scipy_onebyone_params_iter()]
         expected = numpy.vstack(pieces).reshape(self.shape)
     else:
         expected = self.scipy_dist.entropy(**self.scipy_params)
     array.assert_allclose(actual, expected)
Exemplo n.º 22
0
 def check_entropy(self, is_gpu):
     """Check the entropy property against the scipy reference."""
     if is_gpu:
         actual = self.gpu_dist.entropy.data
     else:
         actual = self.cpu_dist.entropy.data
     if self.scipy_onebyone:
         # Evaluate per parameter element; scipy cannot broadcast here.
         expected = numpy.vstack(
             [self.scipy_dist.entropy(**p)
              for p in self.scipy_onebyone_params_iter()]
         ).reshape(self.shape)
     else:
         expected = self.scipy_dist.entropy(**self.scipy_params)
     array.assert_allclose(actual, expected)
Exemplo n.º 23
0
def _check_forward_output_arrays_equal(expected_arrays, actual_arrays,
                                       func_name, **opts):
    """Check forward() outputs against expected arrays; fail on mismatch.

    ``opts`` is passed through to ``array_module.assert_allclose``.
    Array counts, dtypes and shapes are compared before values; any
    failure is reported through ``FunctionTestError.fail`` together with
    a summary of expected vs. actual shapes/dtypes.
    ``func_name`` is currently unused but kept for interface
    compatibility with existing callers.
    """
    message = None
    # ``while True`` + ``break`` funnels every failure path into the
    # single message-formatting step at the bottom.
    while True:
        # Check number of arrays
        if len(expected_arrays) != len(actual_arrays):
            message = ('Number of outputs of forward() ({}, {}) does not '
                       'match'.format(len(expected_arrays),
                                      len(actual_arrays)))
            break

        # Check dtypes and shapes (generator form: no throwaway lists)
        dtypes_match = all(
            ye.dtype == y.dtype
            for ye, y in zip(expected_arrays, actual_arrays))
        shapes_match = all(
            ye.shape == y.shape
            for ye, y in zip(expected_arrays, actual_arrays))
        if not (shapes_match and dtypes_match):
            # The original had a no-op ``.format()`` call here; removed.
            message = 'Shapes and/or dtypes of forward() do not match'
            break

        # Check values; collect every failing index for the report.
        indices = []
        for i, (expected, actual) in enumerate(
                zip(expected_arrays, actual_arrays)):
            try:
                array_module.assert_allclose(expected, actual, **opts)
            except AssertionError:
                indices.append(i)
        if indices:
            message = (
                'Outputs of forward() do not match the expected values.\n'
                'Indices of outputs that do not match: {}'.format(', '.join(
                    str(i) for i in indices)))
            break
        break

    if message is not None:
        FunctionTestError.fail('{}\n'
                               'Expected shapes and dtypes: {}\n'
                               'Actual shapes and dtypes:   {}\n'.format(
                                   message,
                                   utils._format_array_props(expected_arrays),
                                   utils._format_array_props(actual_arrays)))
Exemplo n.º 24
0
    def check_variance(self, is_gpu):
        """Compare the variance property with scipy's var()."""
        dist = self.gpu_dist if is_gpu else self.cpu_dist
        actual = dist.variance.data

        if self.scipy_onebyone:
            # Evaluate per parameter element; scipy cannot broadcast here.
            pieces = [self.scipy_dist.var(**p)
                      for p in self.scipy_onebyone_params_iter()]
            expected = numpy.vstack(pieces).reshape(
                self.shape + self.cpu_dist.event_shape)
        else:
            expected = self.scipy_dist.var(**self.scipy_params)
        array.assert_allclose(actual, expected)
Exemplo n.º 25
0
    def check_mean(self, is_gpu):
        """Compare the mean property with scipy's mean()."""
        dist = self.gpu_dist if is_gpu else self.cpu_dist
        actual = dist.mean.data

        if self.scipy_onebyone:
            # Evaluate per parameter element; scipy cannot broadcast here.
            pieces = [self.scipy_dist.mean(**p)
                      for p in self.scipy_onebyone_params_iter()]
            expected = numpy.vstack(pieces).reshape(
                self.shape + self.cpu_dist.event_shape)
        else:
            expected = self.scipy_dist.mean(**self.scipy_params)
        array.assert_allclose(actual, expected)
Exemplo n.º 26
0
    def check_mean(self, is_gpu):
        """Check the mean property against the scipy reference."""
        if is_gpu:
            actual = self.gpu_dist.mean.data
        else:
            actual = self.cpu_dist.mean.data

        if self.scipy_onebyone:
            # Evaluate per parameter element; scipy cannot broadcast here.
            expected = numpy.vstack([
                self.scipy_dist.mean(**p)
                for p in self.scipy_onebyone_params_iter()
            ]).reshape(self.shape + self.cpu_dist.event_shape)
        else:
            expected = self.scipy_dist.mean(**self.scipy_params)
        array.assert_allclose(actual, expected)
Exemplo n.º 27
0
    def check_variance(self, is_gpu):
        """Check the variance property against the scipy reference."""
        if is_gpu:
            actual = self.gpu_dist.variance.data
        else:
            actual = self.cpu_dist.variance.data

        if self.scipy_onebyone:
            # Evaluate per parameter element; scipy cannot broadcast here.
            expected = numpy.vstack([
                self.scipy_dist.var(**p)
                for p in self.scipy_onebyone_params_iter()
            ]).reshape(self.shape + self.cpu_dist.event_shape)
        else:
            expected = self.scipy_dist.var(**self.scipy_params)
        array.assert_allclose(actual, expected)
Exemplo n.º 28
0
    def run_test_forward(self, backend_config):
        # Runs the forward test.
        #
        # Generates inputs, computes expected outputs on CPU, runs the
        # forward pass on the configured backend, checks that inputs were
        # not modified in place, and compares outputs with the expected
        # values via _check_forward_output_arrays_equal().

        if self.skip_forward_test:
            raise unittest.SkipTest('skip_forward_test is set')

        self.backend_config = backend_config
        self.before_test('test_forward')

        cpu_inputs = self._generate_inputs()
        # Keep pristine copies to detect in-place modification below.
        inputs_copied = [a.copy() for a in cpu_inputs]

        # Compute expected outputs
        cpu_expected = self._forward_expected(cpu_inputs)
        inputs = backend_config.get_array(cpu_inputs)
        inputs = self._to_noncontiguous_as_needed(inputs)

        # Compute actual outputs
        outputs = self._forward(
            tuple([
                chainer.Variable(a, requires_grad=a.dtype.kind == 'f')
                for a in inputs
            ]), backend_config)

        # Check inputs has not changed
        indices = []
        for i in range(len(inputs)):
            try:
                # atol=0 / rtol=0 makes this an exact equality check.
                array_module.assert_allclose(inputs_copied[i],
                                             inputs[i],
                                             atol=0,
                                             rtol=0)
            except AssertionError:
                indices.append(i)

        if len(indices) > 0:
            FunctionTestError.fail(
                'Input arrays have been modified during forward.\n'
                'Indices of modified inputs: {}\n'
                'Input array shapes and dtypes: {}\n'.format(
                    ', '.join(str(i) for i in indices),
                    utils._format_array_props(inputs)))

        _check_forward_output_arrays_equal(cpu_expected,
                                           [var.array
                                            for var in outputs], 'forward',
                                           **self.check_forward_options)
Exemplo n.º 29
0
    def run_test_forward(self, backend_config):
        # Runs the forward test.
        #
        # Generates inputs on CPU, computes expected outputs, runs the
        # forward pass on the configured backend, verifies the inputs were
        # not modified in place, and compares actual outputs with expected
        # ones via check_forward_outputs().

        if self.skip_forward_test:
            raise unittest.SkipTest('skip_forward_test is set')

        self.backend_config = backend_config
        self.test_name = 'test_forward'
        self.before_test(self.test_name)

        cpu_inputs = self._generate_inputs()
        cpu_inputs = self._to_noncontiguous_as_needed(cpu_inputs)
        # Keep pristine copies to detect in-place modification below.
        inputs_copied = [a.copy() for a in cpu_inputs]

        # Compute expected outputs
        cpu_expected = self._forward_expected(cpu_inputs)

        # Compute actual outputs
        inputs = backend_config.get_array(cpu_inputs)
        inputs = self._to_noncontiguous_as_needed(inputs)
        outputs = self._forward(
            tuple([
                chainer.Variable(a, requires_grad=a.dtype.kind == 'f')
                for a in inputs]),
            backend_config)

        # Check inputs has not changed
        indices = []
        for i in range(len(inputs)):
            try:
                # atol=0 / rtol=0 makes this an exact equality check.
                array_module.assert_allclose(
                    inputs_copied[i], inputs[i], atol=0, rtol=0)
            except AssertionError:
                indices.append(i)

        if indices:
            FunctionTestError.fail(
                'Input arrays have been modified during forward.\n'
                'Indices of modified inputs: {}\n'
                'Input array shapes and dtypes: {}\n'.format(
                    ', '.join(str(i) for i in indices),
                    utils._format_array_props(inputs)))

        self.check_forward_outputs(
            tuple([var.array for var in outputs]),
            cpu_expected)
Exemplo n.º 30
0
 def check_entropy(self, is_gpu):
     """Compare the entropy property with the scipy reference value."""
     dist = self.gpu_dist if is_gpu else self.cpu_dist
     actual = dist.entropy.data
     if self.scipy_onebyone:
         # Evaluate per flattened parameter element; scipy cannot
         # broadcast over the parameter batch here.
         pieces = []
         for i in range(numpy.prod(self.shape)):
             params_i = {key: value[i]
                         for key, value in self.scipy_onebyone_params.items()}
             pieces.append(self.scipy_dist.entropy(**params_i))
         expected = numpy.vstack(pieces).reshape(self.shape)
     else:
         expected = self.scipy_dist.entropy(**self.scipy_params)
     array.assert_allclose(actual, expected)
Exemplo n.º 31
0
    def check_variance(self, is_gpu):
        """Compare the variance property with scipy's var()."""
        dist = self.gpu_dist if is_gpu else self.cpu_dist
        actual = dist.variance.data

        if self.scipy_onebyone:
            # Evaluate per flattened parameter element; scipy cannot
            # broadcast over the parameter batch here.
            pieces = []
            for i in range(numpy.prod(self.shape)):
                params_i = {
                    key: value[i]
                    for key, value in self.scipy_onebyone_params.items()}
                pieces.append(self.scipy_dist.var(**params_i))
            expected = numpy.vstack(pieces).reshape(
                self.shape + self.cpu_dist.event_shape)
        else:
            expected = self.scipy_dist.var(**self.scipy_params)
        array.assert_allclose(actual, expected)
Exemplo n.º 32
0
    def check_mean(self, is_gpu):
        """Compare the mean property with scipy's mean()."""
        dist = self.gpu_dist if is_gpu else self.cpu_dist
        actual = dist.mean.data

        if self.scipy_onebyone:
            # Evaluate per flattened parameter element; scipy cannot
            # broadcast over the parameter batch here.
            pieces = []
            for i in range(numpy.prod(self.shape)):
                params_i = {
                    key: value[i]
                    for key, value in self.scipy_onebyone_params.items()}
                pieces.append(self.scipy_dist.mean(**params_i))
            expected = numpy.vstack(pieces).reshape(
                self.shape + self.cpu_dist.event_shape)
        else:
            expected = self.scipy_dist.mean(**self.scipy_params)
        array.assert_allclose(actual, expected)