Example No. 1
    def test_topn(self):
        self.check_skip()
        L = 21
        dims = (L, L + 2)  # avoid square images in tests
        cols = ['x', 'y']
        PRECISION = 0.1

        # top 2
        pos1 = np.array([7, 7])
        pos2 = np.array([14, 14])
        pos3 = np.array([7, 14])
        image = np.ones(dims, dtype='uint8')
        draw_point(image, pos1, 100)
        draw_point(image, pos2, 90)
        draw_point(image, pos3, 80)
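        # topn=2 should keep the two brightest features: pos1 (100) and pos2 (90)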
        actual = tp.locate(image, 5, 1, topn=2, preprocess=False,
                           engine=self.engine)[cols]
        actual = actual.sort_values(['x', 'y'])  # sort for reliable comparison
        expected = DataFrame([pos1, pos2], columns=cols).sort_values(['x', 'y'])
        assert_allclose(actual, expected, atol=PRECISION)

        # top 1
        actual = tp.locate(image, 5, 1, topn=1, preprocess=False,
                           engine=self.engine)[cols]
        actual = actual.sort_values(['x', 'y'])  # sort for reliable comparison
        expected = DataFrame([pos1], columns=cols).sort_values(['x', 'y'])
        assert_allclose(actual, expected, atol=PRECISION)
Example No. 2
def test_compute_lima_on_off_image():
    """
    Test Li & Ma image with snippet from the H.E.S.S. survey data.
    """
    filename = "$GAMMAPY_DATA/tests/unbundled/hess/survey/hess_survey_snippet.fits.gz"
    n_on = Map.read(filename, hdu="ON")
    n_off = Map.read(filename, hdu="OFF")
    a_on = Map.read(filename, hdu="ONEXPOSURE")
    a_off = Map.read(filename, hdu="OFFEXPOSURE")
    significance = Map.read(filename, hdu="SIGNIFICANCE")

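    # Tophat2DKernel(5) is a flat, circular astropy kernel of radius 5 pixels,
    # used to correlate the input maps.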
    kernel = Tophat2DKernel(5)
    results = compute_lima_on_off_image(n_on, n_off, a_on, a_off, kernel)

    # Reproduce safe significance threshold from HESS software
    results["significance"].data[results["n_on"].data < 5] = 0

    # Crop the image at the boundaries: the reference image is cut out
    # of a larger map, so the result cannot be reproduced with regular
    # boundary handling.
    actual = results["significance"].crop(kernel.shape).data
    desired = significance.crop(kernel.shape).data

    assert_allclose(actual, desired, atol=1e-5)
Example No. 3
def test_add_source_space_distances_limited():
    """Test adding distances to source space with a dist_limit."""
    tempdir = _TempDir()
    src = read_source_spaces(fname)
    src_new = read_source_spaces(fname)
    del src_new[0]['dist']
    del src_new[1]['dist']
    n_do = 200  # limit this for speed
    src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
    src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
    out_name = op.join(tempdir, 'temp-src.fif')
    try:
        add_source_space_distances(src_new, dist_limit=0.007)
    except RuntimeError:  # what we throw when scipy version is wrong
        raise SkipTest('dist_limit requires scipy > 0.13')
    write_source_spaces(out_name, src_new)
    src_new = read_source_spaces(out_name)

    for so, sn in zip(src, src_new):
        assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
        assert_array_equal(sn['dist_limit'], np.array([0.007], np.float32))
        do = so['dist']
        dn = sn['dist']

        # clean out distances > 0.007 in C code
        do.data[do.data > 0.007] = 0
        do.eliminate_zeros()

        # make sure we have some comparable distances
        assert np.sum(do.data < 0.007) > 400

        # do comparison over the region computed
        d = (do - dn)[:sn['vertno'][n_do - 1]][:, :sn['vertno'][n_do - 1]]
        assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-6)
Example No. 4
def test_distmod():

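    # Distance modulus: mu = m - M = 5 * log10(d / 10 pc), so d = 10 pc gives mu = 0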
    d = Distance(10, u.pc)
    assert d.distmod.value == 0

    d = Distance(distmod=20)
    assert d.distmod.value == 20
    assert d.kpc == 100

    d = Distance(distmod=-1., unit=u.au)
    npt.assert_allclose(d.value, 1301442.9440836983)

    with pytest.raises(ValueError):
        d = Distance(value=d, distmod=20)

    with pytest.raises(ValueError):
        d = Distance(z=.23, distmod=20)

    # check the Mpc/kpc/pc behavior
    assert Distance(distmod=1).unit == u.pc
    assert Distance(distmod=11).unit == u.kpc
    assert Distance(distmod=26).unit == u.Mpc
    assert Distance(distmod=-21).unit == u.AU

    # if an array, uses the mean of the log of the distances
    assert Distance(distmod=[1, 11, 26]).unit == u.kpc
Example No. 5
    def test_dimensionless_operations(self):
        # test conversion to dimensionless
        dq = 3. * u.m / u.km
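        # u.m / u.km is a dimensionless unit with scale 1e-3, so dq represents 0.003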
        dq1 = dq + 1. * u.mm / u.km
        assert dq1.value == 3.001
        assert dq1.unit == dq.unit

        dq2 = dq + 1.
        assert dq2.value == 1.003
        assert dq2.unit == u.dimensionless_unscaled

        # adding or subtracting a plain dimensionless Quantity to/from the
        # dimensionful self.q1 must raise UnitsError
        with pytest.raises(u.UnitsError):
            self.q1 + u.Quantity(0.1, unit=u.Unit(""))

        with pytest.raises(u.UnitsError):
            self.q1 - u.Quantity(0.1, unit=u.Unit(""))

        # and test that scaling of integers works
        q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
        q2 = q + np.array([4, 5, 6])
        assert q2.unit == u.dimensionless_unscaled
        assert_allclose(q2.value, np.array([4.001, 5.002, 6.003]))
        # but not if doing it inplace
        with pytest.raises(TypeError):
            q += np.array([1, 2, 3])
        # except if it is actually possible
        q = np.array([1, 2, 3]) * u.km / u.m
        q += np.array([4, 5, 6])
        assert q.unit == u.dimensionless_unscaled
        assert np.all(q.value == np.array([1004, 2005, 3006]))
Example No. 6
def test_reindl(irrad_data, ephem_data, dni_et):
    result = irradiance.reindl(40, 180, irrad_data['dhi'], irrad_data['dni'],
                               irrad_data['ghi'], dni_et,
                               ephem_data['apparent_zenith'],
                               ephem_data['azimuth'])
    # values from matlab 1.4 code
    assert_allclose(result, [np.nan, 27.9412, 104.1317, 34.1663], atol=1e-4)
Example No. 7
    def test_function(self):
        val = np.random.random((4, 2))
        input_val = np.random.random((4, 2))

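        # KTH / KTF are the Theano and TensorFlow Keras backend modules;
        # both must produce the same outputs and updates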
        xth = KTH.variable(val)
        xtf = KTF.variable(val)
        yth = KTH.placeholder(ndim=2)
        ytf = KTF.placeholder(ndim=2)

        exp_th = KTH.square(xth) + yth
        exp_tf = KTF.square(xtf) + ytf

        update_th = xth * 2
        update_tf = xtf * 2
        fth = KTH.function([yth], [exp_th], updates=[(xth, update_th)])
        ftf = KTF.function([ytf], [exp_tf], updates=[(xtf, update_tf)])

        function_outputs_th = fth([input_val])[0]
        function_outputs_tf = ftf([input_val])[0]
        assert function_outputs_th.shape == function_outputs_tf.shape
        assert_allclose(function_outputs_th, function_outputs_tf, atol=1e-05)

        new_val_th = KTH.get_value(xth)
        new_val_tf = KTF.get_value(xtf)
        assert new_val_th.shape == new_val_tf.shape
        assert_allclose(new_val_th, new_val_tf, atol=1e-05)
Example No. 8
def test_c_warp_gray():
    target_transform = AffineTransform.identity(2).from_vector(initial_params)
    warped_im = gray_image.warp_to(template_mask, target_transform,
                                   interpolator='c')

    assert warped_im.shape == gray_template.shape
    assert_allclose(warped_im.pixels, gray_template.pixels)
Example No. 9
def test_haydavies(irrad_data, ephem_data, dni_et):
    result = irradiance.haydavies(40, 180, irrad_data['dhi'],
                                  irrad_data['dni'], dni_et,
                                  ephem_data['apparent_zenith'],
                                  ephem_data['azimuth'])
    # values from matlab 1.4 code
    assert_allclose(result, [0, 27.1775, 102.9949, 33.1909], atol=1e-4)
Example No. 10
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

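    # Save to HDF5, reload, and verify predictions survive the round trip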
    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example No. 11
def test_maskandscale():
    t = np.linspace(20, 30, 15)
    t[3] = 100
    tm = np.ma.masked_greater(t, 99)
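    # With maskandscale, values are scaled by scale_factor, shifted by
    # add_offset, and entries equal to missing_value are masked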
    fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        Temp = f.variables['Temperature']
        assert_equal(Temp.missing_value, 9999)
        assert_equal(Temp.add_offset, 20)
        assert_equal(Temp.scale_factor, np.float32(0.01))
        found = Temp[:].compressed()
        del Temp  # Remove ref to mmap, so file can be closed.
        expected = np.round(tm.compressed(), 2)
        assert_allclose(found, expected)

    with in_tempdir():
        newfname = 'ms.nc'
        f = netcdf_file(newfname, 'w', maskandscale=True)
        f.createDimension('Temperature', len(tm))
        temp = f.createVariable('Temperature', 'i', ('Temperature',))
        temp.missing_value = 9999
        temp.scale_factor = 0.01
        temp.add_offset = 20
        temp[:] = tm
        f.close()

        with netcdf_file(newfname, maskandscale=True) as f:
            Temp = f.variables['Temperature']
            assert_equal(Temp.missing_value, 9999)
            assert_equal(Temp.add_offset, 20)
            assert_equal(Temp.scale_factor, np.float32(0.01))
            expected = np.round(tm.compressed(), 2)
            found = Temp[:].compressed()
            del Temp
            assert_allclose(found, expected)
Example No. 12
def test_measure_curve_of_growth():
    """Test measure_curve_of_growth function"""
    image = generate_gaussian_image()
    radius, containment = measure_curve_of_growth(image, 0, 0, 0.6, 0.05)
    sigma = 0.2
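    # Analytic containment of a 2D Gaussian within radius r: 1 - exp(-r**2 / (2 * sigma**2))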
    containment_ana = 1 - np.exp(-0.5 * (radius / sigma) ** 2)
    assert_allclose(containment, containment_ana, rtol=0.1)
Example No. 13
def test_saving_multiple_metrics_outputs():
    inputs = Input(shape=(5,))
    x = Dense(5)(inputs)
    output1 = Dense(1, name='output1')(x)
    output2 = Dense(1, name='output2')(x)

    model = Model(inputs=inputs, outputs=[output1, output2])

    metrics = {'output1': ['mse', 'binary_accuracy'],
               'output2': ['mse', 'binary_accuracy']}
    loss = {'output1': 'mse', 'output2': 'mse'}

    model.compile(loss=loss, optimizer='sgd', metrics=metrics)

    # ensure the model is working
    x = np.array([[1, 1, 1, 1, 1]])
    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example No. 14
    def test_fuzz(self):
        # try a bunch of crazy inputs
        rfuncs = (
            np.random.uniform,
            np.random.normal,
            np.random.standard_cauchy,
            np.random.exponential,
        )
        ntests = 100
        for i in range(ntests):
            rfunc = random.choice(rfuncs)
            target_norm_1 = random.expovariate(1.0)
            n = random.randrange(2, 16)
            A_original = rfunc(size=(n, n))
            E_original = rfunc(size=(n, n))
            A_original_norm_1 = scipy.linalg.norm(A_original, 1)
            scale = target_norm_1 / A_original_norm_1
            A = scale * A_original
            E = scale * E_original
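            # Block identity: expm([[A, E], [0, A]]) carries expm(A) on the
            # diagonal and the Frechet derivative L(A, E) in its upper-right block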
            M = np.vstack([
                np.hstack([A, E]),
                np.hstack([np.zeros_like(A), A])])
            expected_expm = scipy.linalg.expm(A)
            expected_frechet = scipy.linalg.expm(M)[:n, n:]
            observed_expm, observed_frechet = expm_frechet(A, E)
            assert_allclose(expected_expm, observed_expm)
            assert_allclose(expected_frechet, observed_frechet)
Example No. 15
    def test_small_norm_expm_frechet(self):
        # methodically test matrices with a range of norms, for better coverage
        M_original = np.array([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [0, 0, 1, 2],
            [0, 0, 5, 6],
        ], dtype=float)
        A_original = np.array([
            [1, 2],
            [5, 6],
        ], dtype=float)
        E_original = np.array([
            [3, 4],
            [7, 8],
        ], dtype=float)
        A_original_norm_1 = scipy.linalg.norm(A_original, 1)
        selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15]
        m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:])
        for ma, mb in m_neighbor_pairs:
            ell_a = scipy.linalg._expm_frechet.ell_table_61[ma]
            ell_b = scipy.linalg._expm_frechet.ell_table_61[mb]
            target_norm_1 = 0.5 * (ell_a + ell_b)
            scale = target_norm_1 / A_original_norm_1
            M = scale * M_original
            A = scale * A_original
            E = scale * E_original
            expected_expm = scipy.linalg.expm(A)
            expected_frechet = scipy.linalg.expm(M)[:2, 2:]
            observed_expm, observed_frechet = expm_frechet(A, E)
            assert_allclose(expected_expm, observed_expm)
            assert_allclose(expected_frechet, observed_frechet)
Example No. 16
    def test_briggs_helper_function(self):
        np.random.seed(1234)
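        # The Briggs helper computes a**(2**-k) - 1 while avoiding cancellation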
        for a in np.random.randn(10) + 1j * np.random.randn(10):
            for k in range(5):
                x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k)
                x_expected = a ** np.exp2(-k) - 1
                assert_allclose(x_observed, x_expected)
Example No. 17
def test_add_patch_info():
    """Test adding patch info to source space."""
    # let's setup a small source space
    src = read_source_spaces(fname_small)
    src_new = read_source_spaces(fname_small)
    for s in src_new:
        s['nearest'] = None
        s['nearest_dist'] = None
        s['pinfo'] = None

    # test that no patch info is added for small dist_limit
    try:
        add_source_space_distances(src_new, dist_limit=0.00001)
    except RuntimeError:  # what we throw when scipy version is wrong
        pass
    else:
        assert all(s['nearest'] is None for s in src_new)
        assert all(s['nearest_dist'] is None for s in src_new)
        assert all(s['pinfo'] is None for s in src_new)

    # now let's use one that works
    add_source_space_distances(src_new)

    for s1, s2 in zip(src, src_new):
        assert_array_equal(s1['nearest'], s2['nearest'])
        assert_allclose(s1['nearest_dist'], s2['nearest_dist'], atol=1e-7)
        assert_equal(len(s1['pinfo']), len(s2['pinfo']))
        for p1, p2 in zip(s1['pinfo'], s2['pinfo']):
            assert_array_equal(p1, p2)
Example No. 18
def test_sequential_update_disabling():
    val_a = np.random.random((10, 4))
    val_out = np.random.random((10, 4))

    model = keras.models.Sequential()
    model.add(keras.layers.BatchNormalization(input_shape=(4,)))

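    # Freezing the model must drop BatchNormalization's internal update ops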
    model.trainable = False
    assert not model.updates

    model.compile('sgd', 'mse')
    assert not model.updates

    x1 = model.predict(val_a)
    model.train_on_batch(val_a, val_out)
    x2 = model.predict(val_a)
    assert_allclose(x1, x2, atol=1e-7)

    model.trainable = True
    model.compile('sgd', 'mse')
    assert model.updates

    model.train_on_batch(val_a, val_out)
    x2 = model.predict(val_a)
    assert np.abs(np.sum(x1 - x2)) > 1e-5
Example No. 19
def test_ODP_data():

    dat_calc = [ODP_data[i].sum() for i in
                ['ODP2 Max', 'ODP2 Min', 'ODP1 Max', 'ODP1 Min',
                 'ODP2 Design', 'ODP1 Design', 'Lifetime']]
    dat = [77.641999999999996, 58.521999999999998, 64.140000000000001,
           42.734000000000002, 63.10509761272651, 47.809027930358717,
           2268.1700000000001]
    assert_allclose(dat_calc, dat)

    assert ODP_data.index.is_unique
Example No. 20
def check_kurt_expect(distfn, arg, m, v, k, msg):
    if np.isfinite(k):
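        # Finite excess kurtosis k relates the fourth central moment to the
        # variance: E[(X - m)**4] == (k + 3) * v**2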
        m4e = distfn.expect(lambda x: np.power(x-m, 4), arg)
        npt.assert_allclose(m4e, (k + 3.) * np.power(v, 2), atol=1e-5, rtol=1e-5,
                err_msg=msg + ' - kurtosis')
    else:
        npt.assert_(np.isnan(k))
Example No. 21
    def test_image(self, operation):
        image_1 = utils.block_reduce_hdu(self.image, (2, 4), func=operation)
        if operation == np.sum:
            ref1 = [[8, 8, 8, 8, 8, 8], [8, 8, 8, 8, 8, 8]]
        elif operation == np.mean:
            ref1 = [[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]]
        assert_allclose(image_1.data, ref1)
Example No. 22
def test_ref_pixel():
    image = utils.make_empty_image(101, 101, proj='CAR')
    footprint = WCS(image.header).calc_footprint(center=False)
    image_1 = utils.block_reduce_hdu(image, (10, 10), func=np.sum)
    footprint_1 = WCS(image_1.header).calc_footprint(center=False)
    # Lower left corner shouldn't change
    assert_allclose(footprint[0], footprint_1[0])
Example No. 23
def assert_image_equal(actual, expected):
    if np.issubdtype(actual.dtype, np.integer):
        assert_equal(actual, expected)
    else:
        if np.issubdtype(expected.dtype, np.integer):
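            # Normalize the integer reference into [0, 1] to match float pixels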
            expected = expected/float(np.iinfo(expected.dtype).max)
        assert_allclose(actual, expected, atol=1/256.)
Example No. 24
    def test_breakdown_underdetermined(self):
        # Should find LSQ solution in the Krylov span in one inner
        # iteration, despite solver breakdown from nilpotent A.
        A = np.array([[0, 1, 1, 1],
                      [0, 0, 1, 1],
                      [0, 0, 0, 1],
                      [0, 0, 0, 0]], dtype=float)
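        # A is strictly upper triangular, hence nilpotent (A**4 == 0),
        # which causes the Krylov solver breakdown exercised here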

        bs = [
            np.array([1, 1, 1, 1]),
            np.array([1, 1, 1, 0]),
            np.array([1, 1, 0, 0]),
            np.array([1, 0, 0, 0]),
        ]

        for b in bs:
            xp, info = lgmres(A, b, maxiter=1)
            resp = np.linalg.norm(A.dot(xp) - b)

            K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))]
            y, _, _, _ = np.linalg.lstsq(A.dot(K), b)
            x = K.dot(y)
            res = np.linalg.norm(A.dot(x) - b)

            assert_allclose(resp, res, err_msg=repr(b))
Example No. 25
def test_SVGP_vs_SGPR(session_tf):
    """
    With a Gaussian likelihood the sparse Gaussian variational (SVGP) model should be equivalent to the analytically 
     optimial sparse regression model (SGPR) after a single nat grad step of size 1
    """
    N, M, D = 4, 3, 2
    X = np.random.randn(N, D)
    Z = np.random.randn(M, D)
    Y = np.random.randn(N, 1)
    kern = gpflow.kernels.RBF(D)
    lik_var = 0.1
    lik = gpflow.likelihoods.Gaussian()
    lik.variance = lik_var

    m_svgp = gpflow.models.SVGP(X, Y, kern, lik, Z=Z)
    m_sgpr = gpflow.models.SGPR(X, Y, kern, Z=Z)
    m_sgpr.likelihood.variance = lik_var

    m_svgp.set_trainable(False)
    m_svgp.q_mu.set_trainable(True)
    m_svgp.q_sqrt.set_trainable(True)
    NatGradOptimizer(1.).minimize(m_svgp, [[m_svgp.q_mu, m_svgp.q_sqrt]], maxiter=1)

    assert_allclose(m_sgpr.compute_log_likelihood(),
                    m_svgp.compute_log_likelihood(), atol=1e-5)
Example No. 26
def test_hypers_SVGP_vs_SGPR_tensors(session_tf, svgp, sgpr):
    """
    Test SVGP vs SGPR. Running optimization as tensors w/o GPflow wrapper.

    """
    anchor = False
    variationals = [(svgp.q_mu, svgp.q_sqrt)]

    svgp.q_mu.trainable = False
    svgp.q_sqrt.trainable = False

    o1 = NatGradOptimizer(Datum.gamma)
    o1_tensor = o1.make_optimize_tensor(svgp, var_list=variationals)

    o2 = GradientDescentOptimizer(Datum.learning_rate)
    o2_tensor = o2.make_optimize_tensor(svgp)

    o3 = NatGradOptimizer(Datum.gamma)
    o3_tensor = o3.make_optimize_tensor(svgp, var_list=variationals)
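    # Interleaved schedule: nat-grad step, then gradient-descent step,
    # then another nat-grad step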

    session_tf.run(o1_tensor)

    sgpr_likelihood = sgpr.compute_log_likelihood()
    svgp_likelihood = svgp.compute_log_likelihood()
    assert_allclose(sgpr_likelihood, svgp_likelihood, atol=1e-5)

    session_tf.run(o2_tensor)
    session_tf.run(o3_tensor)

    GradientDescentOptimizer(Datum.learning_rate).minimize(sgpr, maxiter=1, anchor=anchor)

    sgpr_likelihood = sgpr.compute_log_likelihood()
    svgp_likelihood = svgp.compute_log_likelihood()
    assert_allclose(sgpr_likelihood, svgp_likelihood, atol=1e-5)
Example No. 27
def test_small_q_sqrt_handeled_correctly(session_tf):
    """
    This is an extra test to make sure things still work when q_sqrt is small. This was breaking (#767)
    """
    N, D = 3, 2
    X = np.random.randn(N, D)
    Y = np.random.randn(N, 1)
    kern = gpflow.kernels.RBF(D)
    lik_var = 0.1
    lik = gpflow.likelihoods.Gaussian()
    lik.variance = lik_var

    m_vgp = gpflow.models.VGP(X, Y, kern, lik)
    m_gpr = gpflow.models.GPR(X, Y, kern)
    m_gpr.likelihood.variance = lik_var

    m_vgp.set_trainable(False)
    m_vgp.q_mu.set_trainable(True)
    m_vgp.q_sqrt.set_trainable(True)
    m_vgp.q_mu = np.random.randn(N, 1)
    m_vgp.q_sqrt = np.eye(N)[None, :, :] * 1e-3
    NatGradOptimizer(1.).minimize(m_vgp, [(m_vgp.q_mu, m_vgp.q_sqrt)], maxiter=1)

    assert_allclose(m_gpr.compute_log_likelihood(),
                    m_vgp.compute_log_likelihood(), atol=1e-4)
Example No. 28
def check(fn, stype):
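    # Compare fn on a dense NumPy array against fn on an MXNet NDArray
    # converted to the given sparse storage type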
    for _ in range(N):
        ndim = 2
        shape = np.random.randint(1, 6, size=(ndim,))
        npy = np.random.normal(0, 1, size=shape)
        nd = mx.nd.array(npy).tostype(stype)
        assert_allclose(fn(npy), fn(nd).asnumpy(), rtol=1e-4, atol=1e-4)
Example No. 29
def test_arrays():

    np.random.seed(8234)

    f = run_arrays(1000, model1)
    mod = f.model

    f.summary()  # Smoke test

    # Compare the parameter estimates to population values.
    epar = np.concatenate(model1())
    assert_allclose(f.params, epar, atol=0.3, rtol=0.3)

    # Test the fitted covariance matrix
    cv = f.covariance(mod.time[0:5], mod.exog_scale[0:5, :],
                      mod.exog_smooth[0:5, :])
    assert_allclose(cv, cv.T)  # Check symmetry
    a, _ = np.linalg.eig(cv)
    assert_equal(a > 0, True)  # Check PSD

    # Test predict
    yhat = f.predict()
    assert_equal(np.corrcoef(yhat, mod.endog)[0, 1] > 0.2, True)
    yhatm = f.predict(exog=mod.exog)
    assert_equal(yhat, yhatm)
    yhat0 = mod.predict(params=f.params, exog=mod.exog)
    assert_equal(yhat, yhat0)

    # Smoke test t-test
    f.t_test(np.eye(len(f.params)))
Example No. 30
def test_formulas():

    np.random.seed(8789)

    f, df = run_formula(1000, model1)
    mod = f.model

    f.summary()  # Smoke test

    # Compare the parameter estimates to population values.
    epar = np.concatenate(model1())
    assert_allclose(f.params, epar, atol=0.1, rtol=1)

    # Test the fitted covariance matrix
    exog_scale = pd.DataFrame(mod.exog_scale[0:5, :],
                              columns=["xsc1", "xsc2"])
    exog_smooth = pd.DataFrame(mod.exog_smooth[0:5, :],
                               columns=["xsm1", "xsm2"])
    cv = f.covariance(mod.time[0:5], exog_scale, exog_smooth)
    assert_allclose(cv, cv.T)
    a, _ = np.linalg.eig(cv)
    assert_equal(a > 0, True)

    # Test predict
    yhat = f.predict()
    assert_equal(np.corrcoef(yhat, mod.endog)[0, 1] > 0.2, True)
    yhatm = f.predict(exog=df)
    assert_equal(yhat, yhatm)
    yhat0 = mod.predict(params=f.params, exog=df)
    assert_equal(yhat, yhat0)

    # Smoke test t-test
    f.t_test(np.eye(len(f.params)))
Example No. 31
def test_first_solar_spectral_correction_supplied():
    # use the CdTe coefficients
    coeffs = (0.87102, -0.040543, -0.00929202, 0.10052, 0.073062, -0.0034187)
    out = atmosphere.first_solar_spectral_correction(1, 1, coefficients=coeffs)
    expected = 0.99134828
    assert_allclose(out, expected, atol=1e-3)
Example No. 32
def test_wireless_half_duplex_line_network_with_cross_traffic(num_stations):
    sr = simulate(
        WirelessHalfDuplexLineNetwork,
        stime_limit=SIM_TIME_LIMIT,
        params=dict(
            num_stations=num_stations,
            active_sources=range(num_stations - 1),
            payload_size=PAYLOAD_SIZE,
            source_interval=Exponential(SOURCE_INTERVAL.mean()),
            mac_header_size=MAC_HEADER,
            phy_header_size=PHY_HEADER,
            ack_size=ACK_SIZE,
            preamble=PREAMBLE,
            bitrate=BITRATE,
            difs=DIFS,
            sifs=SIFS,
            slot=SLOT,
            cwmin=CWMIN,
            cwmax=CWMAX,
            distance=DISTANCE,
            connection_radius=CONNECTION_RADIUS,
            speed_of_light=SPEED_OF_LIGHT,
        ),
        loglevel=Logger.Level.ERROR
    )

    client = sr.data.stations[0]
    server = sr.data.stations[-1]
    source_id = client.source.source_id

    expected_interval_avg = SOURCE_INTERVAL.mean()
    expected_number_of_packets = floor(SIM_TIME_LIMIT / expected_interval_avg)

    assert_allclose(
        client.source.num_packets_sent,
        expected_number_of_packets,
        rtol=0.25
    )

    assert_allclose(
        server.sink.num_packets_received,
        (num_stations - 1) * expected_number_of_packets,
        rtol=0.2
    )

    mean_payload = PAYLOAD_SIZE.mean()
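    # Mean DCF service time: DIFS + mean backoff (CWMIN/2 slots) + data frame
    # airtime + SIFS + ACK airtime + round-trip propagation delay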
    expected_service_time = (
        DIFS + CWMIN / 2 * SLOT
        + PREAMBLE + (mean_payload + MAC_HEADER + PHY_HEADER) / BITRATE
        + SIFS + PREAMBLE + (PHY_HEADER + ACK_SIZE) / BITRATE
        + 2 * DISTANCE / SPEED_OF_LIGHT
    )
    delay_low_bound = expected_service_time * (num_stations - 1) * 0.9999
    assert server.sink.source_delays[source_id].mean() >= delay_low_bound

    expected_busy_ratio = expected_service_time / SOURCE_INTERVAL.mean()
    client_iface = sr.data.get_iface(0)
    assert client_iface.transmitter.busy_trace.timeavg() >= expected_busy_ratio

    # Make sure the outgoing interfaces of all middle stations have non-empty
    # queues, since they generate their own traffic at almost the same time
    # as they relay packets received from connected stations:
    for i in range(0, num_stations - 2):
        prev_if = sr.data.get_iface(i)
        next_if = sr.data.get_iface(i + 1)
        assert next_if.queue.size_trace.timeavg() > 0
        if i > 0:
            next_busy_rate = next_if.transmitter.busy_trace.timeavg()
            prev_busy_rate = prev_if.transmitter.busy_trace.timeavg()
            assert_allclose(
                next_busy_rate, prev_busy_rate + expected_busy_ratio, rtol=0.35
            )
Example No. 33
def mean_std_var(X, axis=0):
    # Head reconstructed from the accumulator names (the original snippet
    # began mid-function): accumulate the sum and sum of squares along `axis`.
    N = X.shape[axis]
    accum_mean = X.sum(axis=axis)
    accum_var = (X ** 2).sum(axis=axis)
    mean = accum_mean / N
    var = accum_var / N - mean**2
    std = np.sqrt(var)
    return mean, std, var


if __name__ == "__main__":

    for _ in range(100):

        N = np.random.randint(low=1, high=100000)
        D = np.random.randint(low=1, high=10)
        #(N, D) = (21, 2)
        axis = np.random.randint(low=0, high=2)

        X = np.random.rand(N, D)

        mean, std, var = mean_std_var(X, axis=axis)

        # I'm not 100% sure about this, since there is something strange
        # with the tolerance that can't go below 1e-4. I'm not sure if
        # I'm missing some indices or if it's just a rounding thing. It
        # certainly looks good upon visual inspection.

        rtol = 1e-4
        npt.assert_allclose(mean, np.mean(X, axis=axis), rtol=rtol)
        npt.assert_allclose(std, np.std(X, axis=axis), rtol=rtol)
        npt.assert_allclose(var, np.var(X, axis=axis), rtol=rtol)
Example No. 34
def test_map_maker(pars, observations, keepdims):
    maker = MapMaker(geom=pars["geom"], geom_true=pars["geom_true"], offset_max="2 deg")

    maps = maker.run(observations)

    counts = maps["counts"]
    assert counts.unit == ""
    assert_allclose(counts.data.sum(), pars["counts"], rtol=1e-5)

    exposure = maps["exposure"]
    assert exposure.unit == "m2 s"
    assert_allclose(exposure.data.sum(), pars["exposure"], rtol=1e-5)

    background = maps["background"]
    assert background.unit == ""
    assert_allclose(background.data.sum(), pars["background"], rtol=1e-5)

    images = maker.run_images(keepdims=keepdims)

    counts = images["counts"]
    assert counts.unit == ""
    assert_allclose(counts.data.sum(), pars["counts"], rtol=1e-5)

    exposure = images["exposure"]
    assert exposure.unit == "m2 s"
    assert_allclose(exposure.data.sum(), pars["exposure_image"], rtol=1e-5)

    background = images["background"]
    assert background.unit == ""
    assert_allclose(background.data.sum(), pars["background"], rtol=1e-5)
Example No. 35
def test_measure_containment_radius():
    """Test measure_containment_radius function"""
    image = generate_gaussian_image()
    rad = measure_containment_radius(image, 0, 0, 0.8)
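    # Solving 1 - exp(-r**2 / (2 * sigma**2)) = 0.8 with sigma = 0.2
    # gives r = 0.2 * sqrt(2 * ln 5), the value asserted below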
    assert_allclose(rad, 0.2 * np.sqrt(2 * np.log(5)), rtol=0.01)
Example No. 36
def test_first_solar_spectral_correction(module_type, expect):
    ams = np.array([1, 3, 5])
    pws = np.array([1, 3, 5])
    ams, pws = np.meshgrid(ams, pws)
    out = atmosphere.first_solar_spectral_correction(pws, ams, module_type)
    assert_allclose(out, expect, atol=0.001)