def get_eq_from_eig(m):
    ''' Get the equilibrium frequencies from the rate matrix. The equilibrium
        frequencies are the left eigenvector corresponding to the eigenvalue of 0.
        Code here is largely adapted from Bloom; see the function StationaryStates in
        https://github.com/jbloom/phyloExpCM/blob/master/src/submatrix.py
    '''
    (w, v) = linalg.eig(m, left=True, right=False)
    # Select the eigenvalue with the largest real part; for a valid rate matrix
    # it should be ~0. The eigenvalues are complex, so compare real parts
    # explicitly rather than ordering complex numbers.
    max_i = np.argmax(w.real)
    max_w = w[max_i]
    assert abs(max_w) < ZERO, "Maximum eigenvalue is not close to zero."
    max_v = v[:, max_i]
    max_v /= np.sum(max_v)
    eq_freqs = max_v.real  # these are the stationary frequencies
    
    # Some sanity checks
    n = len(eq_freqs)
    assert np.allclose(np.zeros(n), np.dot(eq_freqs, m))  # pi * Q = 0, since the eigenvalue is zero
    pi_inv = np.diag(1.0 / eq_freqs)
    s = np.dot(m, pi_inv)
    assert np.allclose(m, np.dot(s, np.diag(eq_freqs)), atol=ZERO, rtol=1e-5), \
        "exchangeability and equilibrium do not recover the matrix"
    
    # And, for some impressive overkill, double-check that pi_i * q_ij = pi_j * q_ji
    for i in range(n):
        pi_i = eq_freqs[i]
        for j in range(n):
            pi_j = eq_freqs[j]
            forward  = pi_i * m[i][j]
            backward = pi_j * m[j][i]
            assert abs(forward - backward) < ZERO, "Detailed balance violated."
    return eq_freqs
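A minimal usage sketch (an editorial addition, not part of the original example), assuming the same numpy/scipy imports and a small tolerance constant ZERO as above; the 2-state reversible rate matrix is a hypothetical stand-in for the 61x61 codon matrix, chosen because its stationary distribution is known in closed form.

import numpy as np
from scipy import linalg

ZERO = 1e-8

# Hypothetical 2-state rate matrix with rate a (0 -> 1) and rate b (1 -> 0);
# its stationary distribution is pi = (b, a) / (a + b).
a, b = 0.3, 0.7
Q = np.array([[-a,  a],
              [ b, -b]])
pi = get_eq_from_eig(Q)
assert np.allclose(pi, [b / (a + b), a / (a + b)])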
Example #2
def test_scal(vector_array):
    v = vector_array
    for ind in valid_inds(v):
        if v.len_ind(ind) != v.len_ind_unique(ind):
            with pytest.raises(Exception):
                c = v.copy()
                c[ind].scal(1.)
            continue
        ind_complement_ = ind_complement(v, ind)
        c = v.copy()
        c[ind].scal(1.)
        assert len(c) == len(v)
        assert np.all(almost_equal(c, v))

        c = v.copy()
        c[ind].scal(0.)
        assert np.all(almost_equal(c[ind], v.zeros(v.len_ind(ind))))
        assert np.all(almost_equal(c[ind_complement_], v[ind_complement_]))

        for x in (1., 1.4, np.random.random(v.len_ind(ind))):
            c = v.copy()
            c[ind].scal(x)
            assert np.all(almost_equal(c[ind_complement_], v[ind_complement_]))
            assert np.allclose(c[ind].sup_norm(), v[ind].sup_norm() * abs(x))
            assert np.allclose(c[ind].l2_norm(), v[ind].l2_norm() * abs(x))
            if hasattr(v, 'data'):
                y = v.data.copy()
                if NUMPY_INDEX_QUIRK and len(y) == 0:
                    pass
                else:
                    if isinstance(x, np.ndarray) and not isinstance(ind, Number):
                        x = x[:, np.newaxis]
                    y[ind] *= x
                assert np.allclose(c.data, y)
Example #3
def test_pitch_contour():

    # Generate some random pitch
    fs = 8000
    times = np.linspace(0, 5, num=5 * fs, endpoint=True)

    noise = scipy.ndimage.gaussian_filter1d(np.random.randn(len(times)),
                                            sigma=256)
    freqs = 440.0 * 2.0**(16 * noise)

    # negate a bunch of sequences
    idx = np.unique(np.random.randint(0, high=len(times), size=32))
    for start, end in zip(idx[::2], idx[1::2]):
        freqs[start:end] *= -1

    # Test with inferring duration
    x = mir_eval.sonify.pitch_contour(times, freqs, fs)
    assert len(x) == fs * 5

    # Test with an explicit duration
    # This forces the interpolator to go off the end of the sampling grid,
    # which should result in a constant sequence in the output
    x = mir_eval.sonify.pitch_contour(times, freqs, fs, length=fs * 7)
    assert len(x) == fs * 7
    assert np.allclose(x[-fs * 2:], x[-fs * 2])

    # Test with an explicit duration and a fixed offset
    # This forces the interpolator to go off the beginning of
    # the sampling grid, which should result in a constant output
    x = mir_eval.sonify.pitch_contour(times + 5.0, freqs, fs, length=fs * 7)
    assert len(x) == fs * 7
    assert np.allclose(x[:fs * 5], x[0])
Example #4
    def test_cache_key(self):
        def fn(x):
            return x ** 2

        f = Gradient(fn)
        self.assertTrue(np.allclose(f(3), 0.0))
        self.assertTrue(np.allclose(f(3.0), 6.0))
Example #5
def _validate_covars(covars, cvtype, nmix, n_dim):
    from scipy import linalg
    if cvtype == 'spherical':
        if len(covars) != nmix:
            raise ValueError("'spherical' covars must have length nmix")
        elif np.any(covars <= 0):
            raise ValueError("'spherical' covars must be non-negative")
    elif cvtype == 'tied':
        if covars.shape != (n_dim, n_dim):
            raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
        elif (not np.allclose(covars, covars.T)
              or np.any(linalg.eigvalsh(covars) <= 0)):
            raise ValueError("'tied' covars must be symmetric, "
                             "positive-definite")
    elif cvtype == 'diag':
        if covars.shape != (nmix, n_dim):
            raise ValueError("'diag' covars must have shape (nmix, n_dim)")
        elif np.any(covars <= 0):
            raise ValueError("'diag' covars must be non-negative")
    elif cvtype == 'full':
        if covars.shape != (nmix, n_dim, n_dim):
            raise ValueError("'full' covars must have shape "
                             "(nmix, n_dim, n_dim)")
        for n, cv in enumerate(covars):
            if (not np.allclose(cv, cv.T)
                or np.any(linalg.eigvalsh(cv) <= 0)):
                raise ValueError("component %d of 'full' covars must be "
                                 "symmetric, positive-definite" % n)
Example #6
def __unit_test_onset_function(metric):
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # First, test for a warning on empty onsets
        metric(np.array([]), np.arange(10))
        assert len(w) == 1
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Reference onsets are empty."
        metric(np.arange(10), np.array([]))
        assert len(w) == 2
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Estimated onsets are empty."
        # And that the metric is 0
        assert np.allclose(metric(np.array([]), np.array([])), 0)

    # Now test validation function - onsets must be 1d ndarray
    onsets = np.array([[1., 2.]])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be in seconds (so not huge)
    onsets = np.array([1e10, 1e11])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be sorted
    onsets = np.array([2., 1.])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)

    # Valid onsets which are the same produce a score of 1 for all metrics
    onsets = np.arange(10, dtype=float)
    assert np.allclose(metric(onsets, onsets), 1)
Example #7
def order_segments(segments):
    '''Piece the segments together in order, return a list of vertices
    '''
    segments = [np.array(seg).tolist() for seg in segments]
    if not segments:
        return
    verts = [segments[0][0], segments[0][1]]
    original = list(range(1, len(segments)))  # indices of segments not yet placed
    while original:
        match = False
        for ind, segment in enumerate(segments):
            if ind not in original:
                continue
            pt1, pt2 = segment
            if np.allclose(pt1, verts[-1]):
                verts.append(pt2)
                original.remove(ind)
                match = True
            elif np.allclose(pt2, verts[-1]):
                verts.append(pt1)
                original.remove(ind)
                match = True
        if not match:
            verts.append(verts[0])
            return np.array(verts)
    return np.array(verts)
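A small usage sketch with hypothetical data: the four edges of the unit square, given out of order and with mixed orientations, come back as a single ordered, closed ring of vertices.

import numpy as np

segments = [
    [(0, 0), (1, 0)],
    [(1, 1), (0, 1)],   # reversed orientation is handled
    [(1, 0), (1, 1)],
    [(0, 1), (0, 0)],
]
verts = order_segments(segments)
assert np.allclose(verts, [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])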
Example #8
    def __init__(self, x, y):

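        # Accept either 1-D coordinate vectors or 2-D meshgrid-style arrays:
        # in a meshgrid, every row of X repeats the x vector and every column
        # of Y repeats the y vector, so collapse to a single row/column after
        # checking uniformity.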
        if len(x.shape) == 2:
            x_row = x[0]
            assert np.allclose(x_row, x)
            x = x_row
        else:
            assert len(x.shape) == 1

        if len(y.shape) == 2:
            y_col = y[:, 0]
            assert np.allclose(y_col, y.T)
            y = y_col
        else:
            assert len(y.shape) == 1

        self.nx = len(x)
        self.ny = len(y)

        self.dx = x[1] - x[0]
        self.dy = y[1] - y[0]

        self.x_origin = x[0]
        self.y_origin = y[0]

        self.width = x[-1] - x[0]
        self.height = y[-1] - y[0]
Example #9
    def testFull(self, num_best=None, shardsize=100):
        if self.cls == similarities.Similarity:
            index = self.cls(None, corpus, num_features=len(dictionary), shardsize=shardsize)
        else:
            index = self.cls(corpus, num_features=len(dictionary))
        if isinstance(index, similarities.MatrixSimilarity):
            expected = numpy.array([
                [0.57735026, 0.57735026, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.40824831, 0.0, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.0, 0.0, 0.0, 0.0],
                [0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.40824831, 0.0, 0.0, 0.0, 0.81649661, 0.0, 0.40824831, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1., 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.70710677, 0.70710677, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.57735026],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026],
                ], dtype=numpy.float32)
            # HACK: dictionary can be in different order, so compare in sorted order
            self.assertTrue(numpy.allclose(sorted(expected.flat), sorted(index.index.flat)))
        index.num_best = num_best
        query = corpus[0]
        sims = index[query]
        expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)][:num_best]

        # convert sims to full numpy arrays, so we can use allclose() and ignore
        # ordering of items with the same similarity value
        expected = matutils.sparse2full(expected, len(index))
        if num_best is not None: # when num_best is None, sims is already a numpy array
            sims = matutils.sparse2full(sims, len(index))
        self.assertTrue(numpy.allclose(expected, sims))
        if self.cls == similarities.Similarity:
            index.destroy()
Example #10
def test_distmod():
    # WMAP7 but with Omega_relativistic = 0
    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    core.set_current(tcos)
    assert np.allclose(tcos.distmod([1, 5]).value, [44.124857, 48.40167258])
    assert np.allclose(funcs.distmod([1, 5], cosmo=tcos).value,
                       [44.124857, 48.40167258])
Example #11
def test_massivenu_density():
    # Testing neutrino density calculation

    # Simple test cosmology, where we compare rho_nu and rho_gamma
    # against the exact formula (eq 24/25 of Komatsu et al. 2011)
    # computed using Mathematica.  The approximation we use for f(y)
    # is only good to ~ 0.5% (with some redshift dependence), so that's
    # what we test to.
    ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])
    nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
    #  First try 3 massive neutrinos, all 100 eV -- note this is a universe
    #  seriously dominated by neutrinos!
    tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
                              m_nu=u.Quantity(100.0, u.eV))
    assert tcos.has_massive_nu
    assert tcos.Neff == 3
    nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323,
                                                 15633.5, 171.801])
    assert np.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
    assert np.allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)

    # Next, slightly less massive
    tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
                              m_nu=u.Quantity(0.25, u.eV))
    nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312,
                                                 39.1005, 1.11086])
    assert np.allclose(tcos.nu_relative_density(ztest), nurel_exp,
                       rtol=5e-3)

    # For this one also test Onu directly
    onu_exp = np.array([0.01890217, 0.05244681, 0.0638236,
                        0.06999286, 0.1344951])
    assert np.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)

    # And fairly light
    tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3,
                              m_nu=u.Quantity(0.01, u.eV))

    nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348,
                                                 1.90671, 1.00021])
    assert np.allclose(tcos.nu_relative_density(ztest), nurel_exp,
                       rtol=5e-3)
    onu_exp = np.array([0.00066599, 0.00172677, 0.0020732,
                        0.00268404, 0.0978313])
    assert np.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
    assert np.allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048],
                       rtol=1e-4)
    assert np.allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534],
                       rtol=1e-4)

    # Now a mixture of neutrino masses, with non-integer Neff
    tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04,
                              m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV))
    nurel_exp = nuprefac * tcos.Neff * np.array([149.386233, 74.87915, 50.0518,
                                                 14.002403, 1.03702333])
    assert np.allclose(tcos.nu_relative_density(ztest), nurel_exp,
                       rtol=5e-3)
    onu_exp = np.array([0.00584959, 0.01493142, 0.01772291,
                        0.01963451, 0.10227728])
    assert np.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
Example #12
def test_tcmb():
    cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
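    # The CMB temperature scales as Tcmb(z) = Tcmb0 * (1 + z).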
    assert np.allclose(cosmo.Tcmb0.value, 3.0)
    assert np.allclose(cosmo.Tcmb(2).value, 9.0)
    z = [0.0, 1.0, 2.0, 3.0, 9.0]
    assert np.allclose(cosmo.Tcmb(z).value,
                       [3.0, 6.0, 9.0, 12.0, 30.0], rtol=1e-6)
Example #13
def test_tnu():
    cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
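    # The neutrino temperature is Tnu0 = (4/11)**(1/3) * Tcmb0 ~= 0.713766 * 3.0.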
    assert np.allclose(cosmo.Tnu0.value, 2.1412975665108247, rtol=1e-6)
    assert np.allclose(cosmo.Tnu(2).value, 6.423892699532474, rtol=1e-6)
    z = [0.0, 1.0, 2.0, 3.0]
    assert np.allclose(cosmo.Tnu(z), [2.14129757, 4.28259513,
                                      6.4238927, 8.56519027], rtol=1e-6)
Example #14
        def test_return_internal_type(self):
            dtype = self.dtype
            if dtype is None:
                dtype = theano.config.floatX

            rng = np.random.RandomState(utt.fetch_seed())
            x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
            x = self.cast_value(x)

            x_ref = self.ref_fct(x)
            x_shared = self.shared_constructor(x, borrow=False)
            total = self.theano_fct(x_shared)

            total_func = theano.function([], total)

            # in this case we can alias with the internal value
            x = x_shared.get_value(borrow=True, return_internal_type=True)
            assert self.test_internal_type(x)

            x /= .5

            # this is not required by the contract but it is a feature we can
            # implement for some type of SharedVariable.
            assert np.allclose(self.ref_fct(x), total_func())

            x = x_shared.get_value(borrow=False, return_internal_type=True)
            assert self.test_internal_type(x)
            assert x is not x_shared.container.value
            x /= .5

            # this is required by the contract
            assert not np.allclose(self.ref_fct(x), total_func())
Example #15
        def test_shared_do_alias(self):
            dtype = self.dtype
            if dtype is None:
                dtype = theano.config.floatX

            rng = np.random.RandomState(utt.fetch_seed())
            x = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
            x = self.cast_value(x)
            x_ref = self.ref_fct(x)

            x_shared = self.shared_constructor(x, borrow=True)

            total = self.theano_fct(x_shared)

            total_func = theano.function([], total)

            total_val = total_func()

            assert np.allclose(self.ref_fct(x), total_val)

            x /= .5

            # not required by the contract but it is a feature we've implemented
            if self.shared_borrow_true_alias:
                assert np.allclose(self.ref_fct(x), total_func())
            else:
                assert np.allclose(x_ref, total_func())
Example #16
def test_voja_modulate(Simulator, nl_nodirect, seed):
    """Tests that voja's rule can be modulated on/off."""
    n = 200
    learned_vector = np.asarray([0.5])

    def control_signal(t):
        """Modulates the learning on/off."""
        return 0 if t < 0.5 else -1

    m = nengo.Network(seed=seed)
    with m:
        m.config[nengo.Ensemble].neuron_type = nl_nodirect()
        control = nengo.Node(output=control_signal)
        u = nengo.Node(output=learned_vector)
        x = nengo.Ensemble(n, dimensions=len(learned_vector))

        conn = nengo.Connection(
            u, x, synapse=None, learning_rule_type=Voja(None))
        nengo.Connection(control, conn.learning_rule, synapse=None)

        p_enc = nengo.Probe(conn.learning_rule, 'scaled_encoders')

    with Simulator(m) as sim:
        sim.run(1.0)
    tend = sim.trange() > 0.5

    # Check that encoders stop changing after 0.5s
    assert np.allclose(sim.data[p_enc][tend], sim.data[p_enc][-1])

    # Check that encoders changed during first 0.5s
    i = np.where(tend)[0][0]  # first time point after changeover
    assert not np.allclose(sim.data[p_enc][0], sim.data[p_enc][i])
Example #17
def _test_pes(Simulator, nl, plt, seed,
              pre_neurons=False, post_neurons=False, weight_solver=False,
              vin=np.array([0.5, -0.5]), vout=None, n=200,
              function=None, transform=np.array(1.), rate=1e-3):

    vout = np.array(vin) if vout is None else vout

    model = nengo.Network(seed=seed)
    with model:
        model.config[nengo.Ensemble].neuron_type = nl()

        u = nengo.Node(output=vin)
        v = nengo.Node(output=vout)
        a = nengo.Ensemble(n, dimensions=u.size_out)
        b = nengo.Ensemble(n, dimensions=u.size_out)
        e = nengo.Ensemble(n, dimensions=v.size_out)

        nengo.Connection(u, a)

        bslice = b[:v.size_out] if v.size_out < u.size_out else b
        pre = a.neurons if pre_neurons else a
        post = b.neurons if post_neurons else bslice

        conn = nengo.Connection(pre, post,
                                function=function, transform=transform,
                                learning_rule_type=PES(rate))
        if weight_solver:
            conn.solver = nengo.solvers.LstsqL2(weights=True)

        nengo.Connection(v, e, transform=-1)
        nengo.Connection(bslice, e)
        nengo.Connection(e, conn.learning_rule)

        b_p = nengo.Probe(bslice, synapse=0.03)
        e_p = nengo.Probe(e, synapse=0.03)

        weights_p = nengo.Probe(conn, 'weights', sample_every=0.01)
        corr_p = nengo.Probe(conn.learning_rule, 'correction', synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.5)
    t = sim.trange()
    weights = sim.data[weights_p]

    plt.subplot(311)
    plt.plot(t, sim.data[b_p])
    plt.ylabel("Post decoded value")
    plt.subplot(312)
    plt.plot(t, sim.data[e_p])
    plt.ylabel("Error decoded value")
    plt.subplot(313)
    plt.plot(t, sim.data[corr_p] / rate)
    plt.ylabel("PES correction")
    plt.xlabel("Time (s)")

    tend = t > 0.4
    assert np.allclose(sim.data[b_p][tend], vout, atol=0.05)
    assert np.allclose(sim.data[e_p][tend], 0, atol=0.05)
    assert np.allclose(sim.data[corr_p][tend] / rate, 0, atol=0.05)
    assert not np.allclose(weights[0], weights[-1], atol=1e-5)
Example #18
def test_dt_dependence(Simulator, plt, learning_rule, seed, rng):
    """Learning rules should work the same regardless of dt."""
    m, activity_p, trans_p = learning_net(
        learning_rule, nengo.Network(seed=seed), rng)

    trans_data = []
    # Using dts near or greater than tau_ref (0.002 by default) causes learning
    # to differ due to the lowered presynaptic firing rate
    dts = (0.0001, 0.001)
    colors = ('b', 'g', 'r')
    for c, dt in zip(colors, dts):
        with Simulator(m, dt=dt) as sim:
            sim.run(0.1)
        trans_data.append(sim.data[trans_p])
        plt.subplot(2, 1, 1)
        plt.plot(sim.trange(dt=0.01), sim.data[trans_p][..., 0], c=c)
        plt.subplot(2, 1, 2)
        plt.plot(sim.trange(), sim.data[activity_p], c=c)

    plt.subplot(2, 1, 1)
    plt.xlim(right=sim.trange()[-1])
    plt.ylabel("Connection weight")
    plt.subplot(2, 1, 2)
    plt.xlim(right=sim.trange()[-1])
    plt.ylabel("Presynaptic activity")

    assert np.allclose(trans_data[0], trans_data[1], atol=3e-3)
    assert not np.allclose(sim.data[trans_p][0], sim.data[trans_p][-1])
Example #19
def test_reset(Simulator, learning_rule, plt, seed, rng):
    """Make sure resetting learning rules resets all state."""
    m, activity_p, trans_p = learning_net(
        learning_rule, nengo.Network(seed=seed), rng)

    with Simulator(m) as sim:
        sim.run(0.1)
        sim.run(0.2)

        first_t = sim.trange()
        first_t_trans = sim.trange(dt=0.01)
        first_activity_p = np.array(sim.data[activity_p], copy=True)
        first_trans_p = np.array(sim.data[trans_p], copy=True)

        sim.reset()
        sim.run(0.3)

    plt.subplot(2, 1, 1)
    plt.ylabel("Neural activity")
    plt.plot(first_t, first_activity_p, c='b')
    plt.plot(sim.trange(), sim.data[activity_p], c='g')
    plt.subplot(2, 1, 2)
    plt.ylabel("Connection weight")
    plt.plot(first_t_trans, first_trans_p[..., 0], c='b')
    plt.plot(sim.trange(dt=0.01), sim.data[trans_p][..., 0], c='g')

    assert np.allclose(sim.trange(), first_t)
    assert np.allclose(sim.trange(dt=0.01), first_t_trans)
    assert np.allclose(sim.data[activity_p], first_activity_p)
    assert np.allclose(sim.data[trans_p], first_trans_p)
Example #20
    def test_bprop(self):
        r = []

        context = Context()
        for i in range(self.N):
            a = self.get_random_array()
            a_gpu = Connector(GpuMatrix.from_npa(a, 'float'), bu_device_id=context)
            vpooling_block = MeanPoolingBlock(a_gpu, axis=0)
            voutput, dL_dvoutput = vpooling_block.output.register_usage(context, context)
            _dL_voutput = self.get_random_array((dL_dvoutput.nrows, dL_dvoutput.ncols))
            GpuMatrix.from_npa(_dL_voutput, 'float').copy_to(context, dL_dvoutput)

            hpooling_block = MeanPoolingBlock(a_gpu, axis=1)
            houtput, dL_dhoutput = hpooling_block.output.register_usage(context, context)
            _dL_houtput = self.get_random_array((dL_dhoutput.nrows, dL_dhoutput.ncols))
            GpuMatrix.from_npa(_dL_houtput, 'float').copy_to(context, dL_dhoutput)

            vpooling_block.fprop()
            vpooling_block.bprop()
            dL_dmatrix = vpooling_block.dL_dmatrix.to_host()
            r.append(np.allclose(dL_dmatrix,
                                 np.repeat(_dL_voutput/a.shape[0], a.shape[0], 0),
                                 atol=1e-6))

            hpooling_block.fprop()
            hpooling_block.bprop()
            dL_dmatrix = hpooling_block.dL_dmatrix.to_host()
            r.append(np.allclose(dL_dmatrix,
                                 np.repeat(_dL_houtput/a.shape[1], a.shape[1], 1),
                                 atol=1e-6))

        self.assertEqual(sum(r), 2 * self.N)
Example #21
    def test_outer_v(self):
        # Check that the augmentation vectors behave as expected

        outer_v = []
        x0, count_0 = do_solve(outer_k=6, outer_v=outer_v)
        assert_(len(outer_v) > 0)
        assert_(len(outer_v) <= 6)

        x1, count_1 = do_solve(outer_k=6, outer_v=outer_v)
        assert_(count_1 == 2, count_1)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))

        # ---

        outer_v = []
        x0, count_0 = do_solve(outer_k=6, outer_v=outer_v, store_outer_Av=False)
        assert_(array([v[1] is None for v in outer_v]).all())
        assert_(len(outer_v) > 0)
        assert_(len(outer_v) <= 6)

        x1, count_1 = do_solve(outer_k=6, outer_v=outer_v)
        assert_(count_1 == 3, count_1)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))
Example #22
def test_warp_reproject_dst_bounds(runner, tmpdir):
    """--bounds option works."""
    srcname = 'tests/data/shade.tif'
    outputname = str(tmpdir.join('test.tif'))
    out_bounds = [-106.45036, 39.6138, -106.44136, 39.6278]
    result = runner.invoke(
        main_group, [
            'warp', srcname, outputname, '--dst-crs', 'EPSG:4326',
            '--res', 0.001, '--bounds'] + out_bounds)
    assert result.exit_code == 0
    assert os.path.exists(outputname)

    with rasterio.open(outputname) as output:
        assert output.crs == {'init': 'epsg:4326'}
        assert np.allclose(output.bounds[0::3],
                           [-106.45036, 39.6278])
        assert np.allclose([0.001, 0.001],
                           [output.transform.a, -output.transform.e])

        # XXX: an extra row and column is produced in the dataset
        # because we're using ceil instead of floor internally.
        # Not necessarily a bug, but may change in the future.
        assert np.allclose([output.bounds[2] - 0.001, output.bounds[1] + 0.001],
                           [-106.44136, 39.6138])
        assert output.width == 10
        assert output.height == 15
Example #23
 def testSymmetry(self):
     """Verify that the projection is symmetrical about the equator
     """
     for minDec in (-5.0, -1.0, 0.5):
         maxDec = minDec + 2.0
         config = EquatSkyMap.ConfigClass()
         config.decRange = minDec, maxDec
         skyMap = EquatSkyMap(config)
         for tractInfo in skyMap[0:1]:
             numPatches = tractInfo.getNumPatches()
              midXIndex = numPatches[0] // 2  # integer division: used as a patch index
             minPixelPosList = []
             maxPixelPosList = []
             maxYInd = numPatches[1] - 1
             for xInd in (0, midXIndex, numPatches[0] - 1):
                 minDecPatchInfo = tractInfo.getPatchInfo((xInd,0))
                 minDecPosBox = afwGeom.Box2D(minDecPatchInfo.getOuterBBox())
                 minPixelPosList += [
                     minDecPosBox.getMin(),
                     afwGeom.Point2D(minDecPosBox.getMaxX(), minDecPosBox.getMinY()),
                 ]
                 
                 maxDecPatchInfo = tractInfo.getPatchInfo((xInd, maxYInd))
                 maxDecPosBox = afwGeom.Box2D(maxDecPatchInfo.getOuterBBox())
                 maxPixelPosList += [
                     maxDecPosBox.getMax(),
                     afwGeom.Point2D(maxDecPosBox.getMinX(), maxDecPosBox.getMaxY()),
                 ]
             wcs = tractInfo.getWcs()
             minDecList = [wcs.pixelToSky(pos).getPosition(afwGeom.degrees)[1] for pos in minPixelPosList]
             maxDecList = [wcs.pixelToSky(pos).getPosition(afwGeom.degrees)[1] for pos in maxPixelPosList]
             self.assertTrue(numpy.allclose(minDecList, minDecList[0]))
             self.assertTrue(numpy.allclose(maxDecList, maxDecList[0]))
             self.assertTrue(minDecList[0] <= minDec)
             self.assertTrue(maxDecList[0] >= maxDec)
Example #24
def test_connected(Simulator):
    m = nengo.Network(label='test_connected', seed=123)
    with m:
        input = nengo.Node(output=lambda t: np.sin(t), label='input')
        output = nengo.Node(output=lambda t, x: np.square(x),
                            size_in=1,
                            label='output')
        nengo.Connection(input, output, synapse=None)  # Direct connection
        p_in = nengo.Probe(input, 'output')
        p_out = nengo.Probe(output, 'output')

    sim = Simulator(m)
    runtime = 0.5
    sim.run(runtime)

    with Plotter(Simulator) as plt:
        t = sim.trange()
        plt.plot(t, sim.data[p_in], label='sin')
        plt.plot(t, sim.data[p_out], label='sin squared')
        plt.plot(t, np.sin(t), label='ideal sin')
        plt.plot(t, np.sin(t) ** 2, label='ideal squared')
        plt.legend(loc='best')
        plt.savefig('test_node.test_connected.pdf')
        plt.close()

    sim_t = sim.trange()
    sim_sin = sim.data[p_in].ravel()
    sim_sq = sim.data[p_out].ravel()
    t = 0.001 * np.arange(len(sim_t))

    assert np.allclose(sim_t, t)
    # 1-step delay
    assert np.allclose(sim_sin[1:], np.sin(t[:-1]))
    assert np.allclose(sim_sq[1:], sim_sin[:-1] ** 2)
Example #25
def test_normalization():
    """Test that `match_template` gives the correct normalization.

    Normalization gives 1 for a perfect match and -1 for an inverted-match.
    This test adds positive and negative squares to a zero-array and matches
    the array with a positive template.
    """
    n = 5
    N = 20
    ipos, jpos = (2, 3)
    ineg, jneg = (12, 11)
    image = np.full((N, N), 0.5)
    image[ipos:ipos + n, jpos:jpos + n] = 1
    image[ineg:ineg + n, jneg:jneg + n] = 0

    # white square with a black border
    template = np.zeros((n + 2, n + 2))
    template[1:1 + n, 1:1 + n] = 1

    result = match_template(image, template)

    # get the max and min results.
    sorted_result = np.argsort(result.flat)
    iflat_min = sorted_result[0]
    iflat_max = sorted_result[-1]
    min_result = np.unravel_index(iflat_min, result.shape)
    max_result = np.unravel_index(iflat_max, result.shape)

    # shift result by 1 because of template border
    assert np.all((np.array(min_result) + 1) == (ineg, jneg))
    assert np.all((np.array(max_result) + 1) == (ipos, jpos))

    assert np.allclose(result.flat[iflat_min], -1)
    assert np.allclose(result.flat[iflat_max], 1)
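A short companion sketch of the same normalization property, assuming scikit-image's match_template (which this test appears to exercise) and hypothetical array values: correlating an image with one of its own sub-windows peaks at exactly 1.

import numpy as np
from skimage.feature import match_template

image = np.zeros((10, 10))
image[3:6, 3:6] = 1.0
template = image[2:7, 2:7].copy()   # non-constant patch, so the normalization is defined
result = match_template(image, template)
assert np.allclose(result.max(), 1.0)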
Example #26
def test_constant_scalar(Simulator, nl, plt, seed):
    """A Network that represents a constant value."""
    N = 30
    val = 0.5

    m = nengo.Network(label='test_constant_scalar', seed=seed)
    with m:
        m.config[nengo.Ensemble].neuron_type = nl()
        input = nengo.Node(output=val, label='input')
        A = nengo.Ensemble(N, 1)
        nengo.Connection(input, A)
        in_p = nengo.Probe(input, 'output')
        A_p = nengo.Probe(A, 'decoded_output', synapse=0.1)

    sim = Simulator(m, dt=0.001)
    sim.run(1.0)

    t = sim.trange()
    plt.plot(t, sim.data[in_p], label='Input')
    plt.plot(t, sim.data[A_p], label='Neuron approximation, pstc=0.1')
    plt.ylim([0, 1.05 * val])
    plt.legend(loc=0)

    assert np.allclose(sim.data[in_p], val, atol=.1, rtol=.01)
    assert np.allclose(sim.data[A_p][-10:], val, atol=.1, rtol=.01)
Example #27
def isSkewSymmetric(A):
    """
        Returns True if input matrix is skew-symmetric.
        
        Parameters
        ----------
        A : array-like
            The input matrix.
        
        Returns
        -------
        isSkewSymmetric : bool
            Returns True if the matrix is skew-symmetric; False otherwise.
        
        See Also
        --------
        isSquare, isSymmetric, isUpperTriangular, isLowerTriangular, isDiagonal
    """
    
    assert isSquare(A), 'Input matrix should be a square matrix.'
    
    return np.allclose(A, -A.T) and np.allclose(A.diagonal(), 0)
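A brief usage sketch with hypothetical inputs (and assuming the isSquare helper from the same module is available): the generator of 2-D rotations is skew-symmetric, while the identity matrix is not.

import numpy as np

A = np.array([[0.0, -1.0],
              [1.0,  0.0]])
print(isSkewSymmetric(A))          # True
print(isSkewSymmetric(np.eye(2)))  # False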
Example #28
def test_warp_no_reproject_bounds_res(runner, tmpdir):
    srcname = 'tests/data/shade.tif'
    outputname = str(tmpdir.join('test.tif'))
    out_bounds = [-11850000, 4810000, -11849000, 4812000]
    result = runner.invoke(main_group, [
        'warp', srcname, outputname, '--res', 30, '--bounds'] + out_bounds)
    assert result.exit_code == 0
    assert os.path.exists(outputname)

    with rasterio.open(srcname) as src:
        with rasterio.open(outputname) as output:
            assert output.crs == src.crs
            assert np.allclose(output.bounds, out_bounds)
            assert np.allclose([30, 30], [output.transform.a, -output.transform.e])
            assert output.width == 34
            assert output.height == 67

    # dst-bounds should be an alias to bounds
    outputname = str(tmpdir.join('test2.tif'))
    out_bounds = [-11850000, 4810000, -11849000, 4812000]
    result = runner.invoke(main_group, [
        'warp', srcname, outputname, '--res', 30, '--dst-bounds'] + out_bounds)
    assert result.exit_code == 0
    assert os.path.exists(outputname)
    with rasterio.open(srcname) as src:
        with rasterio.open(outputname) as output:
            assert np.allclose(output.bounds, out_bounds)
Example #29
 def test_hv(self):
     def fn(x):
         return np.dot(x, x).sum()
     x = np.ones((3, 3))
     F = HessianVector(fn)
     self.assertTrue(np.allclose(x * 6, F(x, vectors=x)))
     self.assertTrue(np.allclose(x * 2, F(x[0], vectors=x[0])))
Example #30
def svd_example():
    a = np.floor(np.random.rand(4, 4)*20-6)
    logger.info("Matrix A:\n %s", a)
    b = np.floor(np.random.rand(4, 1)*20-6)
    logger.info("Matrix B:\n %s", b)

    u, s, v_t = np.linalg.svd(a) # SVD decomposition of A
    logger.info("Matrix U:\n %s", u)
    logger.info("Matrix S:\n %s", s)
    logger.info("Matrix V(transpose:\n %s", u)

    logger.info("Computing inverse using linalg.pinv")
    # Computing the inverse using pinv
    inv_pinv = np.linalg.pinv(a)
    logger.info("pinv:\n %s", inv_pinv)

    # Computing inverse using matrix decomposition
    logger.info("Computing inverse using svd matrix decomposition")
    inv_svd = np.dot(np.dot(v_t.T, np.linalg.inv(np.diag(s))), u.T)
    logger.info("svd inverse:\n %s", inv_svd)
    logger.info("comparing the results from pinv and svd_inverse:\n %s",
                np.allclose(inv_pinv, inv_svd))

    logger.info("Sol1: Solving x using pinv matrix... x=A^-1 x b")
    result_pinv_x = np.dot(inv_pinv, b)

    logger.info("Sol2: Solving x using svd_inverse matrix... x=A^-1 x b")
    result_svd_x = np.dot(inv_svd, b)

    if not np.allclose(result_pinv_x, result_svd_x):
        raise ValueError('Should have been True')
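For reference, a self-contained sanity check of the identity the example relies on, under the assumption that A is square and full-rank: if A = U*S*V^T, then A^-1 = V*S^-1*U^T, which is what pinv computes in the full-rank case.

import numpy as np

A = np.random.rand(4, 4) + 4 * np.eye(4)   # diagonally dominant, hence invertible
u, s, v_t = np.linalg.svd(A)
inv_svd = v_t.T @ np.diag(1.0 / s) @ u.T
assert np.allclose(np.linalg.pinv(A), inv_svd)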
Example #31
 def test_slerp_datum(self):
     lon0, lat0 = (183, 0)
     lon1, lat1 = (179, 0)
     res = slerp(lon0, lat0, lon1, lat1, 0.5)
     res %= 360
     self.assertTrue(np.allclose(res, (181, 0)))
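The slerp under test is library-specific; for orientation, a hedged reference implementation of lon/lat slerp (great-circle interpolation on the unit sphere) that reproduces this test case.

import numpy as np

def slerp_reference(lon0, lat0, lon1, lat1, t):
    # Convert to unit vectors, interpolate along the great circle, convert back.
    def to_xyz(lon, lat):
        lon, lat = np.radians(lon), np.radians(lat)
        return np.array([np.cos(lat) * np.cos(lon),
                         np.cos(lat) * np.sin(lon),
                         np.sin(lat)])
    p0, p1 = to_xyz(lon0, lat0), to_xyz(lon1, lat1)
    omega = np.arccos(np.clip(np.dot(p0, p1), -1.0, 1.0))
    p = (np.sin((1 - t) * omega) * p0 + np.sin(t * omega) * p1) / np.sin(omega)
    return np.degrees(np.arctan2(p[1], p[0])), np.degrees(np.arcsin(p[2]))

assert np.allclose(np.mod(slerp_reference(183, 0, 179, 0, 0.5), 360), (181, 0))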
Example #32
 def test_notasglove(self):
     words = "inception monkey earlgrey"
     pred = self.emb.predict([self.emb * x for x in words.split()])
     gpred = self.glove.predict([self.glove * x for x in words.split()])
     self.assertFalse(np.allclose(pred, gpred))
Example #33
 def test_sameasbase(self):
     words = "inception monkey earlgrey"
     pred = self.emb.predict([self.emb * x for x in words.split()])
     gpred = self.baseemb.predict([self.baseemb * x for x in words.split()])
     self.assertTrue(np.allclose(pred, gpred))
Example #34
 def test_sameasglove(self):
     words = "key the a his"
     pred = self.emb.predict([self.emb * x for x in words.split()])
     gpred = self.glove.predict([self.glove * x for x in words.split()])
     self.assertTrue(np.allclose(pred, gpred))
Example #35
 def test_notasbase(self):
     words = "key the a his"
     pred = self.emb.predict([self.emb * x for x in words.split()])
     gpred = self.baseemb.predict([self.baseemb * x for x in words.split()])
     self.assertFalse(np.allclose(pred, gpred))
Example #36
def run_mapping():
    basepath    = os.path.normpath(os.getcwd())
    configpath  = os.path.join(basepath, 'inputs')
    print "os.getcwd()", os.getcwd()
    globals2 = {}
    locals2 = {}

    fname = "inputs.user_inputs"
    command_module = __import__("inputs.user_inputs", globals2, locals2, ['*'])

    required_inputs = {
        'xref' : None,
        'nastran_call' :  None,
    }
    for key in required_inputs:
        if key not in command_module.__dict__:
            msg = "fname=%r doesn't contain %r" % (fname, key)
            raise KeyError(msg)
        value = command_module.__dict__[key]
        required_inputs[key] = value
        #print(key, value)


    nastran_call = required_inputs['nastran_call']
    xref = required_inputs['xref']

    print "nastran_call = %r" % nastran_call
    #print "globals2 =", globals2
    print "xref =", xref
    #print "locals2 =", locals2
    #print "globals()", globals()
    asfd


    workpath    = os.path.join(basepath, 'outputsFinal')

    # load mapping
    cart3dLoads = os.path.join(workpath,  'Cart3d_35000_0.825_10_0_0_0_0.i.triq')
    bdfModel    = os.path.join(configpath,'aeroModel_mod.bdf')
    bdfModelOut = os.path.join(workpath,  'fem_loads_3.bdf')
    # mappingMatrix.new.out - stored in workpath

    # deflection mapping
    cart3dGeom  = os.path.join(configpath, 'Cart3d_bwb.i.tri')
    cart3dGeom2 = os.path.join(workpath, 'Components.i.tri')
    bdf = os.path.join(workpath, 'fem3.bdf')
    op2 = os.path.join(workpath, 'fem3.op2')
    f06 = os.path.join(workpath, 'fem3.f06')

    assert os.path.exists(bdf), "%r doesn't exist" % bdf
    assert os.path.exists(bdfModel), "%r doesn't exist" % bdfModel
    assert os.path.exists(cart3dGeom), "%r doesn't exist" % cart3dGeom
    log.info("basepath = %s" % basepath)

    os.chdir(workpath)
    copyFile(cart3dGeom, 'Components.i.tri')

    nodeList = [20037, 21140, 21787, 21028, 1151, 1886, 2018, 1477, 1023, 1116, 1201, 1116, 1201, 1828, 2589, 1373, 1315, 1571, 1507, 1532, 1317, 1327, 2011, 1445, 2352, 1564, 1878, 1402, 1196, 1234, 1252, 1679, 1926, 1274, 2060, 2365, 21486, 20018, 20890, 20035, 1393, 2350, 1487, 1530, 1698, 1782]
    outfile = open('convergeDeflections.out', 'a')  # text mode; strings are written below

    maxADeflectionOld = 0.
    nIterations = 30
    iCart = 1
    for i in range(1, nIterations):
        strI = '_' + str(i)
        assert os.path.exists('Components.i.tri')
        #if i==iCart:
        if 0:
            # run cart3d
            log.info("---running Cart3d #%s---" % i)
            sys.stdout.flush()
            failFlag = os.system('./COMMAND > command.out') # runs cart3d.i.tri, makes Components.i.triq
            assert failFlag == 0, 'Cart3d ./COMMAND failed on iteration #%s' % i
            moveFile('Components.i.triq', cart3dLoads)
            copyFile(cart3dLoads, cart3dLoads+strI)
            copyFile('forces.dat',  'forces.dat'  + strI)
            copyFile('moments.dat', 'moments.dat' + strI)
            copyFile('loadsCC.dat', 'loadsCC.dat' + strI)
            copyFile('history.dat', 'history.dat' + strI)
            os.remove('Components.i.tri') # verifies new Components.i.tri gets created
            sys.stdout.flush()

        # map deflections
        run_map_loads(cart3dLoads, bdfModel, bdfModelOut)  # maps loads
        copyFile(bdfModelOut, bdfModelOut + strI)

        # run nastran
        log.info("---running Nastran #%s---" % i)
        sys.stdout.flush()
        #failFlag = os.system('nastran scr=yes bat=no fem3.bdf') # runs fem3.bdf with fem_loads_3.bdf
        #assert failFlag == 0,'nastran failed on iteration #%s' % i
        copyFile('fem3.op2', 'fem3.op2' + strI)
        copyFile('fem3.f06', 'fem3.f06' + strI)
        os.remove(bdfModelOut) # cleans up fem_loads.bdf

        # map deflections
        (wA, wS) = run_map_deflections(nodeList, bdf, op2, cart3dGeom, cart3dGeom2, log=log)
        assert os.path.exists('Components.i.tri')
        os.remove(op2) # verifies new fem3.op2 was created
        os.remove(f06) # verifies new fem3.f06 was created

        # post-processing
        (maxAID, maxADeflection) = maxDict(wA)
        maxSID = '???'
        maxADeflection = wA[maxAID]
        maxSDeflection = max(wS)[0,0]
        log.info(     "AERO      - i=%s maxAID=%s maxADeflection=%s"   %(i, maxAID, maxADeflection))
        log.info(     "STRUCTURE - i=%s maxSID=%s maxSDeflection=%s"   %(i, maxSID, maxSDeflection))
        outfile.write("AERO      - i=%s maxAID=%s maxADeflection=%s\n" %(i, maxAID, maxADeflection))
        outfile.write("STRUCTURE - i=%s maxSID=%s maxSDeflection=%s\n" %(i, maxSID, maxSDeflection))

        msg  = '\n'+'*'*80+'\n'
        msg += 'finished iteration #%s\n' %(i)
        msg += '*'*80+'\n'
        log.info(msg)

        if allclose(maxADeflection, maxADeflectionOld, atol=0.001):
            break
        maxADeflectionOld = copy.deepcopy(maxADeflection)
        iCart += 1
        sys.stdout.flush()

    outfile.close()
    log.info('---finished runMapping.py---')
Example #37
    def _run_interface(self, runtime):
        # Load image, orient as RAS
        fname = self.inputs.in_file
        orig_img = nb.load(fname)
        reoriented = nb.as_closest_canonical(orig_img)

        # Set target shape information
        target_zooms = np.array(self.inputs.target_zooms)
        target_shape = np.array(self.inputs.target_shape)
        target_span = target_shape * target_zooms

        zooms = np.array(reoriented.header.get_zooms()[:3])
        shape = np.array(reoriented.shape[:3])

        xyz_unit = reoriented.header.get_xyzt_units()[0]
        if xyz_unit == 'unknown':
            # Common assumption; if we're wrong, unlikely to be the only thing that breaks
            xyz_unit = 'mm'

        # Set a 0.01 mm threshold (expressed in the image's units) for deciding whether to rescale
        atol = {'meter': 1e-5, 'mm': 0.01, 'micron': 10}[xyz_unit]

        # Rescale => change zooms
        # Resize => update image dimensions
        rescale = not np.allclose(zooms, target_zooms, atol=atol)
        resize = not np.all(shape == target_shape)
        if rescale or resize:
            target_affine = np.eye(4, dtype=reoriented.affine.dtype)
            if rescale:
                scale_factor = target_zooms / zooms
                target_affine[:3, :3] = reoriented.affine[:3, :3].dot(
                    np.diag(scale_factor))
            else:
                target_affine[:3, :3] = reoriented.affine[:3, :3]

            if resize:
                # The shift is applied after scaling.
                # Use a proportional shift to maintain relative position in dataset
                size_factor = target_span / (zooms * shape)
                # Use integer shifts to avoid unnecessary interpolation
                offset = (reoriented.affine[:3, 3] * size_factor -
                          reoriented.affine[:3, 3])
                target_affine[:3, 3] = reoriented.affine[:3, 3] + offset.astype(int)
            else:
                target_affine[:3, 3] = reoriented.affine[:3, 3]

            data = nli.resample_img(reoriented, target_affine,
                                    target_shape).get_data()
            reoriented = reoriented.__class__(data, target_affine,
                                              reoriented.header)

        # Image may be reoriented, rescaled, and/or resized
        if reoriented is not orig_img:
            out_name = fname_presuffix(fname,
                                       suffix='_ras',
                                       newpath=runtime.cwd)
            reoriented.to_filename(out_name)
        else:
            out_name = fname

        self._results['out_file'] = out_name

        return runtime
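A minimal numeric sketch of the rescaling branch above, with assumed values (a 2 mm isotropic, axis-aligned RAS affine resampled to 1 mm): scaling the zooms multiplies the direction cosines by the per-axis scale factors.

import numpy as np

affine = np.diag([2.0, 2.0, 2.0, 1.0])     # 2 mm isotropic voxels
zooms = np.array([2.0, 2.0, 2.0])
target_zooms = np.array([1.0, 1.0, 1.0])   # resample to 1 mm
scale_factor = target_zooms / zooms
target_affine = np.eye(4)
target_affine[:3, :3] = affine[:3, :3].dot(np.diag(scale_factor))
assert np.allclose(np.abs(target_affine[:3, :3]).sum(axis=0), target_zooms)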
Example #38
    def _make_graph(self):
        self.logger.info("Generating training graph on {} GPUs ...".format(self.cfg.nr_gpus))

        weights_initializer = slim.xavier_initializer()
        biases_initializer = tf.constant_initializer(0.)
        biases_regularizer = tf.no_regularizer
        weights_regularizer = tf.contrib.layers.l2_regularizer(self.cfg.weight_decay)

        tower_grads = []
        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(self.cfg.nr_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('tower_%d' % i) as name_scope:
                        # Force all Variables to reside on the CPU.
                        with slim.arg_scope([slim.model_variable, slim.variable], device='/device:CPU:0'):
                            with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \
                                                 slim.conv2d_transpose, slim.separable_conv2d,
                                                 slim.fully_connected],
                                                weights_regularizer=weights_regularizer,
                                                biases_regularizer=biases_regularizer,
                                                weights_initializer=weights_initializer,
                                                biases_initializer=biases_initializer):
                                # loss over single GPU
                                self.net.make_network(is_train=True)
                                if i == self.cfg.nr_gpus - 1:
                                    loss = self.net.get_loss(include_wd=True)
                                else:
                                    loss = self.net.get_loss()
                                self._input_list.append( self.net.get_inputs() )

                        tf.get_variable_scope().reuse_variables()

                        if i == 0:
                            if self.cfg.nr_gpus > 1 and self.cfg.bn_train is True:
                                self.logger.warning("BN is calculated only on single GPU.")
                            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)
                            with tf.control_dependencies(extra_update_ops):
                                grads = self._optimizer.compute_gradients(loss)
                        else:
                            grads = self._optimizer.compute_gradients(loss)
                        final_grads = []
                        with tf.variable_scope('Gradient_Mult') as scope:
                            for grad, var in grads:
                                scale = 1.
                                if self.cfg.double_bias and '/biases:' in var.name:
                                    scale *= 2.
                                if not np.allclose(scale, 1.):
                                    grad = tf.multiply(grad, scale)
                                final_grads.append((grad, var))
                        tower_grads.append(final_grads)

        if len(tower_grads) > 1:
            grads = sum_gradients(tower_grads)
        else:
            grads = tower_grads[0]

        if False:
            variable_averages = tf.train.ExponentialMovingAverage(0.9999)
            variables_to_average = (tf.trainable_variables() + tf.moving_average_variables())
            variables_averages_op = variable_averages.apply(variables_to_average)

            apply_gradient_op = self._optimizer.apply_gradients(grads)
            train_op = tf.group(apply_gradient_op, variables_averages_op, *extra_update_ops)
        else:
            apply_gradient_op = self._optimizer.apply_gradients(grads)
            train_op = tf.group(apply_gradient_op, *extra_update_ops)

        return train_op
Example #39
 def find_erange(e):
     for i, j in enumerate(erange_unique):
         if np.allclose(j, e):
             return i
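A one-line usage sketch with hypothetical values, assuming erange_unique is visible in the enclosing scope: the point of the np.allclose lookup is that floating-point arithmetic need not reproduce a value bit-for-bit.

erange_unique = [0.1, 0.2, 0.1 + 0.2]   # note 0.1 + 0.2 != 0.3 exactly
assert find_erange(0.3) == 2            # tolerant comparison still finds it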
Example #40
 def test_slerp(self):
     lon0, lat0 = (0, 0)
     lon1, lat1 = (0, 1)
     self.assertTrue(
         np.allclose(slerp(lon0, lat0, lon1, lat1, 0.5), (0, 0.5)))
Example #41
 def test_compute_ring_normal(self):
   # FIXME might break with different version of rdkit
   normal = rgf.compute_ring_normal(self.cycle4, range(4))
   self.assertTrue(
       np.allclose(np.abs(normal / np.linalg.norm(normal)), [0, 0, 1]))
Example #42
 def find_trange(t):
     for i, j in enumerate(trange_unique):
         if np.allclose(j, t, rtol=1e-15):
             return i
Example #43
    def test_calc_collapse_structures1(self):
        edm = EventDamageModel([0.0]*17, [0.0]*17, [0.0]*17,
                               [0.0]*17, [0.0]*17)
        edm.struct_damage = num.zeros(17, float)
        edm.contents_damage = num.zeros(17, float)
        collapse_probability = {0.4:[0], #0
                                0.6:[1], #1
                                0.5:[2], #1
                                0.25:[3,4], #1
                                0.1:[5,6,7,8], #0
                                0.2:[9,10,11,12,13,14,15,16]} #2

        assert num.allclose(edm.max_depths, 0.0)
        assert num.allclose(edm.shore_distances, 0.0)
        assert num.allclose(edm.walls, 0.0)
        assert num.allclose(edm.struct_costs, 0.0)
        assert num.allclose(edm.content_costs, 0.0)

        edm._calc_collapse_structures(collapse_probability, verbose_csv=True)

        # Random numbers are not stable between Python2 and Python3 - even with the same seed seed(17, version=1)
        # See https://stackoverflow.com/questions/11929701/why-is-seeding-the-random-generator-not-stable-between-versions-of-python

        if system_tools.major_version == 2:
            assert num.allclose(edm.struct_damage, [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), 'Expected %s' % edm.struct_damage
            assert num.allclose(edm.contents_damage, [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), 'Expected %s' % edm.contents_damage
            self.assertTrue(edm.struct_damage[0] == 0.0 and
                            edm.contents_damage[0] == 0.0,
                            'Error!')
            self.assertTrue(edm.struct_damage[1] == 1.0 and
                            edm.contents_damage[1] == 1.0,
                            'Error!')
            self.assertTrue(edm.struct_damage[2] == 1.0 and
                            edm.contents_damage[2] == 1.0,
                            'Error!')
            self.assertTrue(edm.struct_damage[3] + edm.struct_damage[4] == 1.0 and
                            edm.contents_damage[3] + edm.contents_damage[4] ==1.0,
                            'Error!')
        elif system_tools.major_version == 3:
            assert num.allclose(edm.struct_damage, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0]), 'Expected %s' % edm.struct_damage
            assert num.allclose(edm.contents_damage, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0]), 'Expected %s' % edm.contents_damage
            self.assertTrue(edm.struct_damage[0] == 0.0 and
                            edm.contents_damage[0] == 0.0,
                            'Error!')
            self.assertTrue(edm.struct_damage[1] == 1.0 and
                            edm.contents_damage[1] == 1.0,
                            'Error!')
            self.assertTrue(edm.struct_damage[2] == 0.0 and
                            edm.contents_damage[2] == 0.0,
                            'Error!')
            self.assertTrue(edm.struct_damage[3] + edm.struct_damage[4] == 0 and
                            edm.contents_damage[3] + edm.contents_damage[4] ==0,
                            'Error!')
        else:
            raise Exception('Unknown python version: %s' % system_tools.version)


        sum_struct = 0.0
        sum_contents = 0.0
        for i in [5,6,7,8]:
            sum_struct += edm.struct_damage[i]
            sum_contents += edm.contents_damage[i]
        #print("", end=' ')
        self.assertTrue(sum_struct == 0.0 and sum_contents  == 0.0,
                        'Error!')
        sum_struct = 0.0
        sum_contents = 0.0
        for i in [9,10,11,12,13,14,15,16]:
            sum_struct += edm.struct_damage[i]
            sum_contents += edm.contents_damage[i]
        self.assertTrue( sum_struct == 2.0 and sum_contents  == 2.0,
                        'Error!')
Example #44
def test_multi_channel_xcorr():
    from eqcorrscan.utils.correlate import get_stream_xcorr

    chans = ['EHZ', 'EHN', 'EHE']
    stas = ['COVA', 'FOZ', 'LARB', 'GOVA', 'MTFO', 'MTBA']
    n_templates = 20
    stream_len = 10000
    template_len = 200
    templates = []
    stream = Stream()
    for station in stas:
        for channel in chans:
            stream += Trace(data=np.random.randn(stream_len))
            stream[-1].stats.channel = channel
            stream[-1].stats.station = station
    for i in range(n_templates):
        template = Stream()
        for station in stas:
            for channel in chans:
                template += Trace(data=np.random.randn(template_len))
                template[-1].stats.channel = channel
                template[-1].stats.station = station
        templates.append(template)
    print("Running time serial")
    tic = time.time()
    multichannel_normxcorr = get_stream_xcorr(
        "time_domain", concurrency=None)
    cccsums_t_s, no_chans, chans = multichannel_normxcorr(
        templates=templates, stream=stream)
    toc = time.time()
    print('Time-domain in serial took: %f seconds' % (toc-tic))
    print("Running time parallel")
    tic = time.time()
    multichannel_normxcorr = get_stream_xcorr(
        "time_domain", concurrency="multiprocess")
    cccsums_t_p, no_chans, chans = multichannel_normxcorr(
        templates=templates, stream=stream, cores=4)
    toc = time.time()
    print('Time-domain in parallel took: %f seconds' % (toc-tic))
    print("Running frequency serial")
    tic = time.time()
    multichannel_normxcorr = get_stream_xcorr(
        "fftw", concurrency=None)
    cccsums_f_s, no_chans, chans = multichannel_normxcorr(
        templates=templates, stream=stream)
    toc = time.time()
    print('Frequency-domain in serial took: %f seconds' % (toc-tic))
    print("Running frequency parallel")
    tic = time.time()
    multichannel_normxcorr = get_stream_xcorr(
        "fftw", concurrency="multiprocess")
    cccsums_f_p, no_chans, chans = multichannel_normxcorr(
        templates=templates, stream=stream, cores=4)
    toc = time.time()
    print('Frequency-domain in parallel took: %f seconds' % (toc-tic))
    print("Running frequency openmp parallel")
    tic = time.time()
    multichannel_normxcorr = get_stream_xcorr(
        "fftw", concurrency="concurrent")
    cccsums_f_op, no_chans, chans = multichannel_normxcorr(
        templates=templates, stream=stream, cores=4)
    toc = time.time()
    print('Frequency-domain openmp parallel took: %f seconds' % (toc-tic))
    print("Running frequency openmp parallel outer")
    tic = time.time()
    multichannel_normxcorr = get_stream_xcorr(
        "fftw", concurrency="concurrent")
    cccsums_f_outer_op, no_chans, chans = multichannel_normxcorr(
        templates=templates, stream=stream, cores=1, cores_outer=4)
    toc = time.time()
    print('Frequency-domain openmp parallel outer took: %f seconds' % (toc-tic))
    print("Finished")
    assert(np.allclose(cccsums_t_s, cccsums_t_p, atol=0.00001))
    assert(np.allclose(cccsums_f_s, cccsums_f_p, atol=0.00001))
    assert(np.allclose(cccsums_f_s, cccsums_f_op, atol=0.00001))
    assert(np.allclose(cccsums_f_s, cccsums_f_outer_op, atol=0.00001))
    assert(np.allclose(cccsums_t_p, cccsums_f_s, atol=0.001))
Example #45
def eval_transport(sim):
    print("evaluating transport...")

    name = ex[sim.idxsim]
    gwtname = "gwt_" + name

    fpth = os.path.join(sim.simpath, "{}.ucn".format(gwtname))
    try:
        cobj = flopy.utils.HeadFile(
            fpth, precision="double", text="CONCENTRATION"
        )
        conc = cobj.get_data()
    except Exception:
        assert False, 'could not load data from "{}"'.format(fpth)

    # This is the answer to this problem.  These concentrations are for
    # time step 200.
    cres1 = [
        [
            [
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                9.99999999e-01,
                9.99999999e-01,
                9.99999998e-01,
                9.99999996e-01,
                9.99999992e-01,
                9.99999986e-01,
                9.99999974e-01,
                9.99999953e-01,
                9.99999916e-01,
                9.99999853e-01,
                9.99999744e-01,
                9.99999560e-01,
                9.99999254e-01,
                9.99998750e-01,
                9.99997930e-01,
                9.99996616e-01,
                9.99994534e-01,
                9.99991280e-01,
                9.99986256e-01,
                9.99978596e-01,
                9.99967064e-01,
                9.99949912e-01,
                9.99924710e-01,
                9.99888122e-01,
                9.99835630e-01,
                9.99761195e-01,
                9.99656858e-01,
                9.99512258e-01,
                9.99314094e-01,
                9.99045508e-01,
                9.98685418e-01,
                9.98207806e-01,
                9.97580979e-01,
                9.96766847e-01,
                9.95720240e-01,
                9.94388311e-01,
                9.92710079e-01,
                9.90616155e-01,
                9.88028708e-01,
                9.84861727e-01,
                9.81021610e-01,
                9.76408132e-01,
                9.70915801e-01,
                9.64435624e-01,
                9.56857251e-01,
                9.48071482e-01,
                9.37973073e-01,
                9.26463768e-01,
                9.13455460e-01,
                8.98873378e-01,
                8.82659167e-01,
                8.64773747e-01,
                8.45199820e-01,
                8.23943904e-01,
                8.01037798e-01,
                7.76539388e-01,
                7.50532734e-01,
                7.23127419e-01,
                6.94457149e-01,
                6.64677639e-01,
                6.33963857e-01,
                6.02506698e-01,
                5.70509218e-01,
                5.38182550e-01,
                5.05741631e-01,
                4.73400912e-01,
                4.41370157e-01,
                4.09850495e-01,
                3.79030822e-01,
                3.49084661e-01,
                3.20167556e-01,
                2.92415050e-01,
                2.65941266e-01,
                2.40838116e-01,
                2.17175095e-01,
                1.94999626e-01,
                1.74337914e-01,
                1.55196221e-01,
                1.37562492e-01,
                1.21408260e-01,
                1.06690730e-01,
                9.33549731e-02,
                8.13361604e-02,
                7.05617550e-02,
                6.09536208e-02,
                5.24299924e-02,
                4.49072719e-02,
                3.83016284e-02,
                3.25303834e-02,
                2.75131759e-02,
                2.31729100e-02,
                1.94364916e-02,
                1.62353686e-02,
                1.35058926e-02,
                1.11895205e-02,
                9.23287950e-03,
                7.58771614e-03,
                6.21075097e-03,
                5.06346009e-03,
                4.11180172e-03,
                3.32590494e-03,
                2.67973538e-03,
                2.15075039e-03,
                1.71955399e-03,
                1.36956006e-03,
                1.08666981e-03,
                8.58968428e-04,
                6.76443820e-04,
                5.30729288e-04,
                4.14870946e-04,
                3.23119730e-04,
                2.50747289e-04,
                1.93884515e-04,
                1.49381177e-04,
                1.14684894e-04,
                8.77376133e-05,
                6.68877051e-05,
                5.08158521e-05,
                3.84730011e-05,
                2.90287464e-05,
                2.18286673e-05,
                1.63592796e-05,
                1.22194132e-05,
                9.09697239e-06,
                6.75017149e-06,
                4.99246589e-06,
                3.68051556e-06,
                2.70462002e-06,
                1.98115573e-06,
                1.44662627e-06,
                1.05300391e-06,
                7.64099584e-07,
                5.52747307e-07,
                3.98630253e-07,
                2.86609767e-07,
                2.05446572e-07,
                1.46826341e-07,
                1.04620334e-07,
                7.43267373e-08,
                5.26502675e-08,
                3.71870946e-08,
                2.61896435e-08,
                1.83917063e-08,
                1.28789035e-08,
                8.99310023e-09,
                6.26214049e-09,
                4.34838583e-09,
                3.01116258e-09,
                2.07945770e-09,
                1.43213687e-09,
                9.83662737e-10,
                6.73819846e-10,
                4.60347508e-10,
                3.13675549e-10,
                2.13175248e-10,
                1.44498191e-10,
                9.76935212e-11,
                6.58802074e-11,
                4.43137016e-11,
                2.97319183e-11,
                1.98983717e-11,
                1.32840240e-11,
                8.84640833e-12,
                4.39186836e-12,
            ]
        ]
    ]
    cres1 = np.array(cres1)

    cres2 = [
        [
            [
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                9.99999999e-01,
                9.99999999e-01,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                9.99999998e-01,
                9.99999993e-01,
                9.99999994e-01,
                1.00000000e00,
                1.00000002e00,
                1.00000002e00,
                9.99999999e-01,
                9.99999962e-01,
                9.99999940e-01,
                9.99999973e-01,
                1.00000007e00,
                1.00000018e00,
                1.00000017e00,
                9.99999959e-01,
                9.99999556e-01,
                9.99999217e-01,
                9.99999365e-01,
                1.00000035e00,
                1.00000203e00,
                1.00000340e00,
                1.00000262e00,
                9.99997675e-01,
                9.99987864e-01,
                9.99975605e-01,
                9.99967699e-01,
                9.99974537e-01,
                1.00000558e00,
                1.00005977e00,
                1.00011022e00,
                1.00008407e00,
                9.99839670e-01,
                9.99144904e-01,
                9.97660969e-01,
                9.94936340e-01,
                9.90414434e-01,
                9.83456888e-01,
                9.73381978e-01,
                9.59515273e-01,
                9.41247475e-01,
                9.18092979e-01,
                8.89742279e-01,
                8.56101925e-01,
                8.17317280e-01,
                7.73775496e-01,
                7.26088513e-01,
                6.75058258e-01,
                6.21628023e-01,
                5.66825293e-01,
                5.11701685e-01,
                4.57275459e-01,
                4.04481176e-01,
                3.54129825e-01,
                3.06881347e-01,
                2.63230002e-01,
                2.23501864e-01,
                1.87862728e-01,
                1.56334150e-01,
                1.28815062e-01,
                1.05106442e-01,
                8.49367728e-02,
                6.79864524e-02,
                5.39097837e-02,
                4.23536725e-02,
                3.29726109e-02,
                2.54398934e-02,
                1.94552984e-02,
                1.47496575e-02,
                1.10868391e-02,
                8.26370985e-03,
                6.10861676e-03,
                4.47887907e-03,
                3.25770405e-03,
                2.35085553e-03,
                1.68332282e-03,
                1.19616223e-03,
                8.43620069e-04,
                5.90594884e-04,
                4.10458367e-04,
                2.83227080e-04,
                1.94059595e-04,
                1.32043648e-04,
                8.92335599e-05,
                5.98980167e-05,
                3.99405773e-05,
                2.64592197e-05,
                1.74157687e-05,
                1.13907502e-05,
                7.40365644e-06,
                4.78259593e-06,
                3.07073947e-06,
                1.95984426e-06,
                1.24347214e-06,
                7.84372275e-07,
                4.91943284e-07,
                3.06795247e-07,
                1.90263904e-07,
                1.17346736e-07,
                7.19820791e-08,
                4.39185249e-08,
                2.66545669e-08,
                1.60925890e-08,
                9.66584556e-09,
                5.77619748e-09,
                3.43447585e-09,
                2.03199468e-09,
                1.19634152e-09,
                7.00946025e-10,
                4.08730000e-10,
                2.37211756e-10,
                1.37027700e-10,
                7.87911071e-11,
                4.50989518e-11,
                2.56980023e-11,
                1.45780272e-11,
                8.23354736e-12,
                4.63005332e-12,
                2.59249113e-12,
                1.44544628e-12,
                8.02528982e-13,
                4.43725011e-13,
                2.44332619e-13,
                1.33993251e-13,
                7.31876077e-14,
                3.98166029e-14,
                2.15765206e-14,
                1.16468205e-14,
                6.26267665e-15,
                3.35472405e-15,
                1.79025576e-15,
                9.51813339e-16,
                5.04177652e-16,
                2.66088975e-16,
                1.39925789e-16,
                7.33182482e-17,
                3.82811880e-17,
                1.99174186e-17,
                1.03269036e-17,
                5.33594407e-18,
                2.74771548e-18,
                1.41014305e-18,
                7.21291388e-19,
                3.67691580e-19,
                1.86885340e-19,
                9.45533475e-20,
                4.79404225e-20,
                2.37115056e-20,
                1.27310694e-20,
                2.67369800e-21,
            ]
        ]
    ]
    cres2 = np.array(cres2)

    cres3 = [
        [
            [
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                1.00000000e00,
                9.99999999e-01,
                9.99999999e-01,
                9.99999998e-01,
                9.99999997e-01,
                9.99999996e-01,
                9.99999992e-01,
                9.99999988e-01,
                9.99999977e-01,
                9.99999965e-01,
                9.99999935e-01,
                9.99999903e-01,
                9.99999820e-01,
                9.99999731e-01,
                9.99999499e-01,
                9.99999255e-01,
                9.99998614e-01,
                9.99997941e-01,
                9.99996190e-01,
                9.99994359e-01,
                9.99989643e-01,
                9.99984740e-01,
                9.99972298e-01,
                9.99959473e-01,
                9.99927548e-01,
                9.99895008e-01,
                9.99815884e-01,
                9.99736350e-01,
                9.99548166e-01,
                9.99362087e-01,
                9.98935116e-01,
                9.98520712e-01,
                9.97601139e-01,
                9.96726795e-01,
                9.94854670e-01,
                9.93113734e-01,
                9.89523166e-01,
                9.86262124e-01,
                9.79792542e-01,
                9.74060601e-01,
                9.63133239e-01,
                9.53698327e-01,
                9.36427537e-01,
                9.21907768e-01,
                8.96401341e-01,
                8.75537178e-01,
                8.40378779e-01,
                8.12414499e-01,
                7.67224282e-01,
                7.32294666e-01,
                6.78174275e-01,
                6.37542814e-01,
                5.77189801e-01,
                5.33200175e-01,
                4.70564614e-01,
                4.26261319e-01,
                3.65793900e-01,
                3.24305464e-01,
                2.70028089e-01,
                2.33916037e-01,
                1.88631553e-01,
                1.59426651e-01,
                1.24322194e-01,
                1.02384383e-01,
                7.71079810e-02,
                6.18067033e-02,
                4.49072754e-02,
                3.50006334e-02,
                2.45124747e-02,
                1.85605248e-02,
                1.25201360e-02,
                9.20274911e-03,
                5.97547120e-03,
                4.26070056e-03,
                2.66158628e-03,
                1.83980553e-03,
                1.10519401e-03,
                7.40179413e-04,
                4.27404601e-04,
                2.77183690e-04,
                1.53799502e-04,
                9.65363514e-05,
                5.14550934e-05,
                3.12434950e-05,
                1.59926916e-05,
                9.38944096e-06,
                4.61422039e-06,
                2.61810100e-06,
                1.23478054e-06,
                6.76699550e-07,
                3.06141480e-07,
                1.61921388e-07,
                7.02044355e-08,
                3.57908578e-08,
                1.48458395e-08,
                7.27822667e-09,
                2.87772343e-09,
                1.35034049e-09,
                5.04924114e-10,
                2.24462051e-10,
                7.79256525e-11,
                3.19978311e-11,
                9.80115906e-12,
                3.42172760e-12,
                7.38995558e-13,
                1.04998282e-13,
                -6.08063778e-14,
                -7.00202752e-14,
                -4.57797962e-14,
                -2.73273037e-14,
                -1.26821070e-14,
                -5.66553096e-15,
                -1.79362990e-15,
                -4.40132769e-16,
                1.05900605e-16,
                2.30438418e-16,
                2.68347207e-16,
                2.60728717e-16,
                2.32632994e-16,
                1.97839789e-16,
                1.63276013e-16,
                1.32036738e-16,
                1.05203244e-16,
                8.28614219e-17,
                6.46461341e-17,
                5.00217396e-17,
                3.84212334e-17,
                2.93113868e-17,
                2.22197912e-17,
                1.67426198e-17,
                1.25429101e-17,
                9.34448747e-18,
                6.92421024e-18,
                5.10394011e-18,
                3.74295077e-18,
                2.73111210e-18,
                1.98297560e-18,
                1.43276787e-18,
                1.03023499e-18,
                7.37248326e-19,
                5.25070294e-19,
                3.72178066e-19,
                2.62549962e-19,
                1.84329161e-19,
                1.28790859e-19,
                8.95504779e-20,
                6.19612940e-20,
                4.26594479e-20,
                2.92226046e-20,
                1.99155401e-20,
                1.35016962e-20,
                9.10444343e-21,
                4.57287749e-21,
            ]
        ]
    ]
    cres3 = np.array(cres3)

    creslist = [cres1, cres2, cres3]

    assert np.allclose(
        creslist[sim.idxsim], conc
    ), "simulated concentrations do not match the known solution."

    return
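# For reference, flopy's binary readers can also return every saved record at
# once, which is handy when regenerating reference arrays like cres1-cres3.
# A minimal sketch, assuming "gwt_model.ucn" is a MODFLOW 6 concentration file
# laid out like the one read above:
import flopy

cobj = flopy.utils.HeadFile("gwt_model.ucn", precision="double",
                            text="CONCENTRATION")
all_conc = cobj.get_alldata()  # shape (ntimes, nlay, nrow, ncol)
times = cobj.get_times()       # simulation times of the saved records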
 def test_compute_ring_center(self):
   # FIXME might break with different version of rdkit
   self.assertTrue(
       np.allclose(rgf.compute_ring_center(self.cycle4, range(4)), 0))
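# compute_ring_center presumably returns the centroid of the ring atoms'
# coordinates; a minimal sketch of that idea (hypothetical helper, not
# necessarily rgf's exact implementation):
import numpy as np

def ring_center(atom_positions, ring_indices):
    """Centroid of the atoms whose indices form the ring."""
    return np.mean(np.asarray(atom_positions)[list(ring_indices)], axis=0)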
Example #47
0
    def helper_func(self, config_idx):
        (in_ch, out_ch, k_size, stride, padding, has_bias, batch_size, height,
         width) = self.configs[config_idx]

        torch_conv2d = Conv2d(
            in_ch,
            out_ch,
            k_size,
            stride=stride,
            padding=padding,
            bias=has_bias)
        torch_conv2d.type(torch.DoubleTensor)

        conv2d_layer = Conv2DLayer(
            in_ch, (k_size, k_size),
            out_ch,
            lambda t: torch.nn.init.normal_(t, -1, 1),  # in-place init; normal() is deprecated
            stride=(stride, stride),
            padding=(padding, padding),
            bias=has_bias)
        conv2d_layer.type(torch.DoubleTensor)

        input_tensor = (torch.DoubleTensor(batch_size, in_ch, height, width)
                        .uniform_(-1, 1))
        input_layer = Variable(input_tensor, requires_grad=True)
        input_torch = Variable(input_tensor.clone(), requires_grad=True)

        bias_tensor = torch.DoubleTensor(out_ch).uniform_(-1, 1)
        weights = (torch.DoubleTensor(out_ch, in_ch, k_size, k_size).uniform_(
            -1, 1))
        torch_conv2d.weight.data.copy_(weights)
        if has_bias:
            torch_conv2d.bias.data.copy_(bias_tensor)
        layer_weight_shape = (out_ch, in_ch * k_size * k_size)
        conv2d_layer.kernels.data.copy_(weights.view(layer_weight_shape))
        if has_bias:
            conv2d_layer.bias.data.copy_(bias_tensor.view(out_ch, 1))

        layer_result = conv2d_layer(input_layer)
        layer_result_np = layer_result.data.numpy()
        torch_result = torch_conv2d(input_torch)
        torch_result_np = torch_result.data.numpy()
        self.assertTrue(np.allclose(layer_result_np, torch_result_np))

        # verify gradient
        gradient = torch.DoubleTensor(layer_result.shape)
        layer_result.backward(gradient)
        torch_result.backward(gradient)
        self.assertTrue(
            np.allclose(
                input_layer.grad.data.numpy(),
                input_torch.grad.data.numpy(),
                equal_nan=True))
        layer_weight_grad = conv2d_layer.kernels.grad
        torch_weight_grad = torch_conv2d.weight.grad.view(layer_weight_shape)
        self.assertTrue(
            np.allclose(
                layer_weight_grad.data.numpy(),
                torch_weight_grad.data.numpy(),
                equal_nan=True))
        if has_bias:
            layer_bias_grad = conv2d_layer.bias.grad.view(out_ch)
            torch_bias_grad = torch_conv2d.bias.grad.view(out_ch)
            self.assertTrue(
                np.allclose(
                    layer_bias_grad.data.numpy(),
                    torch_bias_grad.data.numpy(),
                    equal_nan=True))
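# The weight bookkeeping above relies on a simple identity: a torch Conv2d
# weight of shape (out_ch, in_ch, k, k) flattens row-major into the
# (out_ch, in_ch * k * k) matrix the custom layer stores. A quick check:
import torch

out_ch, in_ch, k = 2, 3, 2
w = torch.arange(out_ch * in_ch * k * k, dtype=torch.float64).view(
    out_ch, in_ch, k, k)
flat = w.view(out_ch, in_ch * k * k)
# row i of `flat` is kernel i's (in_ch, k, k) block laid out row-major
assert torch.equal(flat[0], w[0].reshape(-1))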
    def test_inundation_damage_list(self):

        # create mesh
        mesh_file = tempfile.mktemp(".tsh")
        points = [[0.0,0.0],[6.0,0.0],[6.0,6.0],[0.0,6.0]]
        m = Mesh()
        m.add_vertices(points)
        m.auto_segment()
        m.generate_mesh(verbose=False)
        m.export_mesh_file(mesh_file)

        #Create shallow water domain
        domain = Domain(mesh_file)
        os.remove(mesh_file)

        domain.default_order=2

        #Set some field values
        domain.set_quantity('elevation', elevation_function)
        domain.set_quantity('friction', 0.03)
        domain.set_quantity('xmomentum', 22.0)
        domain.set_quantity('ymomentum', 55.0)

        ######################
        # Boundary conditions
        B = Transmissive_boundary(domain)
        domain.set_boundary( {'exterior': B})

        # This call mangles the stage values.
        domain.distribute_to_vertices_and_edges()
        domain.set_quantity('stage', 0.3)

        #sww_file = tempfile.mktemp("")
        domain.set_name('datatest' + str(time.time()))
        domain.format = 'sww'
        domain.smooth = True
        domain.reduction = mean

        sww = SWW_file(domain)
        sww.store_connectivity()
        sww.store_timestep()
        domain.set_quantity('stage', -0.3)
        domain.time = 2.
        sww.store_timestep()

        #Create a csv file
        csv_file = tempfile.mktemp(".csv")
        fd = open(csv_file,'w',newline="")
        writer = csv.writer(fd)
        writer.writerow(['x','y',STR_VALUE_LABEL,CONT_VALUE_LABEL,'ROOF_TYPE',WALL_TYPE_LABEL, SHORE_DIST_LABEL])
        writer.writerow([5.5,0.5,'10','130000','Metal','Timber',20])
        writer.writerow([4.5,1.0,'150','76000','Metal','Double Brick',20])
        writer.writerow([0.1,1.5,'100','76000','Metal','Brick Veneer',300])
        writer.writerow([6.1,1.5,'100','76000','Metal','Brick Veneer',300])
        fd.close()

        extension = ".csv"
        csv_fileII = tempfile.mktemp(extension)
        fd = open(csv_fileII,'w',newline="")
        writer = csv.writer(fd)
        writer.writerow(['x','y',STR_VALUE_LABEL,CONT_VALUE_LABEL,'ROOF_TYPE',WALL_TYPE_LABEL, SHORE_DIST_LABEL])
        writer.writerow([5.5,0.5,'10','130000','Metal','Timber',20])
        writer.writerow([4.5,1.0,'150','76000','Metal','Double Brick',20])
        writer.writerow([0.1,1.5,'100','76000','Metal','Brick Veneer',300])
        writer.writerow([6.1,1.5,'100','76000','Metal','Brick Veneer',300])
        fd.close()

        sww_file = domain.get_name() + "." + domain.format
        #print "sww_file",sww_file
        marker='_gosh'
        inundation_damage(sww_file, [csv_file, csv_fileII],
                          exposure_file_out_marker=marker,
                          verbose=False)

        # Test one file
        csv_handle = Exposure(csv_file[:-4]+marker+extension)
        struct_loss = csv_handle.get_column(EventDamageModel.STRUCT_LOSS_TITLE)
        #print "struct_loss",struct_loss
        struct_loss = [float(x) for x in struct_loss]
        #pprint(struct_loss)
        assert num.allclose(struct_loss,[10.0, 150.0, 66.55333347876866, 0.0])
        depth = csv_handle.get_column(EventDamageModel.MAX_DEPTH_TITLE)
        #print "depth",depth
        depth = [float(x) for x in depth]
        assert num.allclose(depth, [3.000000011920929, 2.9166666785875957, 2.2666666785875957, -0.3])

        # Test another file
        csv_handle = Exposure(csv_fileII[:-4]+marker+extension)
        struct_loss = csv_handle.get_column(EventDamageModel.STRUCT_LOSS_TITLE)
        #print "struct_loss",struct_loss
        struct_loss = [float(x) for x in struct_loss]

        #pprint(struct_loss)
        assert num.allclose(struct_loss, [10.0, 150.0, 66.553333478768664, 0.0])
        depth = csv_handle.get_column(EventDamageModel.MAX_DEPTH_TITLE)
        #print "depth",depth
        depth = [float(x) for x in depth]
        assert num.allclose(depth,[3.000000011920929, 2.9166666785875957, 2.2666666785875957, -0.3])
        os.remove(sww.filename)
        os.remove(csv_file)
        os.remove(csv_fileII)
N = 1000
signal = np.array([random.uniform(0, 1) for i in range(N)])
fractionalTransformer = frft.FractionalFourierTransform()

# First Test: Identity Matrix:
print("""=========================================================
Test One: the signal is given to the fractional transform with
ratio = 0, which should act as the identity and return the input
unchanged.
""")

testRes = fractionalTransformer.frft(
	y=signal, alpha=0
)

if np.allclose(testRes, signal):
	print("Identity Test Passed...!")
else:
	print("X: Identity Test Failed...!")
print("""=========================================================

""")

# Second Test: Conventional Fourier Matrix:
print("""=========================================================
Test Two: the signal is given to the fractional transform with
ratio = 1, which should return the conventional Fourier transform
of the input.
""")

testRes = fractionalTransformer.frft(
	y=signal, alpha=1
)
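# A hedged way to finish the comparison (this check is not in the original
# snippet): compare against NumPy's FFT. Fractional-transform implementations
# differ in normalization, so a unitary 1/sqrt(N) scaling may be needed.
fourier = np.fft.fft(signal)
if np.allclose(testRes, fourier) or np.allclose(testRes, fourier / np.sqrt(N)):
	print("Fourier Test Passed...!")
else:
	print("X: Fourier Test Failed...!")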
Example #50
0
    def grad_full(ia, inc):
        coord = mol.atom_coord(ia).copy()
        ptr = mol._atm[ia,gto.PTR_COORD]
        de = []
        for i in range(3):
            mol._env[ptr+i] = coord[i] + inc
            mf = scf.RHF(mol).run(conv_tol=1e-14)
            e1a = mf.nuc_grad_method().kernel()
            mol._env[ptr+i] = coord[i] - inc
            mf = scf.RHF(mol).run(conv_tol=1e-14)
            e1b = mf.nuc_grad_method().kernel()
            mol._env[ptr+i] = coord[i]
            de.append((e1a-e1b)/(2*inc))
        return de
    e2ref = [grad_full(ia, .5e-4) for ia in range(mol.natm)]
    e2ref = numpy.asarray(e2ref).reshape(n3,n3)
    print(numpy.linalg.norm(e2-e2ref))
    print(abs(e2-e2ref).max())
    print(numpy.allclose(e2,e2ref,atol=1e-6))

# \partial^2 E / \partial R \partial R'
    e2 = hobj.partial_hess_elec(mf.mo_energy, mf.mo_coeff, mf.mo_occ)
    e2 += hobj.hess_nuc(mol)
    e2 = e2.transpose(0,2,1,3).reshape(n3,n3)
    def grad_partial_R(ia, inc):
        coord = mol.atom_coord(ia).copy()
        ptr = mol._atm[ia,gto.PTR_COORD]
        de = []
        for i in range(3):
            mol._env[ptr+i] = coord[i] + inc
            e1a = mf.nuc_grad_method().kernel()
            mol._env[ptr+i] = coord[i] - inc
            e1b = mf.nuc_grad_method().kernel()
            mol._env[ptr+i] = coord[i]
            de.append((e1a-e1b)/(2*inc))
        return de
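# A generic central-difference check of a Hessian, in the same spirit as
# grad_full/grad_partial_R above; a minimal sketch assuming a callable
# grad(x) that returns dE/dx (not pyscf-specific):
import numpy

def numerical_hessian(grad, x0, inc=5e-4):
    n = x0.size
    hess = numpy.zeros((n, n))
    for i in range(n):
        xp = x0.copy(); xp[i] += inc
        xm = x0.copy(); xm[i] -= inc
        hess[i] = (grad(xp) - grad(xm)) / (2 * inc)
    return 0.5 * (hess + hess.T)  # symmetrize to damp round-off noise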
Example #51
0
    def test_batch(self):
        numpy.random.seed(734)
        d = 2
        m = 2
        n = 15
        paths = [numpy.random.uniform(-1, 1, size=(6, d)) for i in range(n)]
        pathArray15 = stack(paths)
        pathArray1315 = numpy.reshape(pathArray15, (1, 3, 1, 5, 6, d))
        sigs = [iisignature.sig(i, m) for i in paths]
        sigArray = stack(sigs)
        sigArray15 = iisignature.sig(pathArray15, m)
        sigArray1315 = iisignature.sig(pathArray1315, m)
        siglength = iisignature.siglength(d, m)
        self.assertEqual(sigArray1315.shape, (1, 3, 1, 5, siglength))
        self.assertTrue(
            numpy.allclose(sigArray1315.reshape(n, siglength), sigs))
        self.assertEqual(sigArray15.shape, (15, siglength))
        self.assertTrue(numpy.allclose(sigArray15, sigs))

        backsigs = [
            iisignature.sigbackprop(i, j, m) for i, j in zip(sigs, paths)
        ]
        backsigArray = stack(backsigs)
        backsigs1315 = iisignature.sigbackprop(sigArray1315, pathArray1315, m)
        self.assertEqual(backsigs1315.shape, (1, 3, 1, 5, 6, d))
        self.assertTrue(
            numpy.allclose(backsigs1315.reshape(n, 6, 2), backsigArray))

        data = [numpy.random.uniform(size=(d, )) for i in range(n)]
        dataArray1315 = stack(data).reshape((1, 3, 1, 5, d))
        joined = [iisignature.sigjoin(i, j, m) for i, j in zip(sigs, data)]
        joined1315 = iisignature.sigjoin(sigArray1315, dataArray1315, m)
        self.assertEqual(joined1315.shape, (1, 3, 1, 5, siglength))
        self.assertTrue(
            numpy.allclose(joined1315.reshape(n, -1), stack(joined)))
        backjoined = [
            iisignature.sigjoinbackprop(i, j, k, m)
            for i, j, k in zip(joined, sigs, data)
        ]
        backjoinedArrays = [
            stack([i[j] for i in backjoined]) for j in range(2)
        ]
        backjoined1315 = iisignature.sigjoinbackprop(joined1315, sigArray1315,
                                                     dataArray1315, m)
        self.assertEqual(backjoined1315[0].shape, sigArray1315.shape)
        self.assertEqual(backjoined1315[1].shape, dataArray1315.shape)
        self.assertTrue(
            numpy.allclose(backjoined1315[0].reshape(n, -1),
                           backjoinedArrays[0]))
        self.assertTrue(
            numpy.allclose(backjoined1315[1].reshape(n, -1),
                           backjoinedArrays[1]))

        scaled = [iisignature.sigscale(i, j, m) for i, j in zip(sigs, data)]
        scaled1315 = iisignature.sigscale(sigArray1315, dataArray1315, m)
        self.assertEqual(scaled1315.shape, (1, 3, 1, 5, siglength))
        self.assertTrue(
            numpy.allclose(scaled1315.reshape(n, -1), stack(scaled)))
        backscaled = [
            iisignature.sigscalebackprop(i, j, k, m)
            for i, j, k in zip(scaled, sigs, data)
        ]
        backscaledArrays = [
            stack([i[j] for i in backscaled]) for j in range(2)
        ]
        backscaled1315 = iisignature.sigscalebackprop(scaled1315, sigArray1315,
                                                      dataArray1315, m)
        self.assertEqual(backscaled1315[0].shape, sigArray1315.shape)
        self.assertEqual(backscaled1315[1].shape, dataArray1315.shape)
        self.assertTrue(
            numpy.allclose(backscaled1315[0].reshape(n, -1),
                           backscaledArrays[0]))
        self.assertTrue(
            numpy.allclose(backscaled1315[1].reshape(n, -1),
                           backscaledArrays[1]))

        s_s = (iisignature.prepare(d, m,
                                   "cosx"), iisignature.prepare(d, m, "coshx"))
        for type in ("c", "o", "s", "x", "ch", "oh", "sh"):
            s = s_s[1 if "h" in type else 0]
            logsigs = [iisignature.logsig(i, s, type) for i in paths]
            logsigArray = stack(logsigs)
            logsigArray1315 = iisignature.logsig(pathArray1315, s, type)
            self.assertEqual(logsigArray1315.shape,
                             (1, 3, 1, 5, logsigs[0].shape[0]), type)
            self.assertTrue(
                numpy.allclose(logsigArray1315.reshape(n, -1), logsigArray),
                type)

            if type in ("s", "x", "sh"):
                backlogs = stack(
                    iisignature.logsigbackprop(i, j, s, type)
                    for i, j in zip(logsigs, paths))
                backlogs1315 = iisignature.logsigbackprop(
                    logsigArray1315, pathArray1315, s, type)
                self.assertEqual(backlogs1315.shape, backsigs1315.shape)
                self.assertTrue(
                    numpy.allclose(backlogs1315.reshape(n, 6, d), backlogs),
                    type)

        a = iisignature.rotinv2dprepare(m, "a")
        rots = stack([iisignature.rotinv2d(i, a) for i in paths])
        rots1315 = iisignature.rotinv2d(pathArray1315, a)
        self.assertEqual(rots1315.shape, (1, 3, 1, 5, rots.shape[1]))
        self.assertTrue(numpy.allclose(rots1315.reshape(n, -1), rots))
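# The batch convention exercised above: iisignature.sig maps over all leading
# axes, so an array of shape (..., pathlen, d) yields signatures of shape
# (..., siglength(d, m)). A minimal standalone check, assuming iisignature
# is installed:
import numpy
import iisignature

batch = numpy.random.uniform(-1, 1, size=(4, 6, 2))  # 4 paths, length 6, d=2
sigs = iisignature.sig(batch, 2)
assert sigs.shape == (4, iisignature.siglength(2, 2))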
Example #52
0
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

#1.1 Smoothing
from edge import conv, gaussian_kernel

# Define 3x3 Gaussian kernel with std = 1
kernel = gaussian_kernel(3, 1)
kernel_test = np.array(
    [[ 0.05854983, 0.09653235, 0.05854983],
     [ 0.09653235, 0.15915494, 0.09653235],
     [ 0.05854983, 0.09653235, 0.05854983]]
)

# Test Gaussian kernel
if not np.allclose(kernel, kernel_test):
    print('Incorrect values! Please check your implementation.')
    
# Test with different kernel_size and sigma
kernel_size = 5
sigma = 1.4

# Load image
img = io.imread('iguana.png', as_gray=True)  # older scikit-image spelled this as_grey

# Define 5x5 Gaussian kernel with std = sigma
kernel = gaussian_kernel(kernel_size, sigma)

# Convolve image with kernel to achieve smoothed effect
smoothed = conv(img, kernel)
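# For reference, the kernel_test values above are exactly the unnormalized
# 2D Gaussian exp(-(x^2 + y^2) / (2 sigma^2)) / (2 pi sigma^2) sampled on a
# grid centered at the kernel's middle; a minimal sketch of that formula
# (the graded implementation lives in edge.py):
import numpy as np

def gaussian_kernel_ref(size, sigma):
    ax = np.arange(size) - size // 2
    xx, yy = np.meshgrid(ax, ax)
    return np.exp(-(xx ** 2 + yy ** 2) / (2 * sigma ** 2)) / (
        2 * np.pi * sigma ** 2)

assert np.allclose(gaussian_kernel_ref(3, 1)[1, 1], 1 / (2 * np.pi))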
Example #53
0
        def test_mlp(sort_sum_gradient):
            fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient})
            input_size = 5
            paddle.seed(1)
            mlp1 = MLP(input_size=input_size)
            # generate the gradient of each step
            mlp2 = MLP(input_size=input_size)

            expected_weight1_grad = 0.
            expected_bias1_grad = 0.
            expected_weight2_grad = 0.
            expected_bias2_grad = 0.

            for batch_id in range(100):
                x = paddle.uniform([10, input_size])
                detach_x = x.detach()
                clear_loss = mlp2(detach_x)
                clear_loss.backward()
                expected_weight1_grad = (expected_weight1_grad +
                                         mlp2._linear1.weight.grad.numpy())
                expected_bias1_grad = (expected_bias1_grad +
                                       mlp2._linear1.bias.grad.numpy())
                expected_weight2_grad = (expected_weight2_grad +
                                         mlp2._linear2.weight.grad.numpy())
                expected_bias2_grad = (expected_bias2_grad +
                                       mlp2._linear2.bias.grad.numpy())

                loss = mlp1(x)
                loss.backward()

                self.assertTrue(np.array_equal(loss.grad.numpy(), [1]))
                self.assertTrue(
                    np.allclose(mlp1._linear1.weight.grad.numpy(),
                                expected_weight1_grad))
                self.assertTrue(
                    np.allclose(mlp1._linear1.bias.grad.numpy(),
                                expected_bias1_grad))
                self.assertTrue(
                    np.allclose(mlp1._linear2.weight.grad.numpy(),
                                expected_weight2_grad))
                self.assertTrue(
                    np.allclose(mlp1._linear2.bias.grad.numpy(),
                                expected_bias2_grad))

                mlp2.clear_gradients()
                self.assertTrue(np.array_equal(clear_loss.grad.numpy(), [1]))
                if ((batch_id + 1) % 10) % 2 == 0:
                    mlp1.clear_gradients()
                    expected_weight1_grad = 0.
                    expected_bias1_grad = 0.
                    expected_weight2_grad = 0.
                    expected_bias2_grad = 0.
                elif ((batch_id + 1) % 10) % 2 == 1:
                    mlp1.clear_gradients()
                    mlp1._linear1.weight._set_grad_ivar(
                        paddle.ones([input_size, 3]))
                    mlp1._linear2.weight._set_grad_ivar(paddle.ones([3, 4]))
                    expected_weight1_grad = 1.
                    expected_bias1_grad = 0.
                    expected_weight2_grad = 1.
                    expected_bias2_grad = 0.
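# The expected_*_grad bookkeeping above relies on paddle's accumulation
# semantics: gradients add up across backward() calls until cleared. A
# minimal standalone sketch, assuming paddle is installed:
import paddle

w = paddle.ones([2, 2])
w.stop_gradient = False
for _ in range(3):
    (w * 2.0).sum().backward()
print(w.grad.numpy())  # every element is 3 * 2 = 6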
Example #54
0
    def dotest(self, type):
        m = 8
        nPaths = 95
        nAngles = 348
        numpy.random.seed(775)
        s = iisignature.rotinv2dprepare(m, type)
        coeffs = iisignature.rotinv2dcoeffs(s)
        angles = numpy.random.uniform(0, math.pi * 2, size=nAngles + 1)
        angles[0] = 0
        rotationMatrices = [
            numpy.array([[math.cos(i), math.sin(i)],
                         [-math.sin(i), math.cos(i)]]) for i in angles
        ]
        paths = [
            numpy.random.uniform(-1, 1, size=(32, 2)) for i in range(nPaths)
        ]
        samePathRotInvs = [
            iisignature.rotinv2d(numpy.dot(paths[0], mtx), s)
            for mtx in rotationMatrices
        ]

        #check the length matches
        (length, ) = samePathRotInvs[0].shape
        self.assertEqual(length, sum(i.shape[0] for i in coeffs))
        self.assertEqual(length, iisignature.rotinv2dlength(s))
        if type == "a":
            self.assertEqual(length, sumCentralBinomialCoefficient(m // 2))

        self.assertLess(length, nAngles)  #sanity check on the test itself

        #check that the invariants are invariant
        if 0:
            print("\n", numpy.column_stack(samePathRotInvs[0:7]))
        for i in range(nAngles):
            if 0 and diff(samePathRotInvs[0], samePathRotInvs[1 + i]) > 0.01:
                print(i)
                print(samePathRotInvs[0] - samePathRotInvs[1 + i])
                print(diff(samePathRotInvs[0], samePathRotInvs[1 + i]))
            self.assertLess(diff(samePathRotInvs[0], samePathRotInvs[1 + i]),
                            0.01)

        #check that the invariants match the coefficients
        if 1:
            sigLevel = iisignature.sig(paths[0],
                                       m)[iisignature.siglength(2, m - 1):]
            lowerRotinvs = 0 if 2 == m else iisignature.rotinv2dlength(
                iisignature.rotinv2dprepare(m - 2, type))
            #print("\n",numpy.dot(coeffs[-1],sigLevel),"\n",samePathRotInvs[0][lowerRotinvs:])
            #print(numpy.dot(coeffs[-1],sigLevel)-samePathRotInvs[0][lowerRotinvs:])
            self.assertTrue(
                numpy.allclose(numpy.dot(coeffs[-1], sigLevel),
                               samePathRotInvs[0][lowerRotinvs:],
                               atol=0.000001))

        #check that we are not missing invariants
        if type == "a":
            #print("\nrotinvlength=", length, " siglength=", iisignature.siglength(2, m))
            sigOffsets = []
            for path in paths:
                samePathSigs = [
                    iisignature.sig(numpy.dot(path, mtx), m)
                    for mtx in rotationMatrices[1:70]
                ]
                samePathSigsOffsets = [
                    i - samePathSigs[0] for i in samePathSigs[1:]
                ]
                sigOffsets.extend(samePathSigsOffsets)
            #print(numpy.linalg.svd(numpy.row_stack(sigOffsets))[1])
            def split(a, dim, level):
                start = 0
                out = []
                for m in range(1, level + 1):
                    levelLength = dim**m
                    out.append(a[:, start:(start + levelLength)])
                    start = start + levelLength
                assert (start == a.shape[1])
                return out

            allOffsets = numpy.row_stack(sigOffsets)
            #print (allOffsets.shape)
            splits = split(allOffsets, 2, m)
            #print()
            rank_tolerance = 0.01  # this is hackish
            #print([numpy.linalg.matrix_rank(i.astype("float64"), rank_tolerance)
            #       for i in splits])
            #print ([i.shape for i in splits])
            #print(numpy.linalg.svd(splits[-1])[1])

            #sanity check on the test
            self.assertLess(splits[-1].shape[1], splits[0].shape[0])
            totalUnspannedDimensions = sum(
                i.shape[1] - numpy.linalg.matrix_rank(i, rank_tolerance)
                for i in splits)
            self.assertEqual(totalUnspannedDimensions, length)

        if 0:  #This doesn't work - the rank of the whole thing is less than
            #sigLength-totalUnspannedDimensions, which suggests that there are
            #inter-level dependencies,
            #even though the shuffle product dependencies aren't linear.
            #I don't know why this is.
            sigLength = iisignature.siglength(2, m)
            numNonInvariant = numpy.linalg.matrix_rank(
                numpy.row_stack(sigOffsets))

            predictedNumberInvariant = sigLength - numNonInvariant
            print(sigLength, length, numNonInvariant)
            self.assertLess(sigLength, nAngles)
            self.assertEqual(predictedNumberInvariant, length)
Example #55
0
 def test_pdf(self):
     beta = Beta(np.ones(2), np.ones(2))
     self.assertTrue(
         np.allclose(beta.pdf(np.random.uniform(size=(5, 2))), 1.))
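# Beta(1, 1) is the uniform distribution on [0, 1], so its pdf is identically
# 1 there; a quick SciPy cross-check of the same fact:
import numpy as np
from scipy import stats

x = np.random.uniform(size=(5, 2))
assert np.allclose(stats.beta(1, 1).pdf(x), 1.0)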
Example #56
0
    def test_apply_decay_pass(self):
        """Test _apply_land_decay against MATLAB reference."""
        v_rel = {
            4: 0.0038950967656296597,
            -1: 0.0038950967656296597,
            0: 0.0038950967656296597,
            1: 0.0038950967656296597,
            2: 0.0038950967656296597,
            3: 0.0038950967656296597,
            5: 0.0038950967656296597
        }

        p_rel = {
            4: (1.0499941, 0.007978940084158488),
            -1: (1.0499941, 0.007978940084158488),
            0: (1.0499941, 0.007978940084158488),
            1: (1.0499941, 0.007978940084158488),
            2: (1.0499941, 0.007978940084158488),
            3: (1.0499941, 0.007978940084158488),
            5: (1.0499941, 0.007978940084158488)
        }

        tc_track = tc.TCTracks.from_processed_ibtracs_csv(TC_ANDREW_FL)
        tc_track.data[0]['orig_event_flag'] = False
        extent = tc_track.get_extent()
        land_geom = climada.util.coordinates.get_land_geometry(extent=extent,
                                                               resolution=10)
        tc.track_land_params(tc_track.data[0], land_geom)
        tc_synth._apply_land_decay(tc_track.data,
                                   v_rel,
                                   p_rel,
                                   land_geom,
                                   s_rel=True,
                                   check_plot=False)

        p_ref = np.array([
            1.010000000000000, 1.009000000000000, 1.008000000000000,
            1.006000000000000, 1.003000000000000, 1.002000000000000,
            1.001000000000000, 1.000000000000000, 1.000000000000000,
            1.001000000000000, 1.002000000000000, 1.005000000000000,
            1.007000000000000, 1.010000000000000, 1.010000000000000,
            1.010000000000000, 1.010000000000000, 1.010000000000000,
            1.010000000000000, 1.007000000000000, 1.004000000000000,
            1.000000000000000, 0.994000000000000, 0.981000000000000,
            0.969000000000000, 0.961000000000000, 0.947000000000000,
            0.933000000000000, 0.922000000000000, 0.930000000000000,
            0.937000000000000, 0.951000000000000, 0.947000000000000,
            0.943000000000000, 0.948000000000000, 0.946000000000000,
            0.941000000000000, 0.937000000000000, 0.955000000000000,
            0.9741457117, 0.99244068917, 1.00086729492, 1.00545853355,
            1.00818354609, 1.00941850023, 1.00986192053, 1.00998400565
        ]) * 1e3

        self.assertTrue(
            np.allclose(p_ref, tc_track.data[0].central_pressure.values))

        v_ref = np.array([
            0.250000000000000, 0.300000000000000, 0.300000000000000,
            0.350000000000000, 0.350000000000000, 0.400000000000000,
            0.450000000000000, 0.450000000000000, 0.450000000000000,
            0.450000000000000, 0.450000000000000, 0.450000000000000,
            0.450000000000000, 0.400000000000000, 0.400000000000000,
            0.400000000000000, 0.400000000000000, 0.450000000000000,
            0.450000000000000, 0.500000000000000, 0.500000000000000,
            0.550000000000000, 0.650000000000000, 0.800000000000000,
            0.950000000000000, 1.100000000000000, 1.300000000000000,
            1.450000000000000, 1.500000000000000, 1.250000000000000,
            1.300000000000000, 1.150000000000000, 1.150000000000000,
            1.150000000000000, 1.150000000000000, 1.200000000000000,
            1.250000000000000, 1.250000000000000, 1.200000000000000,
            0.9737967353, 0.687255951, 0.4994850556, 0.3551480462,
            0.2270548036, 0.1302099557, 0.0645385918, 0.0225325851
        ]) * 1e2

        self.assertTrue(
            np.allclose(v_ref, tc_track.data[0].max_sustained_wind.values))

        cat_ref = tc.set_category(tc_track.data[0].max_sustained_wind.values,
                                  tc_track.data[0].max_sustained_wind_unit)
        self.assertEqual(cat_ref, tc_track.data[0].category)
Example #57
0
 def test_mean(self):
     beta = Beta(np.ones(2) * 3, np.ones(2))
     self.assertTrue(np.allclose(beta.mean, 0.75))
def test_exercise_2(hmm, simple):
    hmm.train_supervised(simple.train)
    assert np.allclose(hmm.initial_probs, np.array([2 / 3, 1 / 3]), rtol=tolerance)
    assert np.allclose(hmm.transition_probs, np.array([[1 / 2, 0.], [1 / 2, 5 / 8]]), rtol=tolerance)
    assert np.allclose(hmm.final_probs, np.array([0., 3 / 8]), rtol=tolerance)
    assert np.allclose(hmm.emission_probs, np.array([[0.75, 0.25], [0.25, 0.375], [0., 0.375], [0., 0.]]), rtol=tolerance)
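# Supervised HMM training as checked above is relative-frequency counting;
# a minimal sketch of the initial-probability estimate (hypothetical names,
# not necessarily the hmm object's actual API):
import numpy as np

def estimate_initial_probs(state_sequences, num_states):
    counts = np.zeros(num_states)
    for seq in state_sequences:
        counts[seq[0]] += 1  # count which state each sequence starts in
    return counts / counts.sum()

# e.g. two of three training sequences starting in state 0 gives [2/3, 1/3]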
    def test_boyd_non_skew7(self):
        """test_boyd_non_skew
        
        This tests the Boyd routine with data obtained from culvertw application 1.1
        by IceMindserer and BD Parkinson, with calculation code by MJ Boyd.
        It also tests the blockage code.
        """

        stage_0 = 15.0 #change
        stage_1 = 14.0 #change
        elevation_0 = 11.0
        elevation_1 = 10.0

        domain_length = 200.0
        domain_width = 200.0

        culvert_length = 20.0
        culvert_width = 1.2
        ##culvert_height = 3.66
        culvert_blockage = 1.0
        
        culvert_losses = {'inlet':0.5, 'outlet':1.0, 'bend':0.0, 'grate':0.0, 'pier': 0.0, 'other': 0.0}
        culvert_mannings = 0.013
        
        culvert_apron = 0.0
        enquiry_gap = 5.0

        
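        # With culvert_blockage = 1.0 the culvert is fully blocked, so the
        # discharge routine is expected to report zero flow, velocity and depth.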
        expected_Q = 0.0
        expected_v = 0.0
        expected_d = 0.0
        

        domain = self._create_domain(d_length=domain_length,
                                     d_width=domain_width,
                                     dx = 5.0,
                                     dy = 5.0,
                                     elevation_0 = elevation_0,
                                     elevation_1 = elevation_1,
                                     stage_0 = stage_0,
                                     stage_1 = stage_1)
 

        #print 'Defining Structures'
        
        ep0 = numpy.array([domain_length/2-culvert_length/2, 100.0])
        ep1 = numpy.array([domain_length/2+culvert_length/2, 100.0])
        
        
        culvert = Boyd_pipe_operator(domain,
                                    losses=culvert_losses,
                                    diameter=culvert_width,
                                    blockage=culvert_blockage,
                                    end_points=[ep0, ep1],
                                    #height=culvert_height,
                                    apron=culvert_apron,
                                    enquiry_gap=enquiry_gap,
                                    use_momentum_jet=False,
                                    use_velocity_head=False,
                                    manning=culvert_mannings,
                                    logging=False,
                                    label='1.2pipe',
                                    verbose=False)

        #culvert.determine_inflow_outflow()
        
        ( Q, v, d ) = culvert.discharge_routine()
        
        if verbose:
            print('test_boyd_non_skew7')
            print('Q: ', Q, 'expected_Q: ', expected_Q)
            print('v: ', v, 'expected_v: ', expected_v)
            print('d: ', d, 'expected_d: ', expected_d)


        assert numpy.allclose(Q, expected_Q, rtol=1.0e-2, atol=1.0e-5) #inflow
        assert numpy.allclose(v, expected_v, rtol=1.0e-2, atol=1.0e-5) #outflow velocity
        assert numpy.allclose(d, expected_d, rtol=1.0e-2, atol=1.0e-5) #depth at outlet used to calc v  
Example #60
0
 def test_var(self):
     beta = Beta(np.ones(2) * 3, np.ones(2))
     self.assertTrue(np.allclose(beta.var, np.ones(2) * 3 / 80))
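# The Beta moment checks above follow from the standard formulas
# mean = a / (a + b) and var = a * b / ((a + b)**2 * (a + b + 1));
# a quick SciPy cross-check for a = 3, b = 1:
from scipy import stats

dist = stats.beta(3, 1)
assert abs(dist.mean() - 0.75) < 1e-12      # 3 / 4
assert abs(dist.var() - 3 / 80) < 1e-12     # 3 / (16 * 5)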