Example #1
def test_light_source_planar_hillshading():
    """Ensure that the illumination intensity is correct for planar
    surfaces."""

    def plane(azimuth, elevation, x, y):
        """Create a plane whose normal vector is at the given azimuth and
        elevation."""
        theta, phi = _azimuth2math(azimuth, elevation)
        a, b, c = _sph2cart(theta, phi)
        z = -(a*x + b*y) / c
        return z

    def angled_plane(azimuth, elevation, angle, x, y):
        """Create a plane whose normal vector is at an angle from the given
        azimuth and elevation."""
        elevation = elevation + angle
        if elevation > 90:
            azimuth = (azimuth + 180) % 360
            elevation = (90 - elevation) % 90
        return plane(azimuth, elevation, x, y)

    y, x = np.mgrid[5:0:-1, :5]
    for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
        ls = mcolors.LightSource(az, elev)

        # Make a plane at a range of angles to the illumination
        for angle in range(0, 105, 15):
            z = angled_plane(az, elev, angle, x, y)
            h = ls.hillshade(z)
            assert_array_almost_equal(h, np.cos(np.radians(angle)))
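For reference, a minimal sketch of the two coordinate helpers this test assumes: _azimuth2math turns a compass azimuth/elevation in degrees into mathematical spherical angles, and _sph2cart turns those into a unit vector (the actual helpers in the test module may differ in detail).

import numpy as np

def _azimuth2math(azimuth, elevation):
    # Compass azimuth (degrees clockwise from north) and elevation above the
    # horizon to math convention: theta counter-clockwise from east, phi up
    # from the xy-plane, both in radians.
    theta = np.radians((90 - azimuth) % 360)
    phi = np.radians(elevation)
    return theta, phi

def _sph2cart(theta, phi):
    # Unit vector pointing at the given spherical angles.
    x = np.cos(theta) * np.cos(phi)
    y = np.sin(theta) * np.cos(phi)
    z = np.sin(phi)
    return x, y, z
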
Example #2
def test_average_pooling_feature_map_fprop_weight_and_bias():
    """
    Test that AveragePoolingFeatureMap really does what we think it does
    on a forward pass.
    """
    apfmap = AveragePoolingFeatureMap((2, 2), (4, 4))
    apfmap.biases[:] = -9.0
    apfmap.weights[:] = 4.4
    inp = np.arange(1, 17).reshape((4, 4))
    out = apfmap.fprop(inp)
    assert_array_almost_equal(
        out,
        1.7159
        * np.tanh(
            2.0
            / 3.0
            * (
                -9
                + 4.4
                * np.array(
                    [
                        [(1 + 2 + 5 + 6) / 4.0, (3 + 4 + 7 + 8) / 4.0],
                        [(9 + 10 + 13 + 14) / 4.0, (11 + 12 + 15 + 16) / 4.0],
                    ]
                )
            )
        ),
    )
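As a standalone sketch of what the assertion encodes: 2x2 average pooling is just a block mean, and the expected output applies the bias, weight and scaled-tanh squashing used in the test to that pooled array (AveragePoolingFeatureMap's real fprop may be organised differently).

import numpy as np

def average_pool_2x2(inp):
    # Non-overlapping 2x2 average pooling via a reshape.
    h, w = inp.shape
    return inp.reshape(h // 2, 2, w // 2, 2).mean(axis=(1, 3))

inp = np.arange(1, 17, dtype=float).reshape(4, 4)
pooled = average_pool_2x2(inp)                 # [[3.5, 5.5], [11.5, 13.5]]
out = 1.7159 * np.tanh(2.0 / 3.0 * (-9.0 + 4.4 * pooled))
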
Example #3
def test_light_source_hillshading():
    """Compare the current hillshading method against one that should be
    mathematically equivalent. Illuminates a cone from a range of angles."""

    def alternative_hillshade(azimuth, elev, z):
        illum = _sph2cart(*_azimuth2math(azimuth, elev))
        illum = np.array(illum)

        dy, dx = np.gradient(-z)
        dy = -dy
        dz = np.ones_like(dy)
        normals = np.dstack([dx, dy, dz])
        dividers = np.zeros_like(z)[..., None]
        for i, mat in enumerate(normals):
            for j, vec in enumerate(mat):
                dividers[i, j, 0] = np.linalg.norm(vec)
        normals /= dividers
        # once we drop support for numpy 1.7.x the above can be written as
        # normals /= np.linalg.norm(normals, axis=2)[..., None]
        # avoiding the double loop.

        intensity = np.tensordot(normals, illum, axes=(2, 0))
        intensity -= intensity.min()
        intensity /= intensity.ptp()
        return intensity

    y, x = np.mgrid[5:0:-1, :5]
    z = -np.hypot(x - x.mean(), y - y.mean())

    for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
        ls = mcolors.LightSource(az, elev)
        h1 = ls.hillshade(z)
        h2 = alternative_hillshade(az, elev, z)
        assert_array_almost_equal(h1, h2)
Example #4
def test_light_source_hillshading():
    """Compare the current hillshading method against one that should be
    mathematically equivalent. Illuminates a cone from a range of angles."""

    def alternative_hillshade(azimuth, elev, z):
        illum = _sph2cart(*_azimuth2math(azimuth, elev))
        illum = np.array(illum)

        dy, dx = np.gradient(-z)
        dy = -dy
        dz = np.ones_like(dy)
        normals = np.dstack([dx, dy, dz])
        normals /= np.linalg.norm(normals, axis=2)[..., None]

        intensity = np.tensordot(normals, illum, axes=(2, 0))
        intensity -= intensity.min()
        intensity /= intensity.ptp()
        return intensity

    y, x = np.mgrid[5:0:-1, :5]
    z = -np.hypot(x - x.mean(), y - y.mean())

    for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
        ls = mcolors.LightSource(az, elev)
        h1 = ls.hillshade(z)
        h2 = alternative_hillshade(az, elev, z)
        assert_array_almost_equal(h1, h2)
Example #5
def test_light_source_shading_color_range():
    # see also
    #http://matplotlib.org/examples/pylab_examples/shading_example.html

    from matplotlib.colors import LightSource
    from matplotlib.colors import Normalize

    refinput = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
    norm = Normalize(vmin=0, vmax=50)
    ls = LightSource(azdeg=0, altdeg=65)
    testoutput = ls.shade(refinput, plt.cm.jet, norm=norm)
    refoutput = np.array([
        [[0., 0., 0.58912656, 1.],
        [0., 0., 0.67825312, 1.],
        [0., 0., 0.76737968, 1.],
        [0., 0., 0.85650624, 1.]],
        [[0., 0., 0.9456328, 1.],
        [0., 0., 1., 1.],
        [0., 0.04901961, 1., 1.],
        [0., 0.12745098, 1., 1.]],
        [[0., 0.22156863, 1., 1.],
        [0., 0.3, 1., 1.],
        [0., 0.37843137, 1., 1.],
        [0., 0.45686275, 1., 1.]]
        ])
    assert_array_almost_equal(refoutput, testoutput)
Example #6
 def test_basic_gridded_to_gridded_collocation(self):
     gd_copy = self.gd.copy()
     res = self.gd.collocated_onto(self.gd)
     assert self.gd == gd_copy
     res_1 = self.gd.sampled_from(self.gd)
     # The two data results should be identical, although the metadata is slightly different for some reason
     assert_array_almost_equal(res[0].data, res_1[0].data)
Example #7
def test_as_meg_type_evoked():
    """Test interpolation of data on to virtual channels."""

    # validation tests
    evoked = read_evokeds(evoked_fname, condition="Left Auditory")
    assert_raises(ValueError, evoked.as_type, "meg")
    assert_raises(ValueError, evoked.copy().pick_types(meg="grad").as_type, "meg")

    # channel names
    ch_names = evoked.info["ch_names"]
    virt_evoked = evoked.pick_channels(ch_names=ch_names[:10:1], copy=True).as_type("mag")
    assert_true(all("_virtual" in ch for ch in virt_evoked.info["ch_names"]))

    # pick from and to channels
    evoked_from = evoked.pick_channels(ch_names=ch_names[2:10:3], copy=True)
    evoked_to = evoked.pick_channels(ch_names=ch_names[0:10:3], copy=True)

    info_from, info_to = evoked_from.info, evoked_to.info

    # set up things
    args1, args2 = _setup_args(info_from), _setup_args(info_to)
    args1.update(coils2=args2["coils1"]), args2.update(coils2=args1["coils1"])

    # test cross dots
    cross_dots1 = _do_cross_dots(**args1)
    cross_dots2 = _do_cross_dots(**args2)

    assert_array_almost_equal(cross_dots1, cross_dots2.T)

    # correlation test
    evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy()
    data1 = evoked.pick_types(meg="grad").data.ravel()
    data2 = evoked.as_type("grad").data.ravel()
    assert_true(np.corrcoef(data1, data2)[0, 1] > 0.95)
Example #8
    def test_volsurf_projections(self):
        white = surf.generate_plane((0, 0, 0), (0, 1, 0), (0, 0, 1), 10, 10)
        pial = white + np.asarray([[1, 0, 0]])

        above = pial + np.asarray([[3, 0, 0]])
        vg = volgeom.VolGeom((10, 10, 10), np.eye(4))
        vs = volsurf.VolSurfMaximalMapping(vg, white, pial)

        dx = pial.vertices - white.vertices

        for s, w in ((white, 0), (pial, 1), (above, 4)):
            xyz = s.vertices
            ws = vs.surf_project_weights(True, xyz)
            delta = vs.surf_unproject_weights_nodewise(ws) - xyz
            assert_array_equal(delta, np.zeros((100, 3)))
            assert_true(np.all(w == ws))

        vs = volsurf.VolSurfMaximalMapping(vg, white, pial, nsteps=2)
        n2vs = vs.get_node2voxels_mapping()
        assert_equal(n2vs, dict((i, {i: 0.0, i + 100: 1.0}) for i in xrange(100)))

        nd = 17
        ds_mm_expected = np.sum((above.vertices - pial.vertices[nd, :]) ** 2, 1) ** 0.5
        ds_mm = vs.coordinates_to_grey_distance_mm(nd, above.vertices)
        assert_array_almost_equal(ds_mm_expected, ds_mm)

        ds_mm_nodewise = vs.coordinates_to_grey_distance_mm(True, above.vertices)

        assert_array_equal(ds_mm_nodewise, np.ones((100,)) * 3)
Example #9
 def test_filter_proces_with_plain_array_h05(self):
     st = self.test_array
     target = self.targ_h05
     res = mft._filter_process(0.5 * pq.s, 0.5 * pq.s, st * pq.s,
                               2.01 * pq.s, np.array([[0.5], [1.7], [0.4]]))
     self.assertNotIsInstance(res, pq.Quantity)
     assert_array_almost_equal(res, target, decimal=3)
Example #10
    def test_average_node_edge_length(self):
        for side in xrange(1, 5):
            s_flat = surf.generate_plane((0, 0, 0), (0, 0, 1), (0, 1, 0), 6, 6)
            rnd_xyz = 0 * np.random.normal(size=s_flat.vertices.shape)
            s = surf.Surface(s_flat.vertices + rnd_xyz, s_flat.faces)

            nvertices = s.nvertices

            sd = np.zeros((nvertices,))
            c = np.zeros((nvertices,))

            def d(src, trg, vertices=s.vertices):
                s = vertices[src, :]
                t = vertices[trg, :]

                delta = s - t
                print s, t, delta
                return np.sum(delta ** 2) ** .5

            for i_face in s.faces:
                for i in xrange(3):
                    src = i_face[i]
                    trg = i_face[(i + 1) % 3]

                    sd[src] += d(src, trg)
                    sd[trg] += d(src, trg)
                    c[src] += 1
                    c[trg] += 1

                    print i, src, trg, d(src, trg)

            assert_array_almost_equal(sd / c, s.average_node_edge_length)
Example #11
def _scalar_tester(norm_instance, vals):
    """
    Checks if scalars and arrays are handled the same way.
    Tests only for float.
    """
    scalar_result = [norm_instance(float(v)) for v in vals]
    assert_array_almost_equal(scalar_result, norm_instance(vals))
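Hypothetical usage of the helper, pairing it with a concrete norm such as the ones exercised elsewhere in this file:

import numpy as np
import matplotlib.colors as mcolors

vals = np.array([-1.0, 0.0, 0.5, 1.0, 2.0])
_scalar_tester(mcolors.Normalize(vmin=-1, vmax=2), vals)
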
Example #12
 def test_MultipleFilterAlgorithm_with_plain_array_h05(self):
     st = self.test_array
     target = [self.targ_h05_dt05]
     res = mft.multiple_filter_test([0.5] * pq.s, st * pq.s, 2.1 * pq.s, 5,
                                    100, dt=0.5 * pq.s)
     self.assertNotIsInstance(res, pq.Quantity)
     assert_array_almost_equal(res, target, decimal=9)
Example #13
    def test_surf_normalized(self):

        def assert_is_unit_norm(v):
            assert_almost_equal(1., np.sum(v * v))
            assert_equal(v.shape, (len(v),))

        def assert_same_direction(v, w):
            assert_almost_equal(v.dot(w), (v.dot(v) * w.dot(w)) ** .5)

        def helper_test_vec_normalized(v):
            v_norm = surf.normalized(v)
            assert_is_unit_norm(v_norm)
            assert_same_direction(v, v_norm)

            return v_norm

        sizes = [(8,), (7, 4)]

        for size in sizes:
            v = np.random.normal(size=size)
            if len(size) == 1:
                helper_test_vec_normalized(v)
            else:
                # test both for vectors and for a matrix
                v_n = surf.normalized(v)

                n_vecs = v.shape[1]
                for i in xrange(n_vecs):
                    v_n_i = helper_test_vec_normalized(v[i, :])
                    assert_array_almost_equal(v_n_i, v_n[i, :])
Example #14
    def test_surf_fs_asc(self, temp_fn):
        s = surf.generate_sphere(5) * 100

        surf_fs_asc.write(temp_fn, s, overwrite=True)
        t = surf_fs_asc.read(temp_fn)

        assert_array_almost_equal(s.vertices, t.vertices)

        theta = np.asarray([0, 0., 180.])

        r = s.rotate(theta, unit='deg')

        l2r = surf.get_sphere_left_right_mapping(s, r)
        l2r_expected = [0, 1, 2, 6, 5, 4, 3, 11, 10, 9, 8, 7, 15, 14, 13, 12,
                       16, 19, 18, 17, 21, 20, 23, 22, 26, 25, 24]

        assert_array_equal(l2r, np.asarray(l2r_expected))


        sides_facing = 'apism'
        for side_facing in sides_facing:
            l, r = surf.reposition_hemisphere_pairs(s + 10., t + (-10.),
                                              side_facing)

            m = surf.merge(l, r)

            # not sure at the moment why medial rotation
            # messes up - but leave for now
            eps = 666 if side_facing == 'm' else .001
            assert_true((abs(m.center_of_mass) < eps).all())
Example #15
 def test_from_nu(self):
     """ Check conversions from nu to omega and nu to lambda """
     domain = Domain()
     self.assertEqual(nu_to_omega(domain.centre_nu), domain.centre_omega)
     self.assertEqual(nu_to_lambda(domain.centre_nu), domain.centre_lambda)
     assert_array_almost_equal(nu_to_omega(domain.nu), domain.omega)
     assert_array_almost_equal(nu_to_lambda(domain.nu), domain.Lambda)
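The conversions being checked are presumably the standard relations omega = 2*pi*nu and lambda = c/nu; a sketch in SI units (the Domain class here may apply its own unit scaling, e.g. THz and nm):

import numpy as np

C_LIGHT = 299792458.0  # speed of light in vacuum, m/s

def nu_to_omega_sketch(nu):
    # Angular frequency from ordinary frequency.
    return 2.0 * np.pi * nu

def nu_to_lambda_sketch(nu):
    # Wavelength from frequency.
    return C_LIGHT / nu
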
Example #16
    def test_threshold_detection(self):
        # Test whether spikes are extracted at the correct times from
        # an analog signal.  

        # Load membrane potential simulated using Brian2 
        # according to make_spike_extraction_test_data.py.  
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        npz_file_loc = os.path.join(curr_dir,'spike_extraction_test_data.npz')
        iom2 = neo.io.PyNNNumpyIO(npz_file_loc)
        data = iom2.read()
        vm = data[0].segments[0].analogsignals[0]
        spike_train = stgen.threshold_detection(vm)
        try:
            len(spike_train)
        except TypeError: # Handles an error in Neo related to some zero length
                          # spike trains being treated as unsized objects.
            warnings.warn(("The spike train may be an unsized object. This may be related "
                            "to an issue in Neo with some zero-length SpikeTrain objects. "
                            "Bypassing this by creating an empty SpikeTrain object."))
            spike_train = neo.core.SpikeTrain([],t_start=spike_train.t_start,
                                                 t_stop=spike_train.t_stop,
                                                 units=spike_train.units)

        # Correct values determined previously.  
        true_spike_train = [0.0123, 0.0354, 0.0712, 0.1191, 
                            0.1694, 0.22, 0.2711]
        
        # Does threshold_detection give the correct number of spikes?
        self.assertEqual(len(spike_train), len(true_spike_train))
        # Does threshold_detection give the correct times for the spikes?
        try:
            assert_array_almost_equal(spike_train, true_spike_train)
        except AttributeError:  # If numpy version too old to have allclose
            self.assertTrue(np.array_equal(spike_train, true_spike_train))
Example #17
 def testGeodeticConversionsArray(self):
     lat, lon = np.mgrid[-89:89:5,-179:179:5]
     x, y, z = geodetic2EcefZero(np.deg2rad(lat), np.deg2rad(lon))
 
     r = ecef2Geodetic(x, y, z)
     #print np.rad2deg(r)
     assert_array_almost_equal(np.rad2deg(r),[lat,lon], 11)
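A sketch of the standard zero-altitude geodetic-to-ECEF conversion this round trip presumably exercises, using WGS84 parameters; the module's own wgs84 constants and signatures may differ.

import numpy as np

WGS84_A = 6378137.0                    # semi-major axis, m
WGS84_F = 1.0 / 298.257223563          # flattening
WGS84_E2 = WGS84_F * (2.0 - WGS84_F)   # first eccentricity squared

def geodetic2ecef_zero_sketch(lat, lon):
    # Geodetic latitude/longitude in radians at zero altitude -> ECEF x, y, z in metres.
    n = WGS84_A / np.sqrt(1.0 - WGS84_E2 * np.sin(lat) ** 2)
    x = n * np.cos(lat) * np.cos(lon)
    y = n * np.cos(lat) * np.sin(lon)
    z = n * (1.0 - WGS84_E2) * np.sin(lat)
    return x, y, z
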
Example #18
def test_PowerNorm():
    a = np.array([0, 0.5, 1, 1.5], dtype=float)
    pnorm = mcolors.PowerNorm(1)
    norm = mcolors.Normalize()
    assert_array_almost_equal(norm(a), pnorm(a))

    a = np.array([-0.5, 0, 2, 4, 8], dtype=float)
    expected = [0, 0, 1/16, 1/4, 1]
    pnorm = mcolors.PowerNorm(2, vmin=0, vmax=8)
    assert_array_almost_equal(pnorm(a), expected)
    assert_equal(pnorm(a[0]), expected[0])
    assert_equal(pnorm(a[2]), expected[2])
    assert_array_almost_equal(a[1:], pnorm.inverse(pnorm(a))[1:])

    # Clip = True
    a = np.array([-0.5, 0, 1, 8, 16], dtype=float)
    expected = [0, 0, 0, 1, 1]
    pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=True)
    assert_array_almost_equal(pnorm(a), expected)
    assert_equal(pnorm(a[0]), expected[0])
    assert_equal(pnorm(a[-1]), expected[-1])

    # Clip = True at call time
    a = np.array([-0.5, 0, 1, 8, 16], dtype=float)
    expected = [0, 0, 0, 1, 1]
    pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=False)
    assert_array_almost_equal(pnorm(a, clip=True), expected)
    assert_equal(pnorm(a[0], clip=True), expected[0])
    assert_equal(pnorm(a[-1], clip=True), expected[-1])
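The expected values above follow from mapping x to ((x - vmin) / (vmax - vmin)) ** gamma, with inputs below vmin sent to 0; a rough re-derivation (not matplotlib's actual implementation):

import numpy as np

def power_norm_sketch(x, gamma, vmin, vmax, clip=False):
    x = np.asarray(x, dtype=float)
    if clip:
        x = np.clip(x, vmin, vmax)
    scaled = np.ma.masked_less(x - vmin, 0) / (vmax - vmin)
    return np.ma.filled(scaled ** gamma, 0.0)

power_norm_sketch([-0.5, 0, 2, 4, 8], gamma=2, vmin=0, vmax=8)
# -> [0., 0., 0.0625, 0.25, 1.], i.e. the expected list above
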
Example #19
 def testBBMerge(self):
     bb1 = BoundingBox(latSouth=-55, lonWest=95, latNorth=-45, lonEast=109)
     bb2 = BoundingBox(latSouth=44, lonWest=-164, latNorth=74, lonEast=-35)
     bb = BoundingBox.mergedBoundingBoxes([bb1,bb2])
     assert_array_equal([bb.latSouth,bb.latNorth,bb.lonWest,bb.lonEast],
                        [bb1.latSouth,bb2.latNorth,bb1.lonWest,bb2.lonEast])
     assert_array_almost_equal(bb.center, [21.136113246, -150])
Example #20
def test_light_source_masked_shading():
    """Array comparison test for a surface with a masked portion. Ensures that
    we don't wind up with "fringes" of odd colors around masked regions."""
    y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
    z = 10 * np.cos(x ** 2 + y ** 2)

    z = np.ma.masked_greater(z, 9.9)

    cmap = plt.cm.copper
    ls = mcolors.LightSource(315, 45)
    rgb = ls.shade(z, cmap)

    # Result stored transposed and rounded for more compact display...
    expect = np.array(
        [
            [
                [0.00, 0.46, 0.91, 0.91, 0.84, 0.64, 0.29, 0.00],
                [0.46, 0.96, 1.00, 1.00, 1.00, 0.97, 0.67, 0.18],
                [0.91, 1.00, 1.00, 1.00, 1.00, 1.00, 0.96, 0.36],
                [0.91, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 0.51],
                [0.84, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 0.44],
                [0.64, 0.97, 1.00, 1.00, 1.00, 1.00, 0.94, 0.09],
                [0.29, 0.67, 0.96, 1.00, 1.00, 0.94, 0.38, 0.01],
                [0.00, 0.18, 0.36, 0.51, 0.44, 0.09, 0.01, 0.00],
            ],
            [
                [0.00, 0.29, 0.61, 0.75, 0.64, 0.41, 0.18, 0.00],
                [0.29, 0.81, 0.95, 0.93, 0.85, 0.68, 0.40, 0.11],
                [0.61, 0.95, 1.00, 0.78, 0.78, 0.77, 0.52, 0.22],
                [0.75, 0.93, 0.78, 0.00, 0.00, 0.78, 0.54, 0.19],
                [0.64, 0.85, 0.78, 0.00, 0.00, 0.78, 0.45, 0.08],
                [0.41, 0.68, 0.77, 0.78, 0.78, 0.55, 0.25, 0.02],
                [0.18, 0.40, 0.52, 0.54, 0.45, 0.25, 0.00, 0.00],
                [0.00, 0.11, 0.22, 0.19, 0.08, 0.02, 0.00, 0.00],
            ],
            [
                [0.00, 0.19, 0.39, 0.48, 0.41, 0.26, 0.12, 0.00],
                [0.19, 0.52, 0.73, 0.78, 0.66, 0.46, 0.26, 0.07],
                [0.39, 0.73, 0.95, 0.50, 0.50, 0.53, 0.30, 0.14],
                [0.48, 0.78, 0.50, 0.00, 0.00, 0.50, 0.23, 0.12],
                [0.41, 0.66, 0.50, 0.00, 0.00, 0.50, 0.11, 0.05],
                [0.26, 0.46, 0.53, 0.50, 0.50, 0.11, 0.03, 0.01],
                [0.12, 0.26, 0.30, 0.23, 0.11, 0.03, 0.00, 0.00],
                [0.00, 0.07, 0.14, 0.12, 0.05, 0.01, 0.00, 0.00],
            ],
            [
                [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                [1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
                [1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
                [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
            ],
        ]
    ).T

    assert_array_almost_equal(rgb, expect, decimal=2)
Example #21
 def testEllipsoidLineIntersection(self):
     for ellipsoidLineIntersection in ellipsoidLineIntersectionFns:
         p1 = np.array(geodetic2Ecef(np.deg2rad(30),np.deg2rad(60),0))
         p2 = np.array(geodetic2Ecef(np.deg2rad(-30),np.deg2rad(-60),0))
         
         i1 = ellipsoidLineIntersection(wgs84A, wgs84B, p1, [p1-p2], directed=False)
 
         assert_array_almost_equal(i1, [p1])
Example #22
    def test_shift2(self):
        # C yields a value based on shifted contexts
        res = []
        for t in self.daterange:
            self.ctx.set_date(t)
            res.append(self.ctx[D])

        assert_array_almost_equal(res, [(1,2,3), (3,5,7), (6,9,12)])
Example #23
 def test_from_lambda(self):
     """ Check conversions from lambda to nu and lambda to omega """
     domain = Domain()
     self.assertEqual(lambda_to_nu(domain.centre_lambda), domain.centre_nu)
     self.assertEqual(lambda_to_omega(domain.centre_lambda),
                      domain.centre_omega)
     assert_array_almost_equal(lambda_to_nu(domain.Lambda), domain.nu)
     assert_array_almost_equal(lambda_to_omega(domain.Lambda), domain.omega)
Example #24
def test_rgb_hsv_round_trip():
    for a_shape in [(500, 500, 3), (500, 3), (1, 3), (3,)]:
        np.random.seed(0)
        tt = np.random.random(a_shape)
        assert_array_almost_equal(tt,
            mcolors.hsv_to_rgb(mcolors.rgb_to_hsv(tt)))
        assert_array_almost_equal(tt,
            mcolors.rgb_to_hsv(mcolors.hsv_to_rgb(tt)))
Example #25
    def test_surface_flatten(self, dim):
        def unit_vec3(dim, scale):
            v = [0, 0, 0]
            v[dim] = float(scale)
            return tuple(v)

        origin = (0, 0, 0)
        plane_size = 10

        scale = 1.
        vec1 = unit_vec3(dim, scale=scale)
        vec2 = unit_vec3((dim + 1) % 3, scale=scale)

        plane = generate_plane(origin, vec1, vec2, plane_size, plane_size)

        noise_level = .05
        nan_vertices_ratio = .05

        # add some noise to spatial coordinates
        vertices = plane.vertices
        noise = np.random.uniform(size=vertices.shape,
                                  low=-.5,
                                  high=.5) * noise_level * scale
        vertices_noisy = vertices + noise

        # make some vertices NaN (as might be the case for flat surfaces)
        nan_count_float = plane.nvertices * nan_vertices_ratio
        nan_count = np.ceil(nan_count_float).astype(int)
        nan_vertices = np.random.random_integers(plane.nvertices,
                                                 size=(nan_count,)) - 1
        vertices_noisy[nan_vertices, dim] = np.nan
        plane_noisy = Surface(vertices_noisy, plane.faces)

        # compute normals
        f_normal = plane_noisy.face_normals

        # find average normal

        non_nan_f_normal = np.logical_not(np.any(np.isnan(f_normal), axis=1))
        f_normal_avg = np.mean(f_normal[non_nan_f_normal], axis=0)

        # test average normal
        assert_array_almost_equal(plane.nanmean_face_normal, f_normal_avg,
                                  decimal=2)

        # the output has only x and y coordinates; with the z-coordinate set
        # to zero, the flattened coordinates must be at similar pairwise
        # distances to the original ones
        max_deformation = .1
        x, y = flat_surface2xy(plane_noisy, max_deformation)
        n_vertices = plane.nvertices
        z = np.zeros((n_vertices,))
        flat_xyz = np.asarray((x, y, z))

        # the rotated nodes must have the same pairwise distances as
        # the original surface
        max_difference = 3 * noise_level
        SurfingSurfaceTests.assert_coordinates_almost_equal_modulo_rotation(
            flat_xyz.T, plane.vertices, max_difference)
Example #26
 def testArray(self):
     f = ufunc_mod.UnaryCallable()
     a = numpy.arange(5, dtype=float)
     b = f(a)
     assert_array_almost_equal(b, a*2.0) 
     c = numpy.zeros(5, dtype=float)
     d = f(a,output=c)
     self.assertTrue(c is d)
     assert_array_almost_equal(d, a*2.0) 
Example #27
def test_colormap_reversing(name):
    """Check the generated _lut data of a colormap and corresponding
    reversed colormap if they are almost the same."""
    cmap = plt.get_cmap(name)
    cmap_r = cmap.reversed()
    if not cmap_r._isinit:
        cmap._init()
        cmap_r._init()
    assert_array_almost_equal(cmap._lut[:-3], cmap_r._lut[-4::-1])
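The slicing works because a colormap's _lut holds its N body colours followed by three special rows (under, over, bad), so only the body is compared; a toy illustration of the indices, assuming that layout:

import numpy as np

lut = np.arange(7)           # pretend: 4 body rows, then under, over, bad
body = lut[:-3]              # -> [0, 1, 2, 3]
body_reversed = lut[-4::-1]  # -> [3, 2, 1, 0], the body read backwards
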
Example #28
    def assert_array_almost_equal(self, a, b):
        """Assertion that two arrays compare almost equal.

        The arrays are converted to :class:`numpy.ma.MaskedArray` using
        the :meth:`_tomasked` method of the test class before the
        comparison is made.

        """
        assert_array_almost_equal(self._tomasked(a), self._tomasked(b))
Example #29
    def test_training(self):
        inputs = np.array([[0, 0], [1, 1], [2, 2]])
        targets = np.array([[0], [1], [2]])
        data_set = NumericalDataSet(inputs, targets)

        lin_reg = SciPyLinReg(SciPyLinReg.ORDINARY)
        lin_reg.train(data_set)

        assert_array_almost_equal([0.5, 0.5], lin_reg.get_params())
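A quick sanity check on the expected coefficients: the two input columns are identical and the targets equal their mean, so the minimum-norm least-squares solution is [0.5, 0.5] (assuming SciPyLinReg.ORDINARY amounts to ordinary least squares):

import numpy as np

X = np.array([[0, 0], [1, 1], [2, 2]], dtype=float)
y = np.array([0, 1, 2], dtype=float)
coef, residuals, rank, sv = np.linalg.lstsq(X, y, rcond=None)
print(coef)  # ~[0.5, 0.5]; any a + b = 1 fits, lstsq returns the minimum-norm solution
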
Example #30
 def test_basic_ungridded_to_gridded_collocation(self):
     gd_copy = self.gd.copy()
     res = self.ug.collocated_onto(self.gd)
     assert self.gd == gd_copy
     res_1 = self.gd.sampled_from(self.ug)
     # The two data results should be identical, although the metadata is slightly different for some reason
     assert_array_almost_equal(res[0].data, res_1[0].data)
     # This dataset should still be the same as the alternative one (this checks data and metadata)
     assert self.ug == self.ug_1
Example #31
 def test_spike_triggered_average_with_n_spikes_on_constant_function(self):
     '''Signal should average to the input'''
     const = 13.8
     x = const * np.ones(201)
     asiga = AnalogSignal(
         np.array([x]).T, units='mV', sampling_rate=10 / ms)
     st = SpikeTrain([3, 5.6, 7, 7.1, 16, 16.3], units='ms', t_stop=20)
     window_starttime = -2 * ms
     window_endtime = 2 * ms
     STA = sta.spike_triggered_average(
         asiga, st, (window_starttime, window_endtime))
     a = int(((window_endtime - window_starttime) *
             asiga.sampling_rate).simplified)
     cutout = asiga[0: a]
     cutout.t_start = window_starttime
     assert_array_almost_equal(STA, cutout, 12)
Example #32
def test_average_pooling_feature_map_fprop():
    """
    Test that AveragePoolingFeatureMap really does what we think it does
    on a forward pass.
    """
    apfmap = AveragePoolingFeatureMap((2, 2), (4, 4))
    apfmap.biases[:] = 0.
    apfmap.weights[:] = 1.
    inp = np.arange(1, 17).reshape((4, 4))
    out = apfmap.fprop(inp)
    assert_array_almost_equal(
        out,
        1.7159 * np.tanh(2. / 3. * np.array([[(1 + 2 + 5 + 6) / 4.,
                                              (3 + 4 + 7 + 8) / 4.],
                                             [(9 + 10 + 13 + 14) / 4.,
                                              (11 + 12 + 15 + 16) / 4.]])))
Example #33
    def test_silhouette_metric(self):
        """
        Test the silhouette metric of the k-elbow visualizer
        """
        visualizer = KElbowVisualizer(KMeans(random_state=0),
                                      k=5,
                                      metric="silhouette",
                                      timings=False)
        visualizer.fit(X)

        expected = np.array([0.691636, 0.456646, 0.255174, 0.239842])
        assert len(visualizer.k_scores_) == 4

        visualizer.poof()
        self.assert_images_similar(visualizer)
        assert_array_almost_equal(visualizer.k_scores_, expected)
Example #34
def test_multi_convolutional_feature_map_singleplane_bprop():
    size = (20, 20)
    elems = np.prod(size)
    fsize = (5, 5)
    osize = (16, 16)
    mfmap = MultiConvolutionalFeatureMap(fsize, size, 1)
    mfmap.initialize()
    in1 = random.normal(size=size)
    dout = np.ones(osize)
    bprop = lambda inp: mfmap.bprop(dout, inp)
    grad1 = lambda var: bprop((var.reshape(size), ))[0].reshape(elems)
    func1 = lambda var: mfmap.fprop((var.reshape(size), )).sum()
    varied_input = random.normal(size=size)
    fd_grad1 = fd_grad(func1, varied_input.reshape(elems), 1e-4)
    real_grad1 = grad1(varied_input)
    assert_array_almost_equal(fd_grad1, real_grad1)
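fd_grad is assumed to be a central-difference gradient check, roughly like the sketch below; the project's actual helper may differ.

import numpy as np

def fd_grad_sketch(func, x, eps):
    # Central-difference approximation of the gradient of a scalar-valued func.
    x = np.asarray(x, dtype=float)
    grad = np.empty_like(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step[i] = eps
        grad[i] = (func(x + step) - func(x - step)) / (2.0 * eps)
    return grad
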
Example #35
    def test_distortion_metric(self):
        """
        Test the distortion metric of the k-elbow visualizer
        """
        visualizer = KElbowVisualizer(KMeans(random_state=0),
                                      k=5,
                                      metric="distortion",
                                      timings=False)
        visualizer.fit(X)

        expected = np.array([7.677785, 8.364319, 8.893634, 8.013021])
        assert len(visualizer.k_scores_) == 4

        visualizer.poof()
        self.assert_images_similar(visualizer)
        assert_array_almost_equal(visualizer.k_scores_, expected)
Example #36
def test_colormap_reversing(name):
    """Check the generated _lut data of a colormap and corresponding
    reversed colormap if they are almost the same."""
    should_have_warning = {
        'spectral', 'spectral_r', 'Vega10', 'Vega10_r', 'Vega20', 'Vega20_r',
        'Vega20b', 'Vega20b_r', 'Vega20c', 'Vega20c_r'
    }
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cmap = plt.get_cmap(name)
    assert len(w) == (1 if name in should_have_warning else 0)
    cmap_r = cmap.reversed()
    if not cmap_r._isinit:
        cmap._init()
        cmap_r._init()
    assert_array_almost_equal(cmap._lut[:-3], cmap_r._lut[-4::-1])
Example #37
    def test_corrcoef_binned_short_input(self):
        '''
        Test if input list of one binned spike train yields 1.0.
        '''
        # Calculate correlation
        binned_st = conv.BinnedSpikeTrain(self.st_0,
                                          t_start=0 * pq.ms,
                                          t_stop=50. * pq.ms,
                                          binsize=1 * pq.ms)
        result = sc.corrcoef(binned_st, fast=False)
        target = np.array(1.)

        # Check result and dimensionality of result
        self.assertEqual(result.ndim, 0)
        assert_array_almost_equal(result, target)
        assert_array_almost_equal(result, sc.corrcoef(binned_st, fast=True))
Example #38
    def test_empty_spike_train(self):
        """
        Test whether a warning is yielded in case of empty spike train.
        Also check correctness of the output array.
        """
        # st_2 is empty
        binned_12 = conv.BinnedSpikeTrain([self.st_1, self.st_2],
                                          bin_size=1 * pq.ms)

        with self.assertWarns(UserWarning):
            result = sc.correlation_coefficient(binned_12, fast=False)

        # test for NaNs in the output array
        target = np.zeros((2, 2)) * np.nan
        target[0, 0] = 1.0
        assert_array_almost_equal(result, target)
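Conceptually, the result is np.corrcoef applied to the matrix of bin counts: an all-zero row has zero variance, so its row and column of the correlation matrix become NaN, which is what the warning and the target encode. Hypothetical counts, just to show the NaN pattern:

import numpy as np

counts = np.array([[0., 2., 1., 0., 3.],   # hypothetical bin counts for st_1
                   [0., 0., 0., 0., 0.]])  # st_2 is empty
with np.errstate(invalid='ignore', divide='ignore'):
    print(np.corrcoef(counts))  # [[1., nan], [nan, nan]]
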
Example #39
def test_light_source_masked_shading():
    """Array comparison test for a surface with a masked portion. Ensures that
    we don't wind up with "fringes" of odd colors around masked regions."""
    y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
    z = 10 * np.cos(x**2 + y**2)

    z = np.ma.masked_greater(z, 9.9)

    cmap = plt.cm.copper
    ls = mcolors.LightSource(315, 45)
    rgb = ls.shade(z, cmap)

    # Result stored transposed and rounded for more compact display...
    expect = np.array([[[0.90, 0.88, 0.91, 0.91, 0.84, 0.64, 0.36, 0.00],
                        [0.88, 0.96, 1.00, 1.00, 1.00, 0.97, 0.64, 0.18],
                        [0.91, 1.00, 1.00, 1.00, 1.00, 1.00, 0.74, 0.34],
                        [0.91, 1.00, 1.00, 0.00, 0.00, 1.00, 0.52, 0.30],
                        [0.84, 1.00, 1.00, 0.00, 0.00, 1.00, 0.25, 0.13],
                        [0.64, 0.97, 1.00, 1.00, 1.00, 0.23, 0.07, 0.03],
                        [0.36, 0.64, 0.74, 0.52, 0.25, 0.07, 0.00, 0.01],
                        [0.00, 0.18, 0.34, 0.30, 0.13, 0.03, 0.01, 0.00]],
                       [[0.90, 0.82, 0.85, 0.82, 0.68, 0.46, 0.24, 0.00],
                        [0.82, 0.91, 0.95, 0.93, 0.85, 0.68, 0.39, 0.10],
                        [0.85, 0.95, 1.00, 0.78, 0.78, 0.77, 0.42, 0.18],
                        [0.82, 0.93, 0.78, 0.00, 0.00, 0.78, 0.30, 0.15],
                        [0.68, 0.85, 0.78, 0.00, 0.00, 0.78, 0.13, 0.06],
                        [0.46, 0.68, 0.77, 0.78, 0.78, 0.13, 0.03, 0.01],
                        [0.24, 0.39, 0.42, 0.30, 0.13, 0.03, 0.00, 0.00],
                        [0.00, 0.10, 0.18, 0.15, 0.06, 0.01, 0.00, 0.00]],
                       [[0.90, 0.79, 0.81, 0.76, 0.58, 0.35, 0.17, 0.00],
                        [0.79, 0.88, 0.92, 0.88, 0.73, 0.50, 0.24, 0.05],
                        [0.81, 0.92, 1.00, 0.50, 0.50, 0.53, 0.22, 0.09],
                        [0.76, 0.88, 0.50, 0.00, 0.00, 0.50, 0.12, 0.05],
                        [0.58, 0.73, 0.50, 0.00, 0.00, 0.50, 0.03, 0.01],
                        [0.35, 0.50, 0.53, 0.50, 0.50, 0.02, 0.00, 0.00],
                        [0.17, 0.24, 0.22, 0.12, 0.03, 0.00, 0.00, 0.00],
                        [0.00, 0.05, 0.09, 0.05, 0.01, 0.00, 0.00, 0.00]],
                       [[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]]]).T

    assert_array_almost_equal(rgb, expect, decimal=2)
Example #40
def test_light_source_shading_default():
    """Array comparison test for the default "hsv" blend mode. Ensure the
    default result doesn't change without warning."""
    y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
    z = 10 * np.cos(x**2 + y**2)

    cmap = plt.cm.copper
    ls = mcolors.LightSource(315, 45)
    rgb = ls.shade(z, cmap)

    # Result stored transposed and rounded for more compact display...
    expect = np.array([[[0.87, 0.85, 0.90, 0.90, 0.82, 0.62, 0.34, 0.00],
                        [0.85, 0.94, 0.99, 1.00, 1.00, 0.96, 0.62, 0.17],
                        [0.90, 0.99, 1.00, 1.00, 1.00, 1.00, 0.71, 0.33],
                        [0.90, 1.00, 1.00, 1.00, 1.00, 0.98, 0.51, 0.29],
                        [0.82, 1.00, 1.00, 1.00, 1.00, 0.64, 0.25, 0.13],
                        [0.62, 0.96, 1.00, 0.98, 0.64, 0.22, 0.06, 0.03],
                        [0.34, 0.62, 0.71, 0.51, 0.25, 0.06, 0.00, 0.01],
                        [0.00, 0.17, 0.33, 0.29, 0.13, 0.03, 0.01, 0.00]],

                       [[0.87, 0.79, 0.83, 0.80, 0.66, 0.44, 0.23, 0.00],
                        [0.79, 0.88, 0.93, 0.92, 0.83, 0.66, 0.38, 0.10],
                        [0.83, 0.93, 0.99, 1.00, 0.92, 0.75, 0.40, 0.18],
                        [0.80, 0.92, 1.00, 0.99, 0.93, 0.75, 0.28, 0.14],
                        [0.66, 0.83, 0.92, 0.93, 0.87, 0.44, 0.12, 0.06],
                        [0.44, 0.66, 0.75, 0.75, 0.44, 0.12, 0.03, 0.01],
                        [0.23, 0.38, 0.40, 0.28, 0.12, 0.03, 0.00, 0.00],
                        [0.00, 0.10, 0.18, 0.14, 0.06, 0.01, 0.00, 0.00]],

                       [[0.87, 0.75, 0.78, 0.73, 0.55, 0.33, 0.16, 0.00],
                        [0.75, 0.85, 0.90, 0.86, 0.71, 0.48, 0.23, 0.05],
                        [0.78, 0.90, 0.98, 1.00, 0.82, 0.51, 0.21, 0.08],
                        [0.73, 0.86, 1.00, 0.97, 0.84, 0.47, 0.11, 0.05],
                        [0.55, 0.71, 0.82, 0.84, 0.71, 0.20, 0.03, 0.01],
                        [0.33, 0.48, 0.51, 0.47, 0.20, 0.02, 0.00, 0.00],
                        [0.16, 0.23, 0.21, 0.11, 0.03, 0.00, 0.00, 0.00],
                        [0.00, 0.05, 0.08, 0.05, 0.01, 0.00, 0.00, 0.00]],

                       [[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
                        [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]]]).T
    assert_array_almost_equal(rgb, expect, decimal=2)
Example #41
def test_min_step_generator_with_base_step01():
    desired = 0.1
    step_gen = nd.MinStepGenerator(base_step=desired, num_steps=1, offset=0)
    methods = ['forward', 'backward', 'central', 'complex']
    for n in range(1, 5):
        for order in [1, 2, 4, 6, 8]:
            min_length = n + order - 1
            lengths = [
                min_length, min_length,
                max(min_length // 2, 1),
                max(min_length // 4, 1)
            ]
            for m, method in zip(lengths, methods):
                h = [h for h in step_gen(0, method=method, n=n, order=order)]
                # print(len(h), n, order, method)
                assert_array_almost_equal((h[-1] - desired) / desired, 0)
                assert_equal(m, len(h))
Example #42
    def test_corrcoef_binned_same_spiketrains(self):
        '''
        Test if the correlation coefficient between two identical binned spike
        trains evaluates to a 2x2 matrix of ones.
        '''
        # Calculate correlation
        binned_st = conv.BinnedSpikeTrain(
            [self.st_0, self.st_0], t_start=0 * pq.ms, t_stop=50. * pq.ms,
            binsize=1 * pq.ms)
        result = sc.corrcoef(binned_st, fast=False)
        target = np.ones((2, 2))

        # Check dimensions
        self.assertEqual(len(result), 2)
        # Check result
        assert_array_almost_equal(result, target)
        assert_array_almost_equal(result, sc.corrcoef(binned_st, fast=True))
Example #43
def test_SymLogNorm():
    """
    Test SymLogNorm behavior
    """
    norm = mcolors.SymLogNorm(3, vmax=5, linscale=1.2)
    vals = np.array([-30, -1, 2, 6], dtype=float)
    normed_vals = norm(vals)
    expected = [0., 0.53980074, 0.826991, 1.02758204]
    assert_array_almost_equal(normed_vals, expected)
    _inverse_tester(norm, vals)
    _scalar_tester(norm, vals)
    _mask_tester(norm, vals)

    # Ensure that specifying vmin returns the same result as above
    norm = mcolors.SymLogNorm(3, vmin=-30, vmax=5, linscale=1.2)
    normed_vals = norm(vals)
    assert_array_almost_equal(normed_vals, expected)
Example #44
    def testBBPole(self):
        # north pole
        bb = BoundingBox(latSouth=60, lonWest=-180, latNorth=90, lonEast=180)
        assert_array_almost_equal(bb.center, [90, 0])
        assert_array_almost_equal(bb.size, [6695.78581964, 6695.78581964])

        # south pole
        bb = BoundingBox(latSouth=-90, lonWest=-180, latNorth=-60, lonEast=180)
        assert_array_almost_equal(bb.center, [-90, 0])
        assert_array_almost_equal(bb.size, [6695.78581964, 6695.78581964])
Example #45
def test_Function():

    p = pd.read_csv('../../orcl_2000.csv', index_col=[0], parse_dates=True)
    p.columns = [str.lower(col) for col in p.columns]
    # func = Function('ACD')
    # ret = func(p)
    ret = ta.ACD(p)
    print('length of ret is {0}, NA number is {1}'.format(
        len(ret),
        ret.isnull().sum()))

    benchmark_ret = benchmark.ACD(p)
    # benchmark_ret = talib.abstract.AROON(p)
    print('length of ret is {0}, NA number is {1}'.format(
        len(benchmark_ret),
        benchmark_ret.isnull().sum()))

    assert_array_almost_equal(ret.dropna(), benchmark_ret.dropna())
Example #46
    def test_geometric_models(self):
        np.random.seed(10)

        periods = 1000
        price = 70.0
        mu = 0.05
        sigma = 0.3
        period_duration = 1.0

        np.random.seed(10)
        iterative_ret = bm.generate_gbm_prices(periods, price, mu, sigma,
                                               period_duration)
        np.random.seed(10)
        vectorised_ret = bm.generate_gbm_prices_vec(periods, price, mu, sigma,
                                                    period_duration)

        # Equal to 6 decimal places
        np_utils.assert_array_almost_equal(iterative_ret, vectorised_ret)
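A vectorised sketch of standard geometric Brownian motion price generation, presumably what generate_gbm_prices and generate_gbm_prices_vec implement (the exact draw order may differ from the library's; the test reseeds before each call so both implementations consume identical random numbers):

import numpy as np

def generate_gbm_prices_sketch(periods, s0, mu, sigma, dt):
    # One GBM path: S_{t+1} = S_t * exp((mu - sigma^2/2) * dt + sigma * sqrt(dt) * Z)
    z = np.random.standard_normal(periods)
    log_returns = (mu - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * z
    return s0 * np.exp(np.cumsum(log_returns))
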
Example #47
 def testArray(self):
     f = ufunc_ext.BinaryCallable()
     a = numpy.random.randn(5)
     b = numpy.random.randn(5)
     assert_array_almost_equal(f(a,b), (a*2+b*3))
     c = numpy.zeros(5, dtype=float)
     d = f(a,b,output=c)
     self.assertTrue(c is d)
     assert_array_almost_equal(d, a*2 + b*3)
     assert_array_almost_equal(f(a, 2.0), a*2 + 6.0)
     assert_array_almost_equal(f(1.0, b), 2.0 + b*3)
Example #48
    def test_zscore_list_dup(self):
        '''
        Test zscore on a list of AnalogSignalArray objects, asking to return a
        duplicate.
        '''
        signal1 = neo.AnalogSignalArray(np.transpose(
            np.vstack([self.test_seq1, self.test_seq1])),
                                        units='mV',
                                        t_start=0. * pq.ms,
                                        sampling_rate=1000. * pq.Hz,
                                        dtype=float)
        signal2 = neo.AnalogSignalArray(np.transpose(
            np.vstack([self.test_seq1, self.test_seq2])),
                                        units='mV',
                                        t_start=0. * pq.ms,
                                        sampling_rate=1000. * pq.Hz,
                                        dtype=float)
        signal_list = [signal1, signal2]

        m = np.mean(np.hstack([self.test_seq1, self.test_seq1]))
        s = np.std(np.hstack([self.test_seq1, self.test_seq1]))
        target11 = (self.test_seq1 - m) / s
        target21 = (self.test_seq1 - m) / s
        m = np.mean(np.hstack([self.test_seq1, self.test_seq2]))
        s = np.std(np.hstack([self.test_seq1, self.test_seq2]))
        target12 = (self.test_seq1 - m) / s
        target22 = (self.test_seq2 - m) / s

        # Call elephant function
        result = elephant.signal_processing.zscore(signal_list, inplace=False)

        assert_array_almost_equal(result[0].magnitude,
                                  np.transpose(np.vstack([target11,
                                                          target12])),
                                  decimal=9)
        assert_array_almost_equal(result[1].magnitude,
                                  np.transpose(np.vstack([target21,
                                                          target22])),
                                  decimal=9)

        # Assert original signal is untouched
        self.assertEqual(signal1.magnitude[0, 0], self.test_seq1[0])
        self.assertEqual(signal2.magnitude[0, 1], self.test_seq2[0])
Example #49
    def test_calinski_harabaz_metric(self):
        """
        Test the calinski-harabaz metric of the k-elbow visualizer
        """
        visualizer = KElbowVisualizer(KMeans(random_state=0),
                                      k=5,
                                      metric="calinski_harabaz",
                                      timings=False)
        visualizer.fit(X)

        expected = np.array([
            81.662726256035683, 50.992378259195554, 40.952179227847012,
            35.939494
        ])

        self.assertEqual(len(visualizer.k_scores_), 4)
        visualizer.poof()
        self.assert_images_similar(visualizer)
        assert_array_almost_equal(visualizer.k_scores_, expected)
Example #50
    def test_zscore_single_multidim_inplace(self):
        """
        Test z-score on a single AnalogSignal with multiple dimensions, asking
        for an inplace operation.
        """
        signal = neo.AnalogSignal(
            np.vstack([self.test_seq1, self.test_seq2]), units='mV',
            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)

        m = np.mean(signal.magnitude, axis=0, keepdims=True)
        s = np.std(signal.magnitude, axis=0, keepdims=True)
        target = (signal.magnitude - m) / s

        assert_array_almost_equal(
            elephant.signal_processing.zscore(
                signal, inplace=True).magnitude, target, decimal=9)

        # Assert original signal is overwritten
        self.assertEqual(signal[0, 0].magnitude, target[0, 0])
Example #51
 def test_cross_correlation_env(self):
     '''
     Envelope of sine vs cosine
     '''
     # Sine with phase shift phi vs cosine for different frequencies
     nlags = 800  # nlags need to be smaller than N/2 b/c border effects
     signal = np.zeros((self.n_samples, 2))
     signal[:, 0] = 0.2 * np.sin(2. * np.pi * self.freq * self.time)
     signal[:, 1] = 5.3 * np.cos(2. * np.pi * self.freq * self.time)
     # Convert signal to neo.AnalogSignal
     signal = neo.AnalogSignal(signal,
                               units='mV',
                               t_start=0. * pq.ms,
                               sampling_rate=self.sampling_rate,
                               dtype=float)
     env = elephant.signal_processing.cross_correlation_function(
         signal, [0, 1], nlags=nlags, env=True)
     # Envelope should be one for sinusoidal function
     assert_array_almost_equal(env, np.ones_like(env), decimal=2)
Example #52
    def test_zscore_single_inplace_int(self):
        """
        Test if the z-score is correctly calculated even if the input is an
        AnalogSignal of type int, asking for an inplace operation.
        """
        signal = neo.AnalogSignal(
            self.test_seq1, units='mV',
            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=int)

        m = np.mean(self.test_seq1)
        s = np.std(self.test_seq1)
        target = (self.test_seq1 - m) / s

        assert_array_almost_equal(
            elephant.signal_processing.zscore(signal, inplace=True).magnitude,
            target.reshape(-1, 1).astype(int), decimal=9)

        # Assert original signal is overwritten
        self.assertEqual(signal[0].magnitude, target.astype(int)[0])
Example #53
    def test_wavelet_phase(self):
        """
        Tests phase properties of the obtained wavelet transform
        """
        # check that the phase of WT is (almost) same as that of the original
        # sinusoid
        wt = elephant.signal_processing.wavelet_transform(self.test_data,
                                                          self.test_freq1)
        phase = np.angle(wt[int(len(wt)/3):int(len(wt)//3*2), 0])
        true_phase = self.true_phase1[int(len(wt)/3):int(len(wt)//3*2)]
        assert_array_almost_equal(np.exp(1j*phase), np.exp(1j*true_phase),
                                  decimal=6)

        # check that zero padding hardly affect the result
        wt_padded = elephant.signal_processing.wavelet_transform(
            self.test_data, self.test_freq1, zero_padding=False)
        phase_padded = np.angle(wt_padded[int(len(wt)/3):int(len(wt)//3*2), 0])
        assert_array_almost_equal(np.exp(1j*phase_padded), np.exp(1j*phase),
                                  decimal=9)
Example #54
    def assert_output_data_almost_equal(self, component_names):
        for component in component_names:
            self.assertIn(component, self.found_components)

            expected_data = self.expected_sim.output.continuous_data[component]
            data = self.output_sim.output.continuous_data[component]

            assert_array_almost_equal(expected_data.x_data,
                                      data.x_data,
                                      decimal=5)

            if np.issubdtype(data.y_data.dtype, np.number):
                assert_array_almost_equal(expected_data.y_data,
                                          data.y_data,
                                          decimal=5)
            else:
                # These are arrays of strings. Compare them as lists:
                self.assertEqual(expected_data.y_data.tolist(),
                                 data.y_data.tolist())
Example #55
def test_light_source_shading_color_range():
    # see also
    #http://matplotlib.org/examples/pylab_examples/shading_example.html

    from matplotlib.colors import LightSource
    from matplotlib.colors import Normalize

    refinput = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
    norm = Normalize(vmin=0, vmax=50)
    ls = LightSource(azdeg=0, altdeg=65)
    testoutput = ls.shade(refinput, plt.cm.jet, norm=norm)
    refoutput = np.array([[[0., 0., 0.58912656, 1.], [0., 0., 0.67825312, 1.],
                           [0., 0., 0.76737968, 1.], [0., 0., 0.85650624, 1.]],
                          [[0., 0., 0.9456328, 1.], [0., 0., 1., 1.],
                           [0., 0.04901961, 1., 1.], [0., 0.12745098, 1., 1.]],
                          [[0., 0.22156863, 1., 1.], [0., 0.3, 1., 1.],
                           [0., 0.37843137, 1., 1.], [0., 0.45686275, 1.,
                                                      1.]]])
    assert_array_almost_equal(refoutput, testoutput)
Example #56
    def test_zscore_list_inplace(self):
        """
        Test zscore on a list of AnalogSignal objects, asking for an
        inplace operation.
        """
        signal1 = neo.AnalogSignal(np.transpose(
            np.vstack([self.test_seq1, self.test_seq1])),
                                   units='mV',
                                   t_start=0. * pq.ms,
                                   sampling_rate=1000. * pq.Hz,
                                   dtype=float)
        signal2 = neo.AnalogSignal(np.transpose(
            np.vstack([self.test_seq1, self.test_seq2])),
                                   units='mV',
                                   t_start=0. * pq.ms,
                                   sampling_rate=1000. * pq.Hz,
                                   dtype=float)
        signal_list = [signal1, signal2]

        m = np.mean(np.hstack([self.test_seq1, self.test_seq1]))
        s = np.std(np.hstack([self.test_seq1, self.test_seq1]))
        target11 = (self.test_seq1 - m) / s
        target21 = (self.test_seq1 - m) / s
        m = np.mean(np.hstack([self.test_seq1, self.test_seq2]))
        s = np.std(np.hstack([self.test_seq1, self.test_seq2]))
        target12 = (self.test_seq1 - m) / s
        target22 = (self.test_seq2 - m) / s

        # Call elephant function
        result = elephant.signal_processing.zscore(signal_list, inplace=True)

        assert_array_almost_equal(result[0].magnitude,
                                  np.transpose(np.vstack([target11,
                                                          target12])),
                                  decimal=9)
        assert_array_almost_equal(result[1].magnitude,
                                  np.transpose(np.vstack([target21,
                                                          target22])),
                                  decimal=9)

        # Assert original signal is overwritten
        self.assertEqual(signal1[0, 0].magnitude, target11[0])
        self.assertEqual(signal2[0, 0].magnitude, target21[0])
Example #57
    def test_distortion_metric(self):
        """
        Test the distortion metric of the k-elbow visualizer
        """
        visualizer = KElbowVisualizer(
            KMeans(random_state=0),
            k=5,
            metric="distortion",
            timings=False,
            locate_elbow=False,
        )
        visualizer.fit(self.clusters.X)

        expected = np.array([69.100065, 54.081571, 43.146921, 34.978487])
        assert len(visualizer.k_scores_) == 4

        visualizer.finalize()
        self.assert_images_similar(visualizer, tol=0.03)
        assert_array_almost_equal(visualizer.k_scores_, expected)
Example #58
def test_buffering_read_write():
    assert not os.path.exists('testfile.tar')
    try:
        df = pandas.DataFrame(numpy.random.normal(size=(100000, 10)),
                              columns=['x%d' % i for i in range(10)])
        writer = PandasBufferingStreamWriter('testfile.tar',
                                             max_chunk_cells=1000000)
        for _, row in df.iterrows():
            writer.write_row(row)
        writer.close()
        reader = PandasBufferingStreamReader('testfile.tar')
        new_values = []
        for row in reader:
            new_values.append(row)
        new_df = pandas.DataFrame(new_values)
        assert_array_almost_equal(numpy.asarray(df), numpy.asarray(new_df))
        assert_list_equal(list(df.columns), list(new_df.columns))
    finally:
        os.remove('testfile.tar')
Example #59
def test_grid_search_iid():
    # test the iid parameter
    # noise-free simple 2d-data
    X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]],
                      random_state=0,
                      cluster_std=0.1,
                      shuffle=False,
                      n_samples=80)
    # split dataset into two folds that are not iid
    # first one contains data of all 4 blobs, second only from two.
    mask = np.ones(X.shape[0], dtype=bool)
    mask[np.where(y == 1)[0][::2]] = 0
    mask[np.where(y == 2)[0][::2]] = 0
    # this leads to perfect classification on one fold and a score of 1/3 on
    # the other
    svm = SVC(kernel='linear')
    # create "cv" for splits
    cv = [[mask, ~mask], [~mask, mask]]
    # once with iid=True (default)
    grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    tm.assert_equal(first.parameters['C'], 1)
    tm.assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # for first split, 1/4 of dataset is in test, for second 3/4.
    # take weighted average
    tm.assert_almost_equal(first.mean_validation_score,
                           1 * 1. / 4. + 1. / 3. * 3. / 4.)

    # once with iid=False
    grid_search = GridSearchCV(svm,
                               param_grid={'C': [1, 10]},
                               cv=cv,
                               iid=False)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    assert first.parameters['C'] == 1
    # scores are the same as above
    tm.assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # averaged score is just mean of scores
    tm.assert_almost_equal(first.mean_validation_score,
                           np.mean(first.cv_validation_scores))
Example #60
    def test_rbf_kernel_forward(self):
        '''Make sure we know the RBF kernel is working'''
        # Test basic functionality
        x1 = np.array([0.0, 0.0])
        x2 = np.array([1.0, 1.0])
        log_l = np.log(2.0)
        eps = 1e-5

        sq_dist = ((x2 - x1)**2).sum()
        expected = np.exp(-1.0 / np.exp(log_l) * sq_dist)

        x1_var = make_torch_variable([x1], requires_grad=False)
        x2_var = make_torch_variable([x2], requires_grad=False)
        log_l_var = make_torch_variable([log_l], requires_grad=True)

        test = rbf_kernel_forward(x1_var, x2_var, log_l_var, eps=eps)

        assert_array_almost_equal(expected, test.data.numpy()[0, 0], decimal=5)

        # Make sure the gradient gets through

        test.sum().backward()
        self.assertIsNotNone(log_l_var.grad)

        # Test safety valve

        bad_log_l = -1e6

        expected_bad = np.exp(-1.0 / eps * sq_dist)

        bad_log_l_var = make_torch_variable([bad_log_l], requires_grad=True)

        test_bad = rbf_kernel_forward(x1_var, x2_var, bad_log_l_var, eps=eps)

        assert_array_almost_equal(expected_bad,
                                  test_bad.data.numpy()[0, 0],
                                  decimal=5)

        # Make sure the gradient gets through

        test_bad.sum().backward()
        self.assertIsNotNone(bad_log_l_var.grad)
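A plain-numpy restatement of what the test expects from rbf_kernel_forward: an RBF kernel whose length scale exp(log_l) is floored at eps, the "safety valve" exercised above (the torch version additionally carries gradients):

import numpy as np

def rbf_kernel_sketch(x1, x2, log_l, eps=1e-5):
    sq_dist = np.sum((np.asarray(x2) - np.asarray(x1)) ** 2)
    length_scale = max(np.exp(log_l), eps)   # floor the length scale at eps
    return np.exp(-sq_dist / length_scale)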