Example #1
 def check_read(self, example, a, info):
     f = open(self.fn, 'w')
     f.write(example)
     f.close()
     assert_equal(mminfo(self.fn), info)
     b = mmread(self.fn).todense()
     assert_array_almost_equal(a, b)
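A self-contained round-trip in the same spirit, sketched with scipy.io directly and an in-memory buffer instead of the fixture's temporary file (a sketch, assuming only numpy and scipy):

import io
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.io import mmread, mmwrite

a = np.array([[1.0, 0.0], [0.0, 2.5]])
buf = io.BytesIO()
mmwrite(buf, a)           # write Matrix Market data to the buffer
buf.seek(0)
b = mmread(buf)           # read it back as a dense ndarray
assert_array_almost_equal(a, b)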
Example #2
 def test_2d_complex_same(self):
     a = array([[1+2j,3+4j,5+6j],[2+1j,4+3j,6+5j]])
     c = signal.fftconvolve(a,a)
     d = array([[-3+4j,-10+20j,-21+56j,-18+76j,-11+60j],
                [10j,44j,118j,156j,122j],
                [3+4j,10+20j,21+56j,18+76j,11+60j]])
     assert_array_almost_equal(c,d)
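The hard-coded matrix d above is just the full 2-D self-convolution of a; a quick cross-check against scipy's direct method (a sketch, assuming only numpy and scipy):

import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy import signal

a = np.array([[1+2j, 3+4j, 5+6j], [2+1j, 4+3j, 6+5j]])
# 'full'-mode fftconvolve should agree with the direct 2-D convolution
assert_array_almost_equal(signal.fftconvolve(a, a), signal.convolve2d(a, a))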
Example #3
def test_degenerate_drainage():
    """
    This "hourglass" configuration should be one of the hardest to correctly
    re-route.
    """
    mg = RasterModelGrid(9, 5)
    z_init = mg.node_x.copy()*0.0001 + 1.
    lake_pits = np.array([7, 11, 12, 13, 17, 27, 31, 32, 33, 37])
    z_init[lake_pits] = -1.
    z_init[22] = 0.  # the common spill pt for both lakes
    z_init[21] = 0.1  # an adverse bump in the spillway
    z_init[20] = -0.2  # the spillway
    z = mg.add_field('node', 'topographic__elevation', z_init)

    fr = FlowRouter(mg)
    lf = DepressionFinderAndRouter(mg)
    fr.route_flow()
    lf.map_depressions()

    correct_A = np.array([ 0.,   0.,   0.,   0.,   0.,
                           0.,   1.,   3.,   1.,   0.,
                           0.,   5.,   1.,   2.,   0.,
                           0.,   1.,  10.,   1.,   0.,
                          21.,  21.,   1.,   1.,   0.,
                           0.,   1.,   9.,   1.,   0.,
                           0.,   3.,   1.,   2.,   0.,
                           0.,   1.,   1.,   1.,   0.,
                           0.,   0.,   0.,   0.,   0.])
    
    thelake = np.sort(np.concatenate((lake_pits, [22])))  # np.sort, not .sort(): ndarray.sort() returns None

    assert_array_almost_equal(mg.at_node['drainage_area'], correct_A)
Example #4
 def test_changing_nthreads_01_dec(self):
     a = linspace(-1, 1, int(1e6))  # num must be an integer in modern numpy
     b = ((.25*a + .75)*a - 1.5)*a - 2
     for nthreads in range(6, 1, -1):
         numexpr.set_num_threads(nthreads)
         c = evaluate("((.25*a + .75)*a - 1.5)*a - 2")
         assert_array_almost_equal(b, c)
Example #5
def test_D8_D4_fill():
    """
    Tests the functionality of D4 filling.
    """
    lfD8.map_depressions(pits=None, reroute_flow=False)
    lfD4.map_depressions(pits=None, reroute_flow=False)
    assert_equal(lfD8.number_of_lakes, 1)
    assert_equal(lfD4.number_of_lakes, 3)
    
    correct_D8_lake_map = np.empty(7*7, dtype=int)
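    # XX, lake_nodes, mg1, mg2, lfD8 and lfD4 are module-level fixtures defined
    # elsewhere in the original test file; XX is presumably the sentinel value
    # marking nodes that belong to no lake.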
    correct_D8_lake_map.fill(XX)
    correct_D8_lake_map[lake_nodes] = 10
    correct_D4_lake_map = correct_D8_lake_map.copy()
    correct_D4_lake_map[lake_nodes[5:]] = 32
    correct_D4_lake_map[lake_nodes[-2]] = 38
    correct_D8_depths = np.zeros(7*7, dtype=float)
    correct_D8_depths[lake_nodes] = 2.
    correct_D4_depths = correct_D8_depths.copy()
    correct_D4_depths[lake_nodes[5:]] = 4.
    correct_D4_depths[lake_nodes[-2]] = 3.
    
    assert_array_equal(lfD8.lake_map, correct_D8_lake_map)
    assert_array_equal(lfD4.lake_map, correct_D4_lake_map)
    
    assert_array_almost_equal(mg1.at_node['depression__depth'],
                              correct_D8_depths)
    assert_array_almost_equal(mg2.at_node['depression__depth'],
                              correct_D4_depths)
Example #6
def test_changing_slopes():
    """
    Test with the output from a successful run of fr.route_flow.
    """
    slope_old = np.array(
        [ 0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
          0.        ,  0.        ,  0.        ,  2.        ,  2.        ,
          2.        ,  2.        ,  2.        ,  0.        ,  0.        ,
          2.        ,  0.1       ,  0.        ,  0.1       ,  2.        ,
          0.        ,  0.        ,  2.        ,  0.14142136,  0.1       ,
          0.14142136,  2.        ,  0.        ,  0.        ,  2.        ,
          1.2       ,  1.        ,  1.        ,  2.        ,  0.        ,
          0.        ,  1.06066017,  1.1       ,  1.06066017,  1.        ,
          1.        ,  0.        ,  0.        ,  0.        ,  0.        ,
          0.        ,  0.        ,  0.        ,  0.        ])
    slope_new = np.array(
        [ 0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
          0.        ,  0.        ,  0.        ,  2.        ,  2.        ,
          2.        ,  2.        ,  2.        ,  0.        ,  0.        ,
          2.        ,  0.        ,  0.        ,  0.        ,  2.        ,
          0.        ,  0.        ,  2.        ,  0.        ,  0.        ,
          0.        ,  2.        ,  0.        ,  0.        ,  2.        ,
          1.2       ,  1.        ,  1.        ,  2.        ,  0.        ,
          0.        ,  1.06066017,  1.1       ,  1.06066017,  1.        ,
          1.        ,  0.        ,  0.        ,  0.        ,  0.        ,
          0.        ,  0.        ,  0.        ,  0.        ])
    fr.run_one_step()
    assert_array_almost_equal(mg.at_node['topographic__steepest_slope'],
                              slope_old)
    lf.map_depressions()
    assert_array_almost_equal(mg.at_node['topographic__steepest_slope'],
                              slope_new)
Example #7
def test_pits_as_IDs():
    """
    Smoke test for passing specific IDs, not an array, to the mapper.
    """
    fr.route_flow()
    lf.map_depressions(pits=np.where(mg.at_node['flow__sink_flag'])[0])
    assert_array_almost_equal(mg.at_node['drainage_area'], A_new)
Example #8
def _compare(a, b):
    """Compare two python objects."""
    global last_keys
    skip_types = ['whitener', 'proj', 'reginv', 'noisenorm', 'nchan',
                  'command_line', 'working_dir', 'mri_file', 'mri_id']
    try:
        if isinstance(a, (dict, Info)):
            assert_true(isinstance(b, (dict, Info)))
            for k, v in six.iteritems(a):
                if k not in b and k not in skip_types:
                    raise ValueError('First one had a key the second one didn\'t:\n'
                                     '%s not in %s' % (k, b.keys()))
                if k not in skip_types:
                    last_keys.pop()
                    last_keys = [k] + last_keys
                    _compare(v, b[k])
            for k, v in six.iteritems(b):
                if k not in a and k not in skip_types:
                    raise ValueError('Second one had a key the first one didn\'t:\n'
                                     '%s not in %s' % (k, a.keys()))
        elif isinstance(a, list):
            assert_true(len(a) == len(b))
            for i, j in zip(a, b):
                _compare(i, j)
        elif isinstance(a, sparse.csr.csr_matrix):
            assert_array_almost_equal(a.data, b.data)
            assert_equal(a.indices, b.indices)
            assert_equal(a.indptr, b.indptr)
        elif isinstance(a, np.ndarray):
            assert_array_almost_equal(a, b)
        else:
            assert_equal(a, b)
    except Exception:
        print(last_keys)
        raise
Example #9
def test_initial_routing():
    """
    Test the action of fr.route_flow() on the grid.
    """
    fr.route_flow()
    assert_array_equal(mg.at_node['flow__receiver_node'], r_old)
    assert_array_almost_equal(mg.at_node['drainage_area'], A_old)
Example #10
def test_D8_D4_route():
    """
    Tests the functionality of D4 routing.
    """
    frD8.route_flow()
    frD4.route_flow()
    lfD8.map_depressions()
    lfD4.map_depressions()
    assert_equal(lfD8.number_of_lakes, 1)
    assert_equal(lfD4.number_of_lakes, 3)

    flow_recD8 = np.array([ 0,  1,  2,  3,  4,  5,  6,  7, 16, 10, 16, 10, 18,
                           13, 14, 14, 15, 16, 10, 18, 20, 21, 16, 16, 16, 18,
                           33, 27, 28, 28, 24, 24, 24, 32, 34, 35, 35, 38, 32,
                           32, 32, 41, 42, 43, 44, 45, 46, 47, 48])
    flow_recD4 = np.array([ 0,  1,  2,  3,  4,  5,  6,  7,  7, 10, 17, 10, 11,
                           13, 14, 14, 15, 16, 17, 18, 20, 21, 21, 16, 17, 18,
                           33, 27, 28, 28, 29, 24, 31, 32, 34, 35, 35, 36, 37,
                           32, 33, 41, 42, 43, 44, 45, 46, 47, 48])
    assert_array_equal(mg1.at_node['flow__receiver_node'], flow_recD8)
    assert_array_equal(mg2.at_node['flow__receiver_node'], flow_recD4)
    assert_array_almost_equal(mg1.at_node['drainage_area'].reshape((7,7))[:,
                                  0].sum(),
                              mg2.at_node['drainage_area'].reshape((7,7))[:,
                                  0].sum())
Example #11
 def test_unbounded_approximated(self):
     """ SLSQP: unbounded, approximated jacobian. """
     res = fmin_slsqp(self.fun, [-1.0, 1.0], args = (-1.0, ),
                      iprint = 0, full_output = 1)
     x, fx, its, imode, smode = res
     assert_(imode == 0, imode)
     assert_array_almost_equal(x, [2, 1])
Example #12
    def compare_trajectory_files(self, ltraj):
        """ Function to compare trajectory files.

        The idea is to store all the numbers in the file in a list and then use
        numpy to compare the two lists. The numbers are recognized by exploiting
        the error that the float function raises when applied to strings.

        The strings are compared directly.
        """

        err = False
        for traj in ltraj:
            for straj in traj:
                new_w_list = []
                old_w_list = []
                name = os.path.basename(straj['old_filename'])
                try:
                    old_content = open(straj['old_filename'])
                except IOError as _err:
                    if _err.errno == errno.ENOENT:  # needs `import errno`; os.errno was removed in Python 3.7
                        self.msg += 'File %s not found!\n' % straj['old_filename']
                        self.test_status = 'ERROR'
                        continue

                try:
                    new_content = open(straj['new_filename'])
                except IOError as _err:
                    if _err.errno == errno.ENOENT:
                        self.msg += 'File %s not found!\n' % straj['new_filename']
                        self.test_status = 'ERROR'
                        continue

                line_c = 1
                for old_line, new_line in zip(old_content, new_content):
                    word_c = 1
                    for old_w, new_w in zip(old_line.split(),
                                            new_line.split()):
                        try:
                            old_w_list.append(float(old_w))
                            new_w_list.append(float(new_w))
                        except ValueError:
                            try:
                                assert old_w == new_w
                            except AssertionError:
                                self.msg += 'Differences at line %d word %d of file %s\n' % (line_c, word_c, name)
                                self.test_status = 'FAILED'

                        word_c += 1
                    line_c += 1

                try:
                    npt.assert_array_almost_equal(np.array(new_w_list),
                                                  np.array(old_w_list),
                                                  _parser()['precision'])
                except AssertionError:
                    self.msg += 'Differences in the %s file\n' % name
                    self.test_status = 'FAILED'
                    continue

        return err
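The docstring's trick is worth isolating: float() raises ValueError on non-numeric tokens, which lets a streaming parser separate numbers from strings. A minimal sketch (the split_tokens helper is hypothetical, not part of the test suite):

import numpy as np
import numpy.testing as npt

def split_tokens(lines):
    """Split whitespace-delimited lines into numeric and string tokens."""
    numbers, strings = [], []
    for line in lines:
        for token in line.split():
            try:
                numbers.append(float(token))   # numeric token
            except ValueError:
                strings.append(token)          # plain string token
    return np.array(numbers), strings

old_nums, old_strs = split_tokens(["step 1 0.5", "step 2 0.25"])
new_nums, new_strs = split_tokens(["step 1 0.5", "step 2 0.2500001"])
assert old_strs == new_strs
npt.assert_array_almost_equal(old_nums, new_nums, decimal=5)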
Example #13
def test_gzip_simple():
    xdense = np.zeros((20,20))
    xdense[2,3] = 2.3
    xdense[4,5] = 4.5
    x = SP.csc_matrix(xdense)

    name = 'gzip_test'
    expected = {'x':x}
    format = '4'

    tmpdir = mkdtemp()
    try:
        fname = pjoin(tmpdir,name)
        mat_stream = gzip.open(fname,mode='wb')
        savemat(mat_stream, expected, format=format)
        mat_stream.close()

        mat_stream = gzip.open(fname,mode='rb')
        actual = loadmat(mat_stream, struct_as_record=True)
        mat_stream.close()
    finally:
        shutil.rmtree(tmpdir)

    assert_array_almost_equal(actual['x'].todense(),
                              expected['x'].todense(),
                              err_msg=repr(actual))
Example #14
def test_arburg_imag_output():
    a, b, c = arburg(marple_data, 15)


    a_e, b_e, c_e = (numpy.array([ 2.70936368 -0.77610302j,  5.17482864 -2.73293024j,
        7.03527787 -6.15070038j,  7.89423853-10.20591369j,
        6.84853701-14.07469247j,  4.56915619-16.84486008j,
        1.32687590-18.13284671j, -1.87811360-17.49937286j,
       -4.64976221-15.05888331j, -6.22557823-11.25070227j,
       -6.28367510 -6.93498375j, -4.89652279 -3.24910899j,
       -2.99758653 -0.8736847j , -1.32183647 +0.04527281j,
       -0.35565856 +0.14754881j]),
     0.0054379699760549929,
     numpy.array([-0.18570222-0.87179346j,  0.26402371-0.5190592j ,
        0.07162311-0.46372011j,  0.44463099+0.05080174j,
       -0.02634972-0.14691215j,  0.19255061-0.37032848j,
       -0.25994598-0.55924338j, -0.20237974-0.23641516j,
       -0.40546748-0.40598876j, -0.47824854-0.42553068j,
       -0.51507096-0.49435948j, -0.32530245-0.49134098j,
       -0.21950049-0.37261937j, -0.28613904-0.0921211j ,
       -0.35565856+0.14754881j]))


    assert_array_almost_equal(a, a_e)
    assert_almost_equal(b,  b_e)
    assert_array_almost_equal(c, c_e)
Example #15
def test_ada_fit_sample_nn_obj():
    """Test fit-sample with nn object"""

    # Resample the data
    nn = NearestNeighbors(n_neighbors=6)
    ada = ADASYN(random_state=RND_SEED, n_neighbors=nn)
    X_resampled, y_resampled = ada.fit_sample(X, Y)

    X_gt = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141],
                     [1.25192108, -0.22367336], [0.53366841, -0.30312976],
                     [1.52091956, -0.49283504], [-0.28162401, -2.10400981],
                     [0.83680821, 1.72827342], [0.3084254, 0.33299982],
                     [0.70472253, -0.73309052], [0.28893132, -0.38761769],
                     [1.15514042, 0.0129463], [0.88407872, 0.35454207],
                     [1.31301027, -0.92648734], [-1.11515198, -0.93689695],
                     [-0.18410027, -0.45194484], [0.9281014, 0.53085498],
                     [-0.14374509, 0.27370049], [-0.41635887, -0.38299653],
                     [0.08711622, 0.93259929], [1.70580611, -0.11219234],
                     [0.29427267, 0.21740707], [0.68118697, -0.25220353],
                     [1.37180201, 0.37279378], [-0.59243851, -0.80715327]])
    y_gt = np.array([
        0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0
    ])
    assert_array_almost_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
Example #16
def _test_precision_recall_curve():
    """Test Precision-Recall and aread under PR curve"""
    y_true, _, probas_pred = make_prediction(binary=True)

    p, r, thresholds = precision_recall_curve(y_true, probas_pred)
    precision_recall_auc = auc(r, p)
    assert_array_almost_equal(precision_recall_auc, 0.82, 2)
Example #17
def test_roc_curve():
    """Test Area under Receiver Operating Characteristic (ROC) curve"""
    y_true, _, probas_pred = make_prediction(binary=True)

    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.80, decimal=2)
Example #18
def test_LinearSVC():
    """
    Test basic routines using LinearSVC
    """
    clf = svm.LinearSVC().fit(X, Y)

    # by default should have intercept
    assert clf.fit_intercept

    assert_array_equal(clf.predict(T), true_result)
    assert_array_almost_equal(clf.intercept_, [0], decimal=3)

    # the same with l1 penalty
    clf = svm.LinearSVC(penalty='l1', dual=False).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty with dual formulation
    clf = svm.LinearSVC(penalty='l2', dual=True).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty, l1 loss
    clf = svm.LinearSVC(penalty='l2', loss='l1', dual=True).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # test also decision function
    dec = clf.decision_function(T).ravel()
    res = (dec > 0).astype(int) + 1  # np.int was removed in numpy 1.24
    assert_array_equal(res, true_result)
Example #19
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
    """
    Test that dense liblinear honours intercept_scaling param
    """
    X = [[2, 1],
         [3, 1],
         [1, 3],
         [2, 3]]
    y = [0, 0, 1, 1]
    clf = classifier(fit_intercept=True, penalty='l1', loss='l2',
                     dual=False, C=1, tol=1e-7)
    assert clf.intercept_scaling == 1, clf.intercept_scaling
    assert clf.fit_intercept

    # when intercept_scaling is low the intercept value is highly "penalized"
    # by regularization
    clf.intercept_scaling = 1
    clf.fit(X, y)
    assert_almost_equal(clf.intercept_, 0, decimal=5)

    # when intercept_scaling is sufficiently high, the intercept value
    # is not affected by regularization
    clf.intercept_scaling = 100
    clf.fit(X, y)
    intercept1 = clf.intercept_
    assert intercept1 < -1

    # when intercept_scaling is sufficiently high, the intercept value
    # doesn't depend on intercept_scaling value
    clf.intercept_scaling = 1000
    clf.fit(X, y)
    intercept2 = clf.intercept_
    assert_array_almost_equal(intercept1, intercept2, decimal=2)
Example #20
    def test_MR_rosEstimator(self):
        known_z = np.array([
            -2.06188401, -1.66883254, -1.4335397, -1.25837339, -1.11509471,
            -0.99166098, -0.8817426, -0.78156696, -0.68868392, -0.60139747,
            -0.51847288, -0.4389725, -0.36215721, -0.28742406, -0.21426459,
            -0.14223572, -0.07093824, 0.,         0.07093824,  0.14223572,
            0.21426459,  0.28742406,  0.36215721,  0.4389725,  0.51847288,
            0.60139747,  0.68868392,  0.78156696,  0.8817426,  0.99166098,
            1.11509471,  1.25837339,  1.4335397,  1.66883254,  2.06188401
        ])

        known_fd = np.array([
            2., 3.11029054, 3.60383412, 4.04355908, 4.04355908, 4.2, 4.62,
            4.70773991, 5.57, 5.66, 5.86, 6.13826881, 6.65, 6.78, 6.79,
            6.97698797, 7.5, 7.5, 7.5, 8.63, 8.71, 8.99, 9.85, 10.82, 11.25,
            11.25, 12.2, 14.92, 16.77, 17.81, 19.16, 19.19, 19.64, 20.18, 22.97
        ])

        fdarray = np.array(self.mr.data.final_data)
        fdarray.sort()

        # zarray = np.array(self.mr.data.Z)
        # zarray.sort()

        # nptest.assert_array_almost_equal(known_z, zarray, decimal=2)
        nptest.assert_array_almost_equal(known_fd, fdarray, decimal=2)
Example #21
def test_decision_function():
    """
    Test decision_function

    Sanity check, test that decision_function implemented in python
    returns the same as the one in libsvm

    TODO: probably could be simplified
    """
    clf = svm.SVC(kernel='linear').fit(iris.data, iris.target)

    data = iris.data[0]

    sv_start = np.r_[0, np.cumsum(clf.n_support_)]
    n_class = 3

    kvalue = np.dot(data, clf.support_vectors_.T)

    dec = np.empty(n_class * (n_class - 1) // 2)
    p = 0
    for i in range(n_class):
        for j in range(i + 1, n_class):
            coef1 = clf.dual_coef_[j - 1]
            coef2 = clf.dual_coef_[i]
            idx1 = slice(sv_start[i], sv_start[i + 1])
            idx2 = slice(sv_start[j], sv_start[j + 1])
            s = np.dot(coef1[idx1],  kvalue[idx1]) + \
                np.dot(coef2[idx2], kvalue[idx2]) + \
                clf.intercept_[p]
            dec[p] = s
            p += 1

    assert_array_almost_equal(-dec, np.ravel(clf.decision_function(data)))
Example #22
def test_scaling():
    # Test integer scaling from float
    # Analyze headers cannot do float-integer scaling
    hdr = AnalyzeHeader()
    assert_true(hdr.default_x_flip)
    shape = (1,2,3)
    hdr.set_data_shape(shape)
    hdr.set_data_dtype(np.float32)
    data = np.ones(shape, dtype=np.float64)
    S = BytesIO()
    # Writing to float datatype doesn't need scaling
    hdr.data_to_fileobj(data, S)
    rdata = hdr.data_from_fileobj(S)
    assert_array_almost_equal(data, rdata)
    # Writing to integer datatype does, and raises an error
    hdr.set_data_dtype(np.int32)
    assert_raises(HeaderTypeError, hdr.data_to_fileobj, data, BytesIO())
    # unless we aren't scaling, in which case we convert the floats to
    # integers and write
    _write_data(hdr, data, S)
    rdata = hdr.data_from_fileobj(S)
    assert_true(np.allclose(data, rdata))
    # This won't work for floats that aren't close to integers
    data_p5 = data + 0.5
    _write_data(hdr, data_p5, S)
    rdata = hdr.data_from_fileobj(S)
    assert_false(np.allclose(data_p5, rdata))
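The float-to-integer scaling the test exercises can be shown in isolation: choose a slope that maps the float range onto the integer type's range, store rounded integers, and recover approximate floats on read (a sketch of the general idea, not nibabel's exact algorithm):

import numpy as np

data = np.linspace(-1.0, 1.0, 5)
slope = np.abs(data).max() / np.iinfo(np.int16).max   # map range onto int16
stored = np.round(data / slope).astype(np.int16)      # what goes in the file
recovered = stored * slope                            # what comes back on read
np.testing.assert_array_almost_equal(data, recovered, decimal=4)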
Example #23
 def runTest(self):
     """testing auto-conversion of masked arrays and packed integers"""
     file = netCDF4.Dataset(self.file)
     datamasked = file.variables['maskeddata']
     datamasked2 = file.variables['maskeddata2']
     datamasked3 = file.variables['maskeddata3']
     datapacked = file.variables['packeddata']
     datapacked2 = file.variables['packeddata2']
     datapacked3 = file.variables['packeddata3']
     # check missing_value, scale_factor and add_offset attributes.
     assert datamasked.missing_value == missing_value
     assert datapacked.scale_factor == scale_factor
     assert datapacked.add_offset == add_offset
     # no auto-conversion.
     datamasked.set_auto_maskandscale(False)
     datamasked2.set_auto_maskandscale(False)
     datapacked.set_auto_maskandscale(False)
     assert_array_equal(datapacked[:],packeddata2)
     assert_array_equal(datamasked3[:],marr3)
     assert_array_almost_equal(datamasked[:],ranarr)
     assert_array_almost_equal(datamasked2[:],ranarr2)
     # auto-conversion
     datamasked.set_auto_maskandscale(True)
     datamasked2.set_auto_maskandscale(True)
     datapacked.set_auto_maskandscale(True)
     datapacked2.set_auto_maskandscale(False)
     assert_array_almost_equal(datamasked[:].filled(),ranarr)
     assert_array_almost_equal(datamasked2[:].filled(),ranarr2)
     assert_array_almost_equal(datapacked[:],packeddata,decimal=4)
     assert(datapacked3[:].dtype == NP.float64)  # NP.float (an alias of builtin float) was removed in numpy 1.24
     # added to test fix to issue 46 (result before r865 was 10)
     assert_array_equal(datapacked2[0],11)
     file.close()
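The packed variables above follow the standard netCDF scale_factor/add_offset convention; a minimal standalone sketch of that arithmetic (illustrative values):

import numpy as np

scale_factor, add_offset = 0.01, 5.0
unpacked = np.array([4.98, 5.00, 5.03])
# stored on disk as small integers:
packed = np.round((unpacked - add_offset) / scale_factor).astype(np.int16)
# recovered on read when auto-scaling is enabled:
recovered = packed * scale_factor + add_offset
np.testing.assert_array_almost_equal(recovered, unpacked, decimal=2)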
Example #24
 def test_data_scaling(self):
     hdr = self.header_class()
     hdr.set_data_shape((1,2,3))
     hdr.set_data_dtype(np.int16)
     S3 = BytesIO()
     data = np.arange(6, dtype=np.float64).reshape((1,2,3))
     # This uses scaling
     hdr.data_to_fileobj(data, S3)
     data_back = hdr.data_from_fileobj(S3)
     # almost equal
     assert_array_almost_equal(data, data_back, 4)
     # But not quite
     assert_false(np.all(data == data_back))
     # This is exactly the same call, just testing it works twice
     data_back2 = hdr.data_from_fileobj(S3)
     assert_array_equal(data_back, data_back2)
     # Rescaling is the default
     hdr.data_to_fileobj(data, S3, rescale=True)
     data_back = hdr.data_from_fileobj(S3)
     assert_array_almost_equal(data, data_back, 4)
     assert_false(np.all(data == data_back))
     # This doesn't use scaling, and so gets perfect precision
     hdr.data_to_fileobj(data, S3, rescale=False)
     data_back = hdr.data_from_fileobj(S3)
     assert_true(np.all(data == data_back))
Example #25
def test_decision_function():
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
    # multi class:
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(iris.data, iris.target)

    dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_

    assert_array_almost_equal(dec, clf.decision_function(iris.data))

    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    assert_array_almost_equal(
        prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(int)])
    expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)

    # kernel binary:
    clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
    clf.fit(X, Y)

    rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
    dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
Example #26
def test_linearsvc():
    # Test basic routines using LinearSVC
    clf = svm.LinearSVC(random_state=0).fit(X, Y)

    # by default should have intercept
    assert_true(clf.fit_intercept)

    assert_array_equal(clf.predict(T), true_result)
    assert_array_almost_equal(clf.intercept_, [0], decimal=3)

    # the same with l1 penalty
    clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty with dual formulation
    clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty, l1 loss
    clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
    clf.fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # test also decision function
    dec = clf.decision_function(T)
    res = (dec > 0).astype(int) + 1
    assert_array_equal(res, true_result)
Example #27
def test_anisotropic_power():
    for n_coeffs in [6, 15, 28, 45, 66, 91]:
        for norm_factor in [0.0005, 0.00001]:

            # Create some really simple cases:
            coeffs = np.ones((3, n_coeffs))
            max_order = calculate_max_order(coeffs.shape[-1])
            # For the case where all coeffs == 1, the ap is simply log of the
            # number of even orders up to the maximal order:
            analytic = (np.log(len(range(2, max_order + 2, 2))) -
                        np.log(norm_factor))

            answers = [analytic] * 3
            apvals = anisotropic_power(coeffs, norm_factor=norm_factor)
            assert_array_almost_equal(apvals, answers)
            # Test that this works for single voxel arrays as well:
            assert_array_almost_equal(
                anisotropic_power(coeffs[1],
                                  norm_factor=norm_factor),
                answers[1])

    # Test that even when we look at an all-zeros voxel, this
    # avoids a log-of-zero warning:
    with warnings.catch_warnings(record=True) as w:
        assert_equal(anisotropic_power(np.zeros(6)), 0)
        assert len(w) == 0
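To make the analytic value above concrete, here is the arithmetic for one case, assuming 15 coefficients (so max_order = 4) and norm_factor = 0.0005:

import numpy as np

max_order, norm_factor = 4, 0.0005
n_even_orders = len(range(2, max_order + 2, 2))   # orders 2 and 4 -> 2
analytic = np.log(n_even_orders) - np.log(norm_factor)
print(analytic)   # ~8.294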
Example #28
 def test_rational_expr(self):
     func = NumExpr((E.a + 2.0*E.b) / (1 + E.a + 4*E.b*E.b))
     a = arange(1e6)
     b = arange(1e6) * 0.1
     x = (a + 2*b) / (1 + a + 4*b*b)
     y = func(a, b)
     assert_array_almost_equal(x, y)
Example #29
def test_byte_order_median():
    """Regression test for #413: median_filter does not handle byte orders."""
    a = np.arange(9, dtype='<f4').reshape(3, 3)
    ref = ndimage.filters.median_filter(a,(3, 3))
    b = np.arange(9, dtype='>f4').reshape(3, 3)
    t = ndimage.filters.median_filter(b, (3, 3))
    assert_array_almost_equal(ref, t)
Example #30
def test_vector_conversion():
    event = get_event(ep=True)
    assert_array_almost_equal(
        event.view(DTYPE),
        ptepm2ep(ep2ptepm(event)).view(DTYPE))
Example #31
def test_tensor_model():
    fdata, fbval, fbvec = get_fnames('small_25')
    data1 = load_nifti_data(fdata)
    gtab1 = grad.gradient_table(fbval, fbvec)
    data2, gtab2 = dsi_voxels()
    for data, gtab in zip([data1, data2], [gtab1, gtab2]):
        dm = dti.TensorModel(gtab, 'LS')
        dtifit = dm.fit(data[0, 0, 0])
        npt.assert_equal(dtifit.fa < 0.9, True)
        dm = dti.TensorModel(gtab, 'WLS')
        dtifit = dm.fit(data[0, 0, 0])
        npt.assert_equal(dtifit.fa < 0.9, True)
        npt.assert_equal(dtifit.fa > 0, True)
        sphere = create_unit_sphere(4)
        npt.assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
        # Check that the multivoxel case works:
        dtifit = dm.fit(data)

        # Check that it works on signal that has already been normalized to S0:
        dm_to_relative = dti.TensorModel(gtab)
        if np.any(gtab.b0s_mask):
            relative_data = (data[0, 0, 0]/np.mean(data[0, 0, 0,
                                                        gtab.b0s_mask]))

            dtifit_to_relative = dm_to_relative.fit(relative_data)
            npt.assert_almost_equal(dtifit.fa[0, 0, 0], dtifit_to_relative.fa,
                                    decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    npt.assert_equal(dtifit.fa.shape, data.shape[:3])
    npt.assert_equal(dtifit.ad.shape, data.shape[:3])
    npt.assert_equal(dtifit.md.shape, data.shape[:3])
    npt.assert_equal(dtifit.rd.shape, data.shape[:3])
    npt.assert_equal(dtifit.trace.shape, data.shape[:3])
    npt.assert_equal(dtifit.mode.shape, data.shape[:3])
    npt.assert_equal(dtifit.linearity.shape, data.shape[:3])
    npt.assert_equal(dtifit.planarity.shape, data.shape[:3])
    npt.assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    npt.assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_fnames('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = (3 * np.sqrt(6) * np.linalg.det(A_squiggle /
            np.linalg.norm(A_squiggle)))
    evals_eigh, evecs_eigh = np.linalg.eigh(tensor)
    # Sort according to eigen-value from large to small:
    evecs = evecs_eigh[:, np.argsort(evals_eigh)[::-1]]
    # Check that eigenvalues and eigenvectors are properly sorted through
    # that previous operation:
    for i in range(3):
        npt.assert_array_almost_equal(np.dot(tensor, evecs[:, i]),
                                      evals[i] * evecs[:, i])
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    npt.assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method,
                                       return_S0_hat=True)

        tensor_fit = tensor_model.fit(Y)
        assert tensor_fit.model is tensor_model
        npt.assert_equal(tensor_fit.shape, Y.shape[:-1])
        npt.assert_array_almost_equal(tensor_fit.evals[0], evals)
        npt.assert_array_almost_equal(tensor_fit.S0_hat, b0, decimal=3)
        # Test that the eigenvectors are correct, one-by-one:
        for i in range(3):
            # Eigenvectors have intrinsic sign ambiguity
            # (see
            # http://prod.sandia.gov/techlib/access-control.cgi/2007/076422.pdf)
            # so we need to allow for sign flips. One of the following should
            # always be true:
            npt.assert_(np.all(np.abs(tensor_fit.evecs[0][:, i] -
                                      evecs[:, i]) < 10e-6) or
                        np.all(np.abs(-tensor_fit.evecs[0][:, i] -
                                      evecs[:, i]) < 10e-6))
            # We set a fixed tolerance of 10e-6, similar to array_almost_equal

        err_msg = "Calculation of tensor from Y does not compare to "
        err_msg += "analytical solution"
        npt.assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                      err_msg=err_msg)

        npt.assert_almost_equal(tensor_fit.md[0], md)
        npt.assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        npt.assert_equal(tensor_fit.directions.shape[-2], 1)
        npt.assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    npt.assert_raises(ValueError,
                      dti.TensorModel,
                      gtab,
                      fit_method='crazy_method')

    # Test custom fit tensor method
    try:
        model = dti.TensorModel(gtab, fit_method=lambda *args, **kwargs: 42)
        fit = model.fit_method()
    except Exception as exc:
        assert False, "TensorModel should accept custom fit methods: %s" % exc
    assert fit == 42, "Custom fit method for TensorModel returned %s." % fit

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    npt.assert_array_almost_equal(fit[0].evals, evals)

    # Return S0_test
    tensor_model = dti.TensorModel(gtab, return_S0_hat=True)
    fit = tensor_model.fit(data)
    npt.assert_array_almost_equal(fit[0].evals, evals)
    npt.assert_array_almost_equal(fit[0].S0_hat, b0)

    # Evals should be high for high diffusion voxel
    assert all(fit[1].evals > evals[0] * .9)

    # Evals should be zero where data is masked
    npt.assert_array_almost_equal(fit[2].evals, 0.)
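The sign-flip check inside the eigenvector loop above is a reusable pattern: v and -v define the same axis, so comparisons must accept either sign. A hypothetical helper sketching it:

import numpy as np

def vectors_match_up_to_sign(u, v, tol=1e-5):
    """True if u equals v or -v to within tol, elementwise."""
    return np.all(np.abs(u - v) < tol) or np.all(np.abs(-u - v) < tol)

v = np.array([1.0, 0.0, 0.0])
assert vectors_match_up_to_sign(v, v)
assert vectors_match_up_to_sign(v, -v)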
Example #32
 def test_list_peaks(self):
     out = peakutils.indexes([1, 2, 1, 3, 5, 7, 4, 1], thres=0, min_dist=1)
     expected = numpy.array([1, 5])
     assert_array_almost_equal(out, expected)
Example #33
 def test_near_peaks2(self):
     out = peakutils.indexes(self.near, thres=0, min_dist=1)
     expected = numpy.array([1, 3, 5, 7, 9])
     assert_array_almost_equal(out, expected)
Example #34
    def test_absolute_threshold(self):
        x = [0, 5, 0, 8, 0, 15, 0]
        out1 = peakutils.indexes(x, thres=3, thres_abs=True)
        assert_array_almost_equal(out1, [1, 3, 5])

        out2 = peakutils.indexes(x, thres=5, thres_abs=True)
        assert_array_almost_equal(out2, [3, 5])

        out3 = peakutils.indexes(x, thres=7, thres_abs=True)
        assert_array_almost_equal(out3, [3, 5])

        out4 = peakutils.indexes(x, thres=14, thres_abs=True)
        assert_array_almost_equal(out4, [5])

        out5 = peakutils.indexes(x, thres=15, thres_abs=True)
        assert_array_almost_equal(out5, [])

        out6 = peakutils.indexes(x, thres=16, thres_abs=True)
        assert_array_almost_equal(out6, [])
Example #35
 def test_consistency_with_python(self):
     M1 = self.T1.sparse_distance_matrix(self.T2, self.r)
     M2 = self.ref_T1.sparse_distance_matrix(self.ref_T2, self.r)
     assert_array_almost_equal(M1.todense(), M2.todense(), decimal=14)
Example #36
def test_predict():
    """
    Test model prediction API
    """
    psphere = get_sphere('symmetric362')
    bvecs = np.concatenate(([[1, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = grad.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0001], [0.0015, 0.0003, 0.0003]))
    mevecs = [np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
              np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])]
    S = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)

    dm = dti.TensorModel(gtab, 'LS', return_S0_hat=True)
    dmfit = dm.fit(S)
    npt.assert_array_almost_equal(dmfit.predict(gtab, S0=100), S)
    npt.assert_array_almost_equal(dmfit.predict(gtab), S)
    npt.assert_array_almost_equal(dm.predict(dmfit.model_params, S0=100), S)

    fdata, fbvals, fbvecs = get_fnames()
    data = load_nifti_data(fdata)
    # Make the data cube a bit larger:
    data = np.tile(data.T, 2).T
    gtab = grad.gradient_table(fbvals, fbvecs)
    dtim = dti.TensorModel(gtab)
    dtif = dtim.fit(data)
    S0 = np.mean(data[..., gtab.b0s_mask], -1)
    p = dtif.predict(gtab, S0)
    npt.assert_equal(p.shape, data.shape)
    # Predict using S0_hat:
    dtim = dti.TensorModel(gtab, return_S0_hat=True)
    dtif = dtim.fit(data)
    p = dtif.predict(gtab)
    npt.assert_equal(p.shape, data.shape)
    p = dtif.predict(gtab, S0)
    npt.assert_equal(p.shape, data.shape)

    # Test iter_fit_tensor with S0_hat
    dtim = dti.TensorModel(gtab, step=2, return_S0_hat=True)
    dtif = dtim.fit(data)
    S0 = np.mean(data[..., gtab.b0s_mask], -1)
    p = dtif.predict(gtab, S0)
    npt.assert_equal(p.shape, data.shape)

    # Use a smaller step in predicting:

    dtim = dti.TensorModel(gtab, step=2)
    dtif = dtim.fit(data)
    S0 = np.mean(data[..., gtab.b0s_mask], -1)
    p = dtif.predict(gtab, S0)
    npt.assert_equal(p.shape, data.shape)
    # And with a scalar S0:
    S0 = 1
    p = dtif.predict(gtab, S0)
    npt.assert_equal(p.shape, data.shape)
    # Assign the step through kwarg:
    p = dtif.predict(gtab, S0, step=1)
    npt.assert_equal(p.shape, data.shape)
    # And without S0:
    p = dtif.predict(gtab, step=1)
    npt.assert_equal(p.shape, data.shape)
Example #37
def test_wls_and_ls_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """

    # Defining Test Voxel (avoid nibabel dependency)

    # Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_fnames('55dir_grad.bvec'))
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    gtab = grad.gradient_table(bval, bvec)
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    npt.assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Testing WLS Fit on single voxel
    # If you do something wonky (passing min_signal<0), you should get an
    # error:
    npt.assert_raises(ValueError, TensorModel, gtab, fit_method='WLS',
                      min_signal=-1)

    # Estimate tensor from test signals
    model = TensorModel(gtab, fit_method='WLS', return_S0_hat=True)
    tensor_est = model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                                  err_msg="Calculation of tensor from Y does "
                                          "not compare to analytical solution")
    npt.assert_almost_equal(tensor_est.md[0], md)
    npt.assert_array_almost_equal(tensor_est.S0_hat[0], b0, decimal=3)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    npt.assert_equal(tensor_est.shape, tuple())
    npt.assert_array_almost_equal(tensor_est.evals, evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    npt.assert_almost_equal(tensor_est.md, md)
    npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    # Test using fit_method='LS'
    model = TensorModel(gtab, fit_method='LS')
    tensor_est = model.fit(y)
    npt.assert_equal(tensor_est.shape, tuple())
    npt.assert_array_almost_equal(tensor_est.evals, evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    npt.assert_almost_equal(tensor_est.md, md)
    npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
    npt.assert_array_almost_equal(tensor_est.linearity, linearity(evals))
    npt.assert_array_almost_equal(tensor_est.planarity, planarity(evals))
    npt.assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))
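The signals in these fits are built as Y = exp(X . D); in the usual DTI convention that is S(b) = S0 * exp(-b * g^T D g). A one-line numeric check of that relation for a single gradient direction (illustrative values, independent of dipy):

import numpy as np

S0, b = 1000.0, 1000.0
D = np.diag([2e-3, 1e-3, 0.0])       # diffusion tensor, mm^2/s
g = np.array([1.0, 0.0, 0.0])        # unit gradient direction
S = S0 * np.exp(-b * (g @ D @ g))
np.testing.assert_almost_equal(np.log(S0 / S), b * (g @ D @ g))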
Example #38
 def test__getitem__tuple(self, sim=sim):
     p = sim.Population(23, sim.HH_cond_exp())
     pv = p[(3, 5, 7)]
     self.assertEqual(pv.parent, p)
     assert_array_almost_equal(pv.all_cells, p.all_cells[[3, 5, 7]])
Example #39
def test_nlls_fit_tensor():
    """
    Test the implementation of NLLS and RESTORE
    """

    b0 = 1000.
    bvecs, bval = read_bvec_file(get_fnames('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)

    # Design Matrix
    X = dti.design_matrix(gtab)

    # Signals
    Y = np.exp(np.dot(X, D))
    Y.shape = (-1,) + Y.shape

    # Estimate tensor from test signals and compare against expected result
    # using non-linear least squares:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS')
    tensor_est = tensor_model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    npt.assert_almost_equal(tensor_est.md[0], md)

    # You can also do this without the Jacobian (though it's slower):
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', jac=False)
    tensor_est = tensor_model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    npt.assert_almost_equal(tensor_est.md[0], md)

    # Using the gmm weighting scheme:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='gmm')
    tensor_est = tensor_model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    npt.assert_almost_equal(tensor_est.md[0], md)

    # If you use sigma weighting, you'd better provide a sigma:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='sigma')
    npt.assert_raises(ValueError, tensor_model.fit, Y)

    # Use NLLS with some actual 4D data:
    data, bvals, bvecs = get_fnames('small_25')
    gtab = grad.gradient_table(bvals, bvecs)
    tm1 = dti.TensorModel(gtab, fit_method='NLLS')
    dd = load_nifti_data(data)
    tf1 = tm1.fit(dd)
    tm2 = dti.TensorModel(gtab)
    tf2 = tm2.fit(dd)

    npt.assert_array_almost_equal(tf1.fa, tf2.fa, decimal=1)
Example #40
    def test_singleterm_siso_monomial_evaluation(self):

        m = Monomial(1, 0, [2], [], [2], [])
        y = [[1.12], [2.12]]
        u = []
        assert_array_almost_equal(m(y, u), 2.12**2)
Example #41
def test_crop():
    """Test cropping with annotations."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    onset = events[events[:, 2] == 1, 0] / raw.info['sfreq']
    duration = np.full_like(onset, 0.5)
    description = ['bad %d' % k for k in range(len(onset))]
    annot = mne.Annotations(onset,
                            duration,
                            description,
                            orig_time=raw.info['meas_date'])
    raw.set_annotations(annot)

    split_time = raw.times[-1] / 2. + 2.
    split_idx = len(onset) // 2 + 1
    raw_cropped_left = raw.copy().crop(0., split_time - 1. / raw.info['sfreq'])
    assert_array_equal(raw_cropped_left.annotations.description,
                       raw.annotations.description[:split_idx])
    assert_allclose(raw_cropped_left.annotations.duration,
                    raw.annotations.duration[:split_idx])
    assert_allclose(raw_cropped_left.annotations.onset,
                    raw.annotations.onset[:split_idx])
    raw_cropped_right = raw.copy().crop(split_time, None)
    assert_array_equal(raw_cropped_right.annotations.description,
                       raw.annotations.description[split_idx:])
    assert_allclose(raw_cropped_right.annotations.duration,
                    raw.annotations.duration[split_idx:])
    assert_allclose(raw_cropped_right.annotations.onset,
                    raw.annotations.onset[split_idx:])
    raw_concat = mne.concatenate_raws([raw_cropped_left, raw_cropped_right],
                                      verbose='debug')
    assert_allclose(raw_concat.times, raw.times)
    assert_allclose(raw_concat[:][0], raw[:][0], atol=1e-20)
    assert_and_remove_boundary_annot(raw_concat)
    # Ensure the annotations survive a round-trip crop->concat
    assert_array_equal(raw_concat.annotations.description,
                       raw.annotations.description)
    for attr in ('onset', 'duration'):
        assert_allclose(getattr(raw_concat.annotations, attr),
                        getattr(raw.annotations, attr),
                        err_msg='Failed for %s:' % (attr, ))

    raw.set_annotations(None)  # undo

    # Test concatenating annotations with and without orig_time.
    raw2 = raw.copy()
    raw.set_annotations(Annotations([45.], [3], 'test', raw.info['meas_date']))
    raw2.set_annotations(Annotations([2.], [3], 'BAD', None))
    expected_onset = [45., 2. + raw._last_time]
    raw = concatenate_raws([raw, raw2])
    assert_and_remove_boundary_annot(raw)
    assert_array_almost_equal(raw.annotations.onset, expected_onset, decimal=2)

    # Test IO
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test-annot.fif')
    raw.annotations.save(fname)
    annot_read = read_annotations(fname)
    for attr in ('onset', 'duration'):
        assert_allclose(getattr(annot_read, attr),
                        getattr(raw.annotations, attr))
    assert annot_read.orig_time == raw.annotations.orig_time
    assert_array_equal(annot_read.description, raw.annotations.description)
    annot = Annotations((), (), ())
    annot.save(fname)
    pytest.raises(IOError, read_annotations, fif_fname)  # none in old raw
    annot = read_annotations(fname)
    assert isinstance(annot, Annotations)
    assert len(annot) == 0
    annot.crop()  # test if cropping empty annotations doesn't raise an error
    # Test that empty annotations can be saved with an object
    fname = op.join(tempdir, 'test_raw.fif')
    raw.set_annotations(annot)
    raw.save(fname)
    raw_read = read_raw_fif(fname)
    assert isinstance(raw_read.annotations, Annotations)
    assert len(raw_read.annotations) == 0
    raw.set_annotations(None)
    raw.save(fname, overwrite=True)
    raw_read = read_raw_fif(fname)
    assert raw_read.annotations is not None  # XXX to be fixed in #5416
    assert len(raw_read.annotations.onset) == 0  # XXX to be fixed in #5416
Example #42
def test_State_matmul_rmatmul_ndarray():
    H = State([[-5, -2], [1, 0]], [[2], [0]], [3, 1], 1)
    J1 = np.array([[-5., -2., 0., 0., 0., 0., 2., 4., 6., 8.],
                   [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                   [0., 0., -5., -2., 0., 0., 10., 12., 14., 16.],
                   [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
                   [0., 0., 0., 0., -5., -2., 18., 20., 22., 24.],
                   [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
                   [3., 1., 0., 0., 0., 0., 1., 2., 3., 4.],
                   [0., 0., 3., 1., 0., 0., 5., 6., 7., 8.],
                   [0., 0., 0., 0., 3., 1., 9., 10., 11., 12.]])

    J2 = np.array([[-5., -2., 0., 0., 0., 0., 2., 0., 0.],
                   [1., 0., 0., 0., 0., 0., 0., 0., 0.],
                   [0., 0., -5., -2., 0., 0., 0., 2., 0.],
                   [0., 0., 1., 0., 0., 0., 0., 0., 0.],
                   [0., 0., 0., 0., -5., -2., 0., 0., 2.],
                   [0., 0., 0., 0., 1., 0., 0., 0., 0.],
                   [3., 1., 6., 2., 9., 3., 1., 2., 3.],
                   [12., 4., 15., 5., 18., 6., 4., 5., 6.],
                   [21., 7., 24., 8., 27., 9., 7., 8., 9.],
                   [30., 10., 33., 11., 36., 12., 10., 11., 12.]])

    mat = np.arange(1, 13).reshape(3, 4)
    Fm = concatenate_state_matrices(mat @ H)
    assert_array_almost_equal(J1, Fm)
    Fm = concatenate_state_matrices(H @ mat)
    assert_array_almost_equal(J1, Fm)

    mat = np.arange(1, 13).reshape(4, 3)
    Fm = concatenate_state_matrices(mat @ H)
    assert_array_almost_equal(J2, Fm)
    Fm = concatenate_state_matrices(H @ mat)
    assert_array_almost_equal(J2, Fm)

    G, H = random_state_model(2, 2, 2), random_state_model(2, 3, 3)
    with assert_raises(ValueError):
        G @ H

    # Scalars
    G = random_state_model(1)
    H = 0. @ G
    assert H._isgain
    H = 1. @ G
    assert_almost_equal(concatenate_state_matrices(G),
                        concatenate_state_matrices(H))

    # static gain mults
    G = random_state_model(0, 4, 5)
    H = random_state_model(0, 5, 4)
    assert (G @ H)._isgain
    assert_equal((G @ H).shape, (4, 4))
    H = random_state_model(0, 3, 3)
    with assert_raises(ValueError):
        G @ H

    G = State(1.)
    H = random_state_model(1, 2, 2)
    assert_almost_equal(concatenate_state_matrices(G @ H),
                        concatenate_state_matrices(H @ G))

    G = random_state_model(1, 4, 5)
    H = random_state_model(1, 4, 5)
    with assert_raises(ValueError):
        G @ H
Example #43
 def test__getitem__list(self, sim=sim):
     p = sim.Population(23, sim.HH_cond_exp())
     pv = p[list(range(3, 9))]
     self.assertEqual(pv.parent, p)
     assert_array_almost_equal(pv.all_cells, p.all_cells[3:9])
Example #44
def check_interpolateArray(order):
    def interpolateArray(data, x):
        result = pg.interpolateArray(data, x, order=order)
        assert result.shape == x.shape[:-1] + data.shape[x.shape[-1]:]
        return result

    data = np.array([[1., 2., 4.], [10., 20., 40.], [100., 200., 400.]])

    # test various x shapes
    interpolateArray(data, np.ones((1, )))
    interpolateArray(data, np.ones((2, )))
    interpolateArray(data, np.ones((1, 1)))
    interpolateArray(data, np.ones((1, 2)))
    interpolateArray(data, np.ones((5, 1)))
    interpolateArray(data, np.ones((5, 2)))
    interpolateArray(data, np.ones((5, 5, 1)))
    interpolateArray(data, np.ones((5, 5, 2)))
    with pytest.raises(TypeError):
        interpolateArray(data, np.ones((3, )))
    with pytest.raises(TypeError):
        interpolateArray(data, np.ones((
            1,
            3,
        )))
    with pytest.raises(TypeError):
        interpolateArray(data, np.ones((
            5,
            5,
            3,
        )))

    x = np.array([
        [0.3, 0.6],
        [1., 1.],
        [
            0.501, 1.
        ],  # NOTE: testing at exactly 0.5 can yield different results from map_coordinates
        [0.501, 2.501],  # due to differences in rounding
        [10., 10.]
    ])

    result = interpolateArray(data, x)
    # make sure results match ndimage.map_coordinates
    import scipy.ndimage
    spresult = scipy.ndimage.map_coordinates(data, x.T, order=order)
    #spresult = np.array([  5.92,  20.  ,  11.  ,   0.  ,   0.  ])  # generated with the above line

    assert_array_almost_equal(result, spresult)

    # test mapping when x.shape[-1] < data.ndim
    x = np.array([[0.3, 0], [0.3, 1], [0.3, 2]])
    r1 = interpolateArray(data, x)
    x = np.array([0.3])  # should broadcast across axis 1
    r2 = interpolateArray(data, x)

    assert_array_almost_equal(r1, r2)

    # test mapping 2D array of locations
    x = np.array([[[0.501, 0.501], [0.501, 1.0], [0.501, 1.501]],
                  [[1.501, 0.501], [1.501, 1.0], [1.501, 1.501]]])

    r1 = interpolateArray(data, x)
    r2 = scipy.ndimage.map_coordinates(data, x.transpose(2, 0, 1), order=order)
    #r2 = np.array([[   8.25,   11.  ,   16.5 ],  # generated with the above line
    #[  82.5 ,  110.  ,  165.  ]])

    assert_array_almost_equal(r1, r2)
Example #45
def test_transfer_to_state():
    # Models with static column/row
    num, den = [[1, -1], [[1, -1], 0]], [[[1, 2], 1], [[1, 2], 1]]
    den2, num2 = [list(i) for i in zip(*den)], [list(i) for i in zip(*num)]

    G = Transfer(num, den)
    H = Transfer(num2, den2)

    Gs = transfer_to_state(G)
    Hs = transfer_to_state(H)
    Gm = concatenate_state_matrices(Gs)
    Hm = concatenate_state_matrices(Hs)
    assert_array_almost_equal(Gm, np.array([[-2, 1, 0], [1, 0, -1], [-3, 1,
                                                                     0]]))
    assert_array_almost_equal(
        Hm,
        np.array([[-2., 0., 1., 0.], [0., -2., 0., 1.], [1., -3., 0., 1.],
                  [0., 0., -1., 0.]]))

    # Example from Kalman 1963
    num = [[3 * np.poly([-3, -5]), [6, 6], [2, 7], [2, 5]],
           [2, 1, [2, 10], [8, 16]],
           [[2, 14, 36], [-2, 0], 1, 2 * np.convolve([5, 17], [1, 2])]]
    den = [[np.poly([-1, -2, -4]), [1, 6, 8], [1, 7, 12], [1, 5, 6]],
           [[1, 8, 15], [1, 3],
            np.poly([-1, -2, -3]),
            np.poly([-1, -3, -5])],
           [np.poly([-1, -3, -5]), [1, 4, 3], [1, 3],
            np.poly([-1, -3, -5])]]

    G = Transfer(num, den)
    H = transfer_to_state(G)
    p = H.poles
    p.sort()
    assert_array_almost_equal(
        p,
        np.array([
            -5. + 0.j, -5. + 0.j, -4. + 0.j, -3. + 0.j, -3. + 0.j, -3. + 0.j,
            -2. + 0.j, -2. + 0.j, -1. + 0.j, -1. + 0.j, -1. + 0.j
        ]))

    # Reported in gh-#42
    G = Transfer([[[87.8, 8.78], [-103.68, -8.64]],
                  [[129.84, 10.82], [-109.6, -10.96]]], [562.5, 82.5, 1])
    Gss = transfer_to_state(G)
    assert_array_almost_equal(
        Gss.a, np.kron(np.eye(2), [[0., 1.], [-2 / 1125, -11 / 75]]))
    assert_array_almost_equal(Gss.b, [[0, 0], [1, 0], [0, 0], [0, 1]])
    des_c = np.array([[
        0.01560888888888889, 0.1560888888888889, -0.015360000000000002,
        -0.18432
    ],
                      [
                          0.019235555555555558, 0.23082666666666668,
                          -0.019484444444444447, -0.19484444444444443
                      ]])

    assert_array_almost_equal(Gss.c, des_c)
    assert_array_almost_equal(Gss.d, np.zeros([2, 2]))

    # reported in gh-#50
    num = [[[61.79732492202783, 36.24988430260625, 0.7301196233698941],
            [0.0377840674057878, 0.9974993795127982, 21.763622825733773]]]
    den = [[[84.64, 18.4, 1.0], [1.0, 7.2, 144.0]]]

    TF = transfer_to_state((num, den))
    assert_array_almost_equal([
        -3.6 - 1.14472704e+01j,
        -3.6 + 1.14472704e+01j,
        -0.10869565 - 1.74405237e-07j,
        -0.10869565 + 1.74405237e-07j,
    ], np.sort(TF.poles))
    assert TF.zeros.size == 0

    # rectangular static gain
    gain = np.ones([2, 3])
    Gss = transfer_to_state(Transfer(gain))
    assert_array_almost_equal(gain, Gss.d)
Example #46
def test_sum_prop_is_one():
    # %%
    mg = RasterModelGrid((10, 10), xy_spacing=(1, 1))
    mg.add_field("topographic__elevation", mg.node_x + mg.node_y, at="node")
    fa = PriorityFloodFlowRouter(mg, separate_hill_flow=True, suppress_out=True)
    fa.run_one_step()

    # Single flow
    props_Pf = mg.at_node["flow__receiver_proportions"]
    testing.assert_array_almost_equal(
        props_Pf,
        np.ones_like(props_Pf),
        decimal=5,
        err_msg="Sum of flow proportions is not equal to one",
        verbose=True,
    )

    # Multiple flow
    props_Pf = mg.at_node["hill_flow__receiver_proportions"]
    props_Pf[props_Pf == -1] = 0
    props_Pf = props_Pf.sum(axis=1)
    testing.assert_array_almost_equal(
        props_Pf,
        np.ones_like(props_Pf),
        decimal=5,
        err_msg="Sum of flow proportions is not equal to one",
        verbose=True,
    )

    # %% multiple flow with D8 over hills
    mg = RasterModelGrid((10, 10), xy_spacing=(1, 1))
    mg.add_field("topographic__elevation", mg.node_x + mg.node_y, at="node")
    fa = PriorityFloodFlowRouter(
        mg, separate_hill_flow=True, hill_flow_metric="D8", suppress_out=True
    )
    fa.run_one_step()

    # Multiple flow
    props_Pf = mg.at_node["hill_flow__receiver_proportions"]
    testing.assert_array_almost_equal(
        props_Pf,
        np.ones_like(props_Pf),
        decimal=5,
        err_msg="Sum of flow proportions is not equal to one",
        verbose=True,
    )

    # %% multiple flow with D8 over rivers
    mg = RasterModelGrid((10, 10), xy_spacing=(1, 1))
    mg.add_field("topographic__elevation", mg.node_x + mg.node_y, at="node")
    fa = PriorityFloodFlowRouter(mg, flow_metric="Quinn", suppress_out=True)
    fa.run_one_step()

    # Multiple flow
    props_Pf = mg.at_node["flow__receiver_proportions"]
    props_Pf[props_Pf == -1] = 0
    props_Pf = np.sum(props_Pf, axis=1)
    testing.assert_array_almost_equal(
        props_Pf,
        np.ones_like(props_Pf),
        decimal=5,
        err_msg="Sum of flow proportions is not equal to one",
        verbose=True,
    )
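The invariant this test exercises is that a multiple-flow router must split each node's discharge among its downslope receivers with weights summing to one. A hedged, simplified stand-in for the slope-weighted (Quinn-style) partitioning that PriorityFloodFlowRouter performs internally:

import numpy as np

def quinn_proportions(downslope_gradients, p=1.0):
    # Weight each downslope neighbor by gradient**p; zeros mark directions
    # that do not drain away from the node. Assumes at least one positive
    # gradient, i.e. the node is not a pit.
    w = np.maximum(downslope_gradients, 0.0) ** p
    return w / w.sum()

props = quinn_proportions(np.array([0.3, 0.0, 0.1, 0.6]))
np.testing.assert_array_almost_equal(props.sum(), 1.0)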
Example #47
0
    def test_ger(self):

        for p in 'sd':
            f = getattr(fblas, p + 'ger', None)
            if f is None: continue
            assert_array_almost_equal(f(1, [1, 2], [3, 4]), [[3, 4], [6, 8]])
            assert_array_almost_equal(f(2, [1, 2, 3], [3, 4]),
                                      [[6, 8], [12, 16], [18, 24]])

            assert_array_almost_equal(f(1, [1, 2], [3, 4], a=[[1, 2], [3, 4]]),
                                      [[4, 6], [9, 12]])

        for p in 'cz':
            f = getattr(fblas, p + 'geru', None)
            if f is None: continue
            assert_array_almost_equal(f(1, [1j, 2], [3, 4]),
                                      [[3j, 4j], [6, 8]])
            assert_array_almost_equal(f(-2, [1j, 2j, 3j], [3j, 4j]),
                                      [[6, 8], [12, 16], [18, 24]])

        for p in 'cz':
            for name in ('ger', 'gerc'):
                f = getattr(fblas, p + name, None)
                if f is None: continue
                assert_array_almost_equal(f(1, [1j, 2], [3, 4]),
                                          [[3j, 4j], [6, 8]])
                assert_array_almost_equal(f(2, [1j, 2j, 3j], [3j, 4j]),
                                          [[6, 8], [12, 16], [18, 24]])
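The ?ger family performs the BLAS rank-1 update A <- alpha * x * y^T + A (geru uses y^T, gerc the conjugate transpose y^H), so with no initial A the result is simply alpha times the outer product, as the expected values above show. A quick cross-check of scipy's dger against numpy.outer:

import numpy as np
from scipy.linalg import blas

x = np.array([1.0, 2.0])
y = np.array([3.0, 4.0])
# Rank-1 update starting from A = 0: result is alpha * outer(x, y).
np.testing.assert_array_almost_equal(blas.dger(2.0, x, y),
                                     2.0 * np.outer(x, y))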
Example #48
0
def test_pdf_correctness():
    for t in MULTIVARIATE_T_TESTS:
        val = multivariate_t.pdf(t['x'], t['mu'], t['shape'], t['df'])
        assert_array_almost_equal(val, t['ans'])
Example #49
0
    def test_validity_greeks(self):
        from py_vollib_vectorized.greeks import delta as my_delta, theta as my_theta, gamma as my_gamma, \
            rho as my_rho, \
            vega as my_vega

        from py_vollib.black_scholes.greeks.numerical import delta as original_delta, gamma as original_gamma, \
            rho as original_rho, theta as original_theta, vega as original_vega

        data = pd.read_csv("fake_data.csv")
        ivs = vectorized_implied_volatility(
            price=data["MidPx"].values,  # current option price
            S=data["Px"].values,  # underlying asset price
            K=data["Strike"].values,  # strike
            t=data["Annualized Time To Expiration"].
            values,  # nroamlized days to expiration
            r=data["Interest Free Rate"].values,  # interest free rate
            flag=data["Flag"].values,  # call or put
        )

        data["IV"] = ivs

        my_deltas = my_delta(
            flag=data["Flag"],
            S=data["Px"],
            K=data["Strike"],
            t=data["Annualized Time To Expiration"],
            r=data["Interest Free Rate"],
            sigma=data["IV"],
        )
        my_thetas = my_theta(
            flag=data["Flag"],
            S=data["Px"],
            K=data["Strike"],
            t=data["Annualized Time To Expiration"],
            r=data["Interest Free Rate"],
            sigma=data["IV"],
        )
        my_rhos = my_rho(
            flag=data["Flag"],
            S=data["Px"],
            K=data["Strike"],
            t=data["Annualized Time To Expiration"],
            r=data["Interest Free Rate"],
            sigma=data["IV"],
        )
        my_vegas = my_vega(
            flag=data["Flag"],
            S=data["Px"],
            K=data["Strike"],
            t=data["Annualized Time To Expiration"],
            r=data["Interest Free Rate"],
            sigma=data["IV"],
        )
        my_gammas = my_gamma(
            flag=data["Flag"],
            S=data["Px"],
            K=data["Strike"],
            t=data["Annualized Time To Expiration"],
            r=data["Interest Free Rate"],
            sigma=data["IV"],
        )
        orig_ds, orig_ts, orig_rs, orig_vs, orig_gs = [], [], [], [], []
        for i in range(len(data)):
            orig_d = original_delta(
                flag=data["Flag"].iloc[i],
                S=data["Px"].iloc[i],
                K=data["Strike"].iloc[i],
                t=data["Annualized Time To Expiration"].iloc[i],
                r=data["Interest Free Rate"].iloc[i],
                sigma=data["IV"].iloc[i],
            )
            orig_t = original_theta(
                flag=data["Flag"].iloc[i],
                S=data["Px"].iloc[i],
                K=data["Strike"].iloc[i],
                t=data["Annualized Time To Expiration"].iloc[i],
                r=data["Interest Free Rate"].iloc[i],
                sigma=data["IV"].iloc[i],
            )
            orig_r = original_rho(
                flag=data["Flag"].iloc[i],
                S=data["Px"].iloc[i],
                K=data["Strike"].iloc[i],
                t=data["Annualized Time To Expiration"].iloc[i],
                r=data["Interest Free Rate"].iloc[i],
                sigma=data["IV"].iloc[i],
            )
            orig_v = original_vega(
                flag=data["Flag"].iloc[i],
                S=data["Px"].iloc[i],
                K=data["Strike"].iloc[i],
                t=data["Annualized Time To Expiration"].iloc[i],
                r=data["Interest Free Rate"].iloc[i],
                sigma=data["IV"].iloc[i],
            )
            orig_g = original_gamma(
                flag=data["Flag"].iloc[i],
                S=data["Px"].iloc[i],
                K=data["Strike"].iloc[i],
                t=data["Annualized Time To Expiration"].iloc[i],
                r=data["Interest Free Rate"].iloc[i],
                sigma=data["IV"].iloc[i],
            )
            orig_ds.append(orig_d.iloc[0])
            orig_ts.append(orig_t.iloc[0])
            orig_rs.append(orig_r.iloc[0])
            orig_vs.append(orig_v.iloc[0])
            orig_gs.append(orig_g.iloc[0])

        orig_ds = np.array(orig_ds)
        orig_ts = np.array(orig_ts)
        orig_rs = np.array(orig_rs)
        orig_vs = np.array(orig_vs)
        orig_gs = np.array(orig_gs)

        self.assertIsNone(assert_array_almost_equal(my_deltas, orig_ds))
        self.assertIsNone(assert_array_almost_equal(my_gammas, orig_gs))
        self.assertIsNone(assert_array_almost_equal(my_rhos, orig_rs))
        self.assertIsNone(assert_array_almost_equal(my_vegas, orig_vs))
        self.assertIsNone(assert_array_almost_equal(my_thetas, orig_ts))
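What the comparison above validates is that the vectorized greeks agree with py_vollib's numerical (finite-difference) ones. The same idea can be checked from first principles: the Black-Scholes call delta is N(d1), which must match a central finite difference of the price in S. A self-contained scipy sketch with hypothetical parameter values (not the fake_data.csv inputs):

import numpy as np
from scipy.stats import norm

def bs_call(S, K, t, r, sigma):
    # Black-Scholes price of a European call.
    d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * t) / (sigma * np.sqrt(t))
    d2 = d1 - sigma * np.sqrt(t)
    return S * norm.cdf(d1) - K * np.exp(-r * t) * norm.cdf(d2)

S, K, t, r, sigma = 100.0, 105.0, 0.5, 0.01, 0.2
d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * t) / (sigma * np.sqrt(t))
analytic_delta = norm.cdf(d1)
h = 1e-4  # bump size for the central difference
fd_delta = (bs_call(S + h, K, t, r, sigma)
            - bs_call(S - h, K, t, r, sigma)) / (2 * h)
np.testing.assert_array_almost_equal(analytic_delta, fd_delta)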
Example #50
0
def test_tf_lcmv():
    """Test TF beamforming based on LCMV
    """
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)
    raw = mne.io.Raw(fname_raw, preload=True)
    forward = mne.read_forward_solution(fname_fwd)

    event_id, tmin, tmax = 1, -0.2, 0.2

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels

    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info,
                           meg=True,
                           eeg=False,
                           stim=True,
                           eog=True,
                           exclude='bads',
                           selection=left_temporal_channels)

    # Read epochs
    epochs = mne.Epochs(raw,
                        events,
                        event_id,
                        tmin,
                        tmax,
                        proj=True,
                        picks=picks,
                        baseline=None,
                        preload=False,
                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
    epochs.drop_bad_epochs()

    freq_bins = [(4, 12), (15, 40)]
    time_windows = [(-0.1, 0.1), (0.0, 0.2)]
    win_lengths = [0.2, 0.2]
    tstep = 0.1
    reg = 0.05

    source_power = []
    noise_covs = []
    for (l_freq, h_freq), win_length in zip(freq_bins, win_lengths):
        raw_band = raw.copy()
        raw_band.filter(l_freq, h_freq, method='iir', n_jobs=1, picks=picks)
        epochs_band = mne.Epochs(raw_band,
                                 epochs.events,
                                 epochs.event_id,
                                 tmin=tmin,
                                 tmax=tmax,
                                 baseline=None,
                                 proj=True,
                                 picks=picks)
        with warnings.catch_warnings(record=True):  # not enough samples
            noise_cov = compute_covariance(epochs_band,
                                           tmin=tmin,
                                           tmax=tmin + win_length)
        noise_cov = mne.cov.regularize(noise_cov,
                                       epochs_band.info,
                                       mag=reg,
                                       grad=reg,
                                       eeg=reg,
                                       proj=True)
        noise_covs.append(noise_cov)
        del raw_band  # to save memory

        # Manually calculate source power in one frequency band and several
        # time windows to compare to tf_lcmv results and test overlapping
        if (l_freq, h_freq) == freq_bins[0]:
            for time_window in time_windows:
                with warnings.catch_warnings(record=True):
                    data_cov = compute_covariance(epochs_band,
                                                  tmin=time_window[0],
                                                  tmax=time_window[1])
                stc_source_power = _lcmv_source_power(epochs.info,
                                                      forward,
                                                      noise_cov,
                                                      data_cov,
                                                      reg=reg,
                                                      label=label)
                source_power.append(stc_source_power.data)

    with warnings.catch_warnings(record=True):
        stcs = tf_lcmv(epochs,
                       forward,
                       noise_covs,
                       tmin,
                       tmax,
                       tstep,
                       win_lengths,
                       freq_bins,
                       reg=reg,
                       label=label)

    assert_true(len(stcs) == len(freq_bins))
    assert_true(stcs[0].shape[1] == 4)

    # Averaging all time windows that overlap the time period 0 to 100 ms
    source_power = np.mean(source_power, axis=0)

    # Selecting the first frequency bin in tf_lcmv results
    stc = stcs[0]

    # Comparing tf_lcmv results with _lcmv_source_power results
    assert_array_almost_equal(stc.data[:, 2], source_power[:, 0])

    # Test if using unsupported max-power orientation is detected
    assert_raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep,
                  win_lengths,
                  freq_bins=freq_bins,
                  pick_ori='max-power')

    # Test if incorrect number of noise covariances is detected
    assert_raises(ValueError, tf_lcmv, epochs, forward, [noise_covs[0]], tmin,
                  tmax, tstep, win_lengths, freq_bins)

    # Test if freq_bins and win_lengths incompatibility is detected
    assert_raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep,
                  win_lengths=[0, 1, 2],
                  freq_bins=freq_bins)

    # Test if time step exceeding window lengths is detected
    assert_raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep=0.15,
                  win_lengths=[0.2, 0.1],
                  freq_bins=freq_bins)

    # Test correct detection of preloaded epochs objects that do not contain
    # the underlying raw object
    epochs_preloaded = mne.Epochs(raw,
                                  events,
                                  event_id,
                                  tmin,
                                  tmax,
                                  proj=True,
                                  baseline=(None, 0),
                                  preload=True)
    assert_raises(ValueError, tf_lcmv, epochs_preloaded, forward, noise_covs,
                  tmin, tmax, tstep, win_lengths, freq_bins)

    with warnings.catch_warnings(record=True):  # not enough samples
        # Pass only one epoch to test if subtracting evoked
        # responses yields zeros
        stcs = tf_lcmv(epochs[0],
                       forward,
                       noise_covs,
                       tmin,
                       tmax,
                       tstep,
                       win_lengths,
                       freq_bins,
                       subtract_evoked=True,
                       reg=reg,
                       label=label)

    assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
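The stc.data[:, 2] comparison earlier in this test works because, with 0.2 s windows stepped by 0.1 s, the tf_lcmv output bin covering 0-100 ms is the average of the two analysis windows (-0.1, 0.1) and (0.0, 0.2) that overlap it. A toy numpy sketch of that bookkeeping (made-up power values, not MNE output):

import numpy as np

window_estimates = {(-0.1, 0.1): np.array([2.0, 4.0]),
                    (0.0, 0.2): np.array([4.0, 6.0])}
t_bin = (0.0, 0.1)  # the 0-100 ms output bin
# Average every analysis window that overlaps the output bin.
overlapping = [power for (t0, t1), power in window_estimates.items()
               if t0 < t_bin[1] and t1 > t_bin[0]]
np.testing.assert_array_almost_equal(np.mean(overlapping, axis=0), [3.0, 5.0])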
Example #51
0
def test_mvt_with_df_one_is_cauchy():
    x = [9, 7, 4, 1, -3, 9, 0, -3, -1, 3]
    val = multivariate_t.pdf(x, df=1)
    ans = cauchy.pdf(x)
    assert_array_almost_equal(val, ans)
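A companion sanity check to the df=1 case above: as df grows, the multivariate t density converges to the multivariate normal with the same location and shape. A minimal sketch (assumes a scipy recent enough to ship multivariate_t):

import numpy as np
from scipy.stats import multivariate_t, multivariate_normal

x = np.array([0.5, -0.2])
val_t = multivariate_t.pdf(x, loc=np.zeros(2), shape=np.eye(2), df=1e7)
val_n = multivariate_normal.pdf(x, mean=np.zeros(2), cov=np.eye(2))
np.testing.assert_array_almost_equal(val_t, val_n, decimal=6)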
Example #52
0
 def test_linear_edges(self):
     points, values = self._get_sample_4d()
     interp = RegularGridInterpolator(points, values)
     sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
     wanted = np.asarray([0., 1111.])
     assert_array_almost_equal(interp(sample), wanted)
Example #53
0
if 'test_copy' in examples:
    xx = X.copy()
    rhoyw, sigmayw = yule_walker(xx[:, 0], order=2)
    print(rhoyw, sigmayw)
    print((xx == X).all())  # test for unchanged array (fixed)

    yy = Y.copy()
    rhoyw, sigmayw = yule_walker(yy, order=2)
    print(rhoyw, sigmayw)
    print((yy == Y).all())  # test for unchanged array (fixed)

if 4 in examples:
    print('\nExample 4: demeaned pure AR(2), GLSAR versus Yule_Walker')
    Ydemeaned = Y - Y.mean()
    model4 = GLSAR(Ydemeaned, rho=2)
    for i in range(5):
        results = model4.fit()
        print("AR coefficients:", model4.rho, results.params)
        rho, sigma = yule_walker(results.resid, order=model4.order)
        model4 = GLSAR(Ydemeaned, rho=rho)

if 5 in examples:
    print('\nExample 5: pure AR(2), GLSAR iterative_fit versus Yule_Walker')
    model3a = GLSAR(Y, rho=1)
    res3a = model3a.iterative_fit(5)
    print(res3a.params)
    print(model3a.rho)
    rhoyw, sigmayw = yule_walker(Y, order=1)
    print(rhoyw, sigmayw)
    npt.assert_array_almost_equal(model3a.rho, rhoyw, 15)
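As a standalone illustration of what these snippets compare: Yule-Walker estimates fitted to a long simulated AR(2) series should recover the generating coefficients. A hedged sketch using the same statsmodels yule_walker function:

import numpy as np
from statsmodels.regression.linear_model import yule_walker

rng = np.random.default_rng(0)
true_rho = np.array([0.6, -0.3])
y = np.zeros(50000)
eps = rng.standard_normal(y.size)
for t in range(2, y.size):
    # AR(2): y_t = rho_1 * y_{t-1} + rho_2 * y_{t-2} + eps_t
    y[t] = true_rho[0] * y[t - 1] + true_rho[1] * y[t - 2] + eps[t]

rho_hat, sigma_hat = yule_walker(y, order=2)
np.testing.assert_array_almost_equal(rho_hat, true_rho, decimal=1)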
Example #54
0
def test_precomputed():
    # SVC with a precomputed kernel.
    # We test it with a toy dataset and with iris.
    clf = svm.SVC(kernel='precomputed')
    # Gram matrix for train data (square matrix)
    # (we use just a linear kernel)
    K = np.dot(X, np.array(X).T)
    clf.fit(K, Y)
    # Gram matrix for test data (rectangular matrix)
    KT = np.dot(T, np.array(X).T)
    pred = clf.predict(KT)
    assert_raises(ValueError, clf.predict, KT.T)

    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.intercept_, [0])
    assert_array_equal(pred, true_result)

    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    KT = np.zeros_like(KT)
    for i in range(len(T)):
        for j in clf.support_:
            KT[i, j] = np.dot(T[i], X[j])

    pred = clf.predict(KT)
    assert_array_equal(pred, true_result)

    # same as before, but using a callable function instead of the kernel
    # matrix. kernel is just a linear kernel

    kfunc = lambda x, y: np.dot(x, y.T)
    clf = svm.SVC(gamma='scale', kernel=kfunc)
    clf.fit(X, Y)
    pred = clf.predict(T)

    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)

    # test a precomputed kernel with the iris dataset
    # and check parameters against a linear SVC
    clf = svm.SVC(kernel='precomputed')
    clf2 = svm.SVC(kernel='linear')
    K = np.dot(iris.data, iris.data.T)
    clf.fit(K, iris.target)
    clf2.fit(iris.data, iris.target)
    pred = clf.predict(K)
    assert_array_almost_equal(clf.support_, clf2.support_)
    assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
    assert_array_almost_equal(clf.intercept_, clf2.intercept_)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)

    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    K = np.zeros_like(K)
    for i in range(len(iris.data)):
        for j in clf.support_:
            K[i, j] = np.dot(iris.data[i], iris.data[j])

    pred = clf.predict(K)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)

    clf = svm.SVC(gamma='scale', kernel=kfunc)
    clf.fit(iris.data, iris.target)
    pred = clf.predict(iris.data)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
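The core property exercised above is that SVC(kernel='precomputed') fit on the Gram matrix X @ X.T must behave exactly like SVC(kernel='linear') fit on X. A compact sketch on synthetic data:

import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn import svm

rng = np.random.RandomState(0)
X = rng.randn(40, 3)
y = (X[:, 0] + X[:, 1] > 0).astype(int)

clf_pre = svm.SVC(kernel='precomputed').fit(np.dot(X, X.T), y)
clf_lin = svm.SVC(kernel='linear').fit(X, y)
assert_array_almost_equal(clf_pre.dual_coef_, clf_lin.dual_coef_)
assert_array_almost_equal(clf_pre.intercept_, clf_lin.intercept_)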
Example #55
0
def test_lcmv():
    """Test LCMV with evoked data and single trials
    """
    raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol = _get_data()

    for fwd in [forward, forward_vol]:
        stc = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01)
        stc.crop(0.02, None)

        stc_pow = np.sum(stc.data, axis=1)
        idx = np.argmax(stc_pow)
        max_stc = stc.data[idx]
        tmax = stc.times[np.argmax(max_stc)]

        assert_true(0.09 < tmax < 0.105, tmax)
        assert_true(0.9 < np.max(max_stc) < 3., np.max(max_stc))

        if fwd is forward:
            # Test picking normal orientation (surface source space only)
            stc_normal = lcmv(evoked,
                              forward_surf_ori,
                              noise_cov,
                              data_cov,
                              reg=0.01,
                              pick_ori="normal")
            stc_normal.crop(0.02, None)

            stc_pow = np.sum(np.abs(stc_normal.data), axis=1)
            idx = np.argmax(stc_pow)
            max_stc = stc_normal.data[idx]
            tmax = stc_normal.times[np.argmax(max_stc)]

            assert_true(0.04 < tmax < 0.11, tmax)
            assert_true(0.4 < np.max(max_stc) < 2., np.max(max_stc))

            # The amplitude of normal orientation results should always be
            # smaller than free orientation results
            assert_true((np.abs(stc_normal.data) <= stc.data).all())

        # Test picking source orientation maximizing output source power
        stc_max_power = lcmv(evoked,
                             fwd,
                             noise_cov,
                             data_cov,
                             reg=0.01,
                             pick_ori="max-power")
        stc_max_power.crop(0.02, None)
        stc_pow = np.sum(stc_max_power.data, axis=1)
        idx = np.argmax(stc_pow)
        max_stc = stc_max_power.data[idx]
        tmax = stc.times[np.argmax(max_stc)]

        assert_true(0.09 < tmax < 0.11, tmax)
        assert_true(0.8 < np.max(max_stc) < 3., np.max(max_stc))

        # Maximum output source power orientation results should be similar to
        # free orientation results
        assert_true((stc_max_power.data - stc.data < 1).all())

    # Test if fixed forward operator is detected when picking normal or
    # max-power orientation
    assert_raises(ValueError,
                  lcmv,
                  evoked,
                  forward_fixed,
                  noise_cov,
                  data_cov,
                  reg=0.01,
                  pick_ori="normal")
    assert_raises(ValueError,
                  lcmv,
                  evoked,
                  forward_fixed,
                  noise_cov,
                  data_cov,
                  reg=0.01,
                  pick_ori="max-power")

    # Test if non-surface oriented forward operator is detected when picking
    # normal orientation
    assert_raises(ValueError,
                  lcmv,
                  evoked,
                  forward,
                  noise_cov,
                  data_cov,
                  reg=0.01,
                  pick_ori="normal")

    # Test if volume forward operator is detected when picking normal
    # orientation
    assert_raises(ValueError,
                  lcmv,
                  evoked,
                  forward_vol,
                  noise_cov,
                  data_cov,
                  reg=0.01,
                  pick_ori="normal")

    # Now test single trial using fixed orientation forward solution
    # so we can compare it to the evoked solution
    stcs = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01)
    stcs_ = lcmv_epochs(epochs,
                        forward_fixed,
                        noise_cov,
                        data_cov,
                        reg=0.01,
                        return_generator=True)
    assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)

    epochs.drop_bad_epochs()
    assert_true(len(epochs.events) == len(stcs))

    # average the single trial estimates
    stc_avg = np.zeros_like(stcs[0].data)
    for this_stc in stcs:
        stc_avg += this_stc.data
    stc_avg /= len(stcs)

    # compare it to the solution using evoked with fixed orientation
    stc_fixed = lcmv(evoked, forward_fixed, noise_cov, data_cov, reg=0.01)
    assert_array_almost_equal(stc_avg, stc_fixed.data)

    # use a label so we have few source vertices and delayed computation is
    # not used
    stcs_label = lcmv_epochs(epochs,
                             forward_fixed,
                             noise_cov,
                             data_cov,
                             reg=0.01,
                             label=label)

    assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)
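The single-trial averaging comparison above works because a fixed-orientation LCMV beamformer is a linear spatial filter: averaging filtered epochs equals filtering the averaged (evoked) data. A toy numpy sketch of that linearity:

import numpy as np

rng = np.random.default_rng(1)
W = rng.standard_normal((5, 10))           # spatial filter: sources x sensors
epochs = rng.standard_normal((20, 10, 4))  # epochs x sensors x times

avg_of_filtered = np.mean([W @ e for e in epochs], axis=0)
filtered_avg = W @ epochs.mean(axis=0)
np.testing.assert_array_almost_equal(avg_of_filtered, filtered_avg)

In the test itself the same filter enters both paths, since the epochs-based and evoked-based calls use identical noise and data covariances, so the identity applies.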
Example #56
0
 def test_simple_real(self):
     a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
     a_pinv = pinv(a)
     assert_array_almost_equal(dot(a,a_pinv), np.eye(3))
     a_pinv = pinv2(a)
     assert_array_almost_equal(dot(a,a_pinv), np.eye(3))
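Beyond A @ A_pinv approximating the identity for this full-rank square matrix, the Moore-Penrose pseudoinverse satisfies the Penrose conditions even for rectangular input; a quick numpy check of two of them:

import numpy as np

A = np.array([[1., 2., 3.], [4., 5., 6.]])  # rectangular, rank 2
A_pinv = np.linalg.pinv(A)
np.testing.assert_array_almost_equal(A @ A_pinv @ A, A)            # A A+ A = A
np.testing.assert_array_almost_equal(A_pinv @ A @ A_pinv, A_pinv)  # A+ A A+ = A+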
Example #57
0
 def test_linear_xi1d(self):
     points, values = self._get_sample_4d_2()
     interp = RegularGridInterpolator(points, values)
     sample = np.asarray([0.1, 0.1, 10., 9.])
     wanted = 1001.1
     assert_array_almost_equal(interp(sample), wanted)
Example #58
0
def test_consistent_proba():
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_1 = a.fit(X, Y).predict_proba(X)
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_2 = a.fit(X, Y).predict_proba(X)
    assert_array_almost_equal(proba_1, proba_2)
Example #59
0
def assert_array_almost_equal(a, b, **kwargs):
    testing.assert_array_almost_equal(to_numpy(a), to_numpy(b), **kwargs)
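This wrapper assumes a to_numpy helper that is not shown in the snippet. A plausible, purely hypothetical definition for code that mixes NumPy arrays with PyTorch-style tensors:

import numpy as np

def to_numpy(a):
    # Hypothetical helper (not from the original source): coerce array-likes
    # to NumPy. Objects exposing .detach()/.cpu()/.numpy() (e.g. torch
    # tensors) are converted explicitly; everything else goes through asarray.
    if hasattr(a, 'detach'):
        return a.detach().cpu().numpy()
    return np.asarray(a)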
Example #60
-1
def test_tfidf_no_smoothing():
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())

    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])

    # the lack of smoothing makes IDF fragile in the presence of features
    # with only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')

    # First we need to verify that numpy here provides div 0 warnings
    with warnings.catch_warnings(record=True) as w:
        1. / np.array([0.])
        numpy_provides_div0_warning = len(w) == 1

    with warnings.catch_warnings(record=True) as w:
        tfidf = tr.fit_transform(X).toarray()
        if not numpy_provides_div0_warning:
            raise SkipTest("Numpy does not provide div 0 warnings.")
        assert_equal(len(w), 1)
        # For Python 3 compatibility
        if hasattr(w[0].message, 'args'):
            assert_true("divide by zero" in w[0].message.args[0])
        else:
            assert_true("divide by zero" in w[0].message)