Example #1
0
def test_read_write_info():
    """Test IO of info.

    Round-trips a measurement Info through write_info/read_info and
    checks that the dev_head_t transform, the channel-list length, and
    unicode creator/his_id fields survive the round trip.
    """
    tempdir = _TempDir()
    info = read_info(raw_fname)
    temp_file = op.join(tempdir, 'info.fif')
    # check for bug `#1198`: dev_head_t['trans'] must be written and
    # read back intact
    info['dev_head_t']['trans'] = np.eye(4)
    t1 = info['dev_head_t']['trans']
    write_info(temp_file, info)
    info2 = read_info(temp_file)
    t2 = info2['dev_head_t']['trans']
    assert_true(len(info['chs']) == len(info2['chs']))
    assert_array_equal(t1, t2)
    # proc_history (e.g., GH#1875): non-ASCII strings must round-trip
    creator = u'é'
    info = read_info(chpi_fname)
    info['proc_history'][0]['creator'] = creator
    info['hpi_meas'][0]['creator'] = creator
    info['subject_info']['his_id'] = creator
    write_info(temp_file, info)
    info = read_info(temp_file)
    assert_equal(info['proc_history'][0]['creator'], creator)
    assert_equal(info['hpi_meas'][0]['creator'], creator)
    assert_equal(info['subject_info']['his_id'], creator)
Example #2
0
    def test_number_of_edges(self):
        """Edge counts at the two extreme connection probabilities.

        Probability 0 yields an empty graph; probability 1 yields a
        complete graph with n*(n-1)/2 edges.
        """
        n_vertices = 100

        # No connections at all: the graph must stay edge-free.
        empty_graph = dag.cube_space_graph(n_vertices, 2, 0.)
        assert_true(empty_graph.number_of_edges() == 0)

        # Every pair connected: the complete graph on n_vertices nodes.
        full_graph = dag.cube_space_graph(n_vertices, 1, 1.)
        assert_equal(full_graph.number_of_edges(),
                     (n_vertices * (n_vertices - 1) / 2))
Example #3
0
def test_em_gmm_largedim():
    """Fit a GMM to 10-dimensional data and check that the estimated
    labelling correlates with the true two-blob partition."""
    # generate some data: 100 samples, the first 30 shifted by +2 in
    # every dimension so two clusters exist
    dim = 10
    x = nr.randn(100, dim)
    x[:30] += 2

    # estimate different GMMs of that data
    # NOTE(review): range(2, 3) runs only k=2, so a single model is fit
    # and `bgmm` below comes from that one iteration -- confirm intended.
    maxiter, delta = 100, 1.e-4

    for k in range(2, 3):
        lgmm = GMM(k,dim)
        bgmm = lgmm.initialize_and_estimate(x, None, maxiter, delta, ninit=5)

    z = bgmm.map_label(x)

    # define the correct labelling
    u = np.zeros(100)
    u[:30] = 1

    #check the correlation between the true labelling
    # and the computed one (absolute value: label permutation-invariant)
    eta = np.absolute(np.dot(z - z.mean(), u - u.mean()) /\
                          (np.std(z) * np.std(u) * 100))
    assert_true(eta > 0.3)
Example #4
0
def test_make_dig_points():
    """Test application of Polhemus HSP to info"""
    dig_points = _read_dig_points(hsp_fname)
    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
    # freshly created info has no digitization points yet
    assert_false(info['dig'])

    info['dig'] = _make_dig_points(dig_points=dig_points)
    assert_true(info['dig'])
    # first head-shape point should match the file contents
    assert_array_equal(info['dig'][0]['r'], [-106.93, 99.80, 68.81])

    # ELP file: the first three points are the fiducials
    dig_points = _read_dig_points(elp_fname)
    nasion, lpa, rpa = dig_points[:3]
    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
    assert_false(info['dig'])

    info['dig'] = _make_dig_points(nasion, lpa, rpa, dig_points[3:], None)
    assert_true(info['dig'])
    idx = [d['ident'] for d in info['dig']].index(FIFF.FIFFV_POINT_NASION)
    assert_array_equal(info['dig'][idx]['r'],
                       np.array([1.3930, 13.1613, -4.6967]))
    # points with the wrong number of coordinates must raise ValueError,
    # for each positional slot in turn
    assert_raises(ValueError, _make_dig_points, nasion[:2])
    assert_raises(ValueError, _make_dig_points, None, lpa[:2])
    assert_raises(ValueError, _make_dig_points, None, None, rpa[:2])
    assert_raises(ValueError, _make_dig_points, None, None, None,
                  dig_points[:, :2])
    assert_raises(ValueError, _make_dig_points, None, None, None, None,
                  dig_points[:, :2])
Example #5
0
def test_em_gmm_cv():
    """Comparison of different GMMs using cross-validation."""
    # generate some data: two Gaussian blobs, with separate train and
    # test samples drawn from the same mixture
    dim = 2
    xtrain = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim)))
    xtest = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(1000, dim)))

    #estimate different GMMs for xtrain, and test it on xtest
    prec_type = 'full'
    k, maxiter, delta = 2, 300, 1.e-4
    ll = []

    # model 1: k=2, full covariance -> ll[0]
    lgmm = GMM(k,dim,prec_type)
    lgmm.initialize(xtrain)
    bic = lgmm.estimate(xtrain,maxiter, delta)
    ll.append(lgmm.test(xtest).mean())

    # model 2: k=2, diagonal covariance -> ll[1]
    prec_type = 'diag'
    lgmm = GMM(k, dim, prec_type)
    lgmm.initialize(xtrain)
    bic = lgmm.estimate(xtrain, maxiter, delta)
    ll.append(lgmm.test(xtest).mean())

    # k = 1, 3, 10 (diagonal) -> ll[2], ll[3], ll[4]
    # NOTE(review): these models are initialized but never `estimate`d
    # before scoring -- confirm this is intentional.
    for  k in [1, 3, 10]:
        lgmm = GMM(k,dim,prec_type)
        lgmm.initialize(xtrain)
        ll.append(lgmm.test(xtest).mean())

    # the un-estimated k=10 model (ll[4]) must score worse than the
    # fitted k=2 diagonal model (ll[1])
    assert_true(ll[4] < ll[1])
def test_dict_assignment():
    """Assigning a dict to a DictTrait must store the object itself.

    Mutating the original dict afterwards has to be visible through the
    trait, proving no copy was taken.
    """
    source = dict()
    trait = DictTrait()
    trait.value = source
    source["a"] = 5
    nt.assert_equal(source, trait.value)
    nt.assert_true(trait.value is source)
 def test_valid_signature(self):
     """Each stored RSA-SHA1 example must produce the expected OAuth
     signature and verify against both the X.509 certificate and the
     bare RSA public key."""
     for example in self._examples:
         client_shared_secret = example["private_key"]
         client_certificate = example["certificate"]
         public_key = example["public_key"]
         url = example["url"]
         method = example["method"]
         oauth_params = example["oauth_params"]
         expected_signature = example["oauth_signature"]
         # Using the RSA private key.
         assert_equal(expected_signature,
                      generate_rsa_sha1_signature(client_shared_secret,
                                              method=method,
                                              url=url,
                                              oauth_params=oauth_params
                                              )
         )
         # Using the X.509 certificate.
         assert_true(verify_rsa_sha1_signature(
             client_certificate, expected_signature,
             method, url, oauth_params))
         # Using the RSA public key.
         assert_true(verify_rsa_sha1_signature(
             public_key, expected_signature,
             method, url, oauth_params))
def test_io_inverse_operator():
    """Test IO of inverse_operator.

    Checks repr, the noise-covariance type, a .gz round trip, warnings
    on non-conventional filenames, and that preparing an inverse
    operator commutes with writing/reading it.
    """
    tempdir = _TempDir()
    inverse_operator = read_inverse_operator(fname_inv)
    x = repr(inverse_operator)
    assert_true(x)
    assert_true(isinstance(inverse_operator['noise_cov'], Covariance))
    # just do one example for .gz, as it should generalize
    _compare_io(inverse_operator, '.gz')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        inv_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_inverse_operator(inv_badname, inverse_operator)
        read_inverse_operator(inv_badname)
    assert_naming(w, 'test_inverse.py', 2)

    # make sure we can write and read
    inv_fname = op.join(tempdir, 'test-inv.fif')
    args = (10, 1. / 9., 'dSPM')
    inv_prep = prepare_inverse_operator(inverse_operator, *args)
    write_inverse_operator(inv_fname, inv_prep)
    inv_read = read_inverse_operator(inv_fname)
    _compare(inverse_operator, inv_read)
    # preparing after reading must equal preparing before writing
    inv_read_prep = prepare_inverse_operator(inv_read, *args)
    _compare(inv_prep, inv_read_prep)
    # preparing an already-prepared operator must be idempotent
    inv_prep_prep = prepare_inverse_operator(inv_prep, *args)
    _compare(inv_prep, inv_prep_prep)
def _compare(a, b):
    """Recursively compare two python objects.

    Dict/Info values: same keys (minus ``skip_types``) with recursively
    equal values; lists: element-wise; CSR sparse matrices and ndarrays:
    numeric comparison; everything else: ``assert_equal``.  On failure
    the key path tracked in the module-level ``last_keys`` is printed to
    aid debugging.
    """
    global last_keys
    # keys that are allowed to differ between the two objects
    skip_types = ['whitener', 'proj', 'reginv', 'noisenorm', 'nchan',
                  'command_line', 'working_dir', 'mri_file', 'mri_id']
    try:
        if isinstance(a, (dict, Info)):
            assert_true(isinstance(b, (dict, Info)))
            for k, v in six.iteritems(a):
                if k not in b and k not in skip_types:
                    raise ValueError('First one had one second one didn\'t:\n'
                                     '%s not in %s' % (k, b.keys()))
                if k not in skip_types:
                    # rotate the current key to the front of last_keys
                    # (assumes last_keys was pre-populated by the caller)
                    last_keys.pop()
                    last_keys = [k] + last_keys
                    _compare(v, b[k])
            for k, v in six.iteritems(b):
                if k not in a and k not in skip_types:
                    raise ValueError('Second one had one first one didn\'t:\n'
                                     '%s not in %s' % (k, a.keys()))
        elif isinstance(a, list):
            assert_true(len(a) == len(b))
            for i, j in zip(a, b):
                _compare(i, j)
        elif isinstance(a, sparse.csr.csr_matrix):
            assert_array_almost_equal(a.data, b.data)
            # NOTE(review): assert_equal on the indices/indptr arrays
            # relies on the in-scope assert_equal handling ndarray
            # comparison; assert_array_equal would be unambiguous --
            # confirm which helper is imported before changing.
            assert_equal(a.indices, b.indices)
            assert_equal(a.indptr, b.indptr)
        elif isinstance(a, np.ndarray):
            assert_array_almost_equal(a, b)
        else:
            assert_equal(a, b)
    except Exception:
        # print the key path where the mismatch happened, then re-raise
        print(last_keys)
        raise
Example #10
0
    def test_similarity_lookup(self):
        """Test Similarity Lookup.

        Tests that a similarity lookup for a kindle returns more than
        five results (the assertion checks ``len(products) > 5``).
        """
        products = self.amazon.similarity_lookup(ItemId="B0051QVF7A")
        assert_true(len(products) > 5)
Example #11
0
def test_check_threshold():
    """check_threshold must reject malformed threshold strings and
    non-number/non-string thresholds, and compute percentile scores."""
    adjacency_matrix = np.array([[1., 2.],
                                 [2., 1.]])
    name = 'edge_threshold'
    calculate = 'fast_abs_percentile'
    # a few not correctly formatted strings for 'edge_threshold'
    wrong_edge_thresholds = ['0.1', '10', '10.2.3%', 'asdf%']
    for wrong_edge_threshold in wrong_edge_thresholds:
        assert_raises_regex(ValueError,
                            '{0}.+should be a number followed by '
                            'the percent sign'.format(name),
                            check_threshold,
                            wrong_edge_threshold, adjacency_matrix,
                            calculate, name)

    # a value that is neither a number nor a string is a TypeError
    threshold = object()
    assert_raises_regex(TypeError,
                        '{0}.+should be either a number or a string'.format(name),
                        check_threshold,
                        threshold, adjacency_matrix,
                        calculate, name)

    # To check if it also gives the score which is expected: the 50th
    # abs percentile of [1, 2, 2, 1] lies in (1, 2]
    assert_true(1. < check_threshold("50%", adjacency_matrix,
                                     percentile_calculate=fast_abs_percentile,
                                     name='threshold') <= 2.)
Example #12
0
def test_unicode_decode_error():
    """Vectorizers with the default strict decode_error must raise
    UnicodeDecodeError when the bytes don't match the declared encoding."""
    # decode_error default to strict, so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mang\xe9 du kangourou  ce midi, c'\xe9tait pas tr\xeas bon."
    text_bytes = text.encode('utf-8')

    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, wa, text_bytes)

    # same check for the character n-gram analyzer
    ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                         encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, ca, text_bytes)

    # Check the old interface: `charset` is the deprecated alias of
    # `encoding` and must emit exactly one DeprecationWarning
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")

        ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                             charset='ascii').build_analyzer()
        assert_raises(UnicodeDecodeError, ca, text_bytes)

        assert_equal(len(w), 1)
        assert_true(issubclass(w[0].category, DeprecationWarning))
        assert_true("charset" in str(w[0].message).lower())
Example #13
0
def test_WilsonLT_Defaults_attrs1():
    '''Confirm default geo_all equivalence in derived classes with base.

    Both the base-class and sub-class ``geos_all`` attributes must be
    supersets of the expected geometry strings below.
    '''
    geos_all = [
        '0-0-2000',
        '0-0-1000',
        '1000-0-0',
        '600-0-800',
        '600-0-400S',
        '500-500-0',
        '400-[200]-0',
        '400-200-800',
        '400-[200]-800',
        '400-200-400S',
        '400-[100,100]-0',
        '500-[250,250]-0',
        '400-[100,100]-800',
        '400-[100,100]-400S',
        '400-[100,100,100]-800',
        '500-[50,50,50,50]-0',
        '400-[100,100,100,100]-800',
        '400-[100,100,100,100,100]-800'
    ]
    default_attr1 = bdft.geos_all                          # Base attribute
    default_attr2 = dft.geos_all                           # Sub-class attribute
    expected = geos_all
    #print(set(default_dict))
    #print(set(expected)
    # Allows extension in BaseDefaults().geo_inputs
    actual1 = (set(default_attr1) >= set(expected))
    actual2 = (set(default_attr2) >= set(expected))
    #print(actual1)
    # TODO: is this supposed to be assert_true?
    # NOTE(review): the second positional argument of nt.assert_true is
    # the failure message, so `expected` here only affects error output.
    nt.assert_true(actual1, expected)
    nt.assert_true(actual2, expected)
Example #14
0
    def test_multiple_fit(self):
        """Test multiple calls of fit w/ different shaped inputs."""
        clf = self.factory(alpha=0.01, n_iter=5, shuffle=False)
        clf.fit(X, Y)
        # fitting must create the learned coefficients
        assert_true(hasattr(clf, "coef_"))

        # refitting with one fewer feature must not raise
        clf.fit(X[:, :-1], Y)
Example #15
0
    def test_default_diverging_vlims(self):
        """Default kwargs on normalized data give a diverging colormap:
        vmin/vmax symmetric about zero and the divergent flag set."""
        mapper = mat._HeatMapper(self.df_norm, **self.default_kws)
        extreme = max(abs(self.x_norm.min()), abs(self.x_norm.max()))
        nt.assert_equal(mapper.vmin, -extreme)
        nt.assert_equal(mapper.vmax, extreme)
        nt.assert_true(mapper.divergent)
Example #16
0
def test_netcdf_write():
    """Test generic write_netcdf.

    Writes a 4x3 RasterModelGrid field to a NETCDF4 file and verifies
    the dimensions, the coordinate variables, and the field values read
    back from disk.
    """
    if not WITH_NETCDF4:
        raise SkipTest('netCDF4 package not installed')

    field = RasterModelGrid(4, 3)
    field.add_field('node', 'topographic__elevation', np.arange(12.))

    with cdtemp() as _:
        write_netcdf('test.nc', field, format='NETCDF4')
        root = nc.Dataset('test.nc', 'r', format='NETCDF4')

        assert_equal(set(root.dimensions), set(['ni', 'nj', 'nt']))
        assert_equal(len(root.dimensions['ni']), 3)
        assert_equal(len(root.dimensions['nj']), 4)
        # BUG FIX: this was `assert_true(len(...), 1)`, which passes for
        # any truthy length because assert_true's second argument is
        # only the failure message -- use assert_equal to really check.
        assert_equal(len(root.dimensions['nt']), 1)
        assert_true(root.dimensions['nt'].isunlimited())

        assert_equal(set(root.variables),
                     set(['x', 'y', 'topographic__elevation']))

        # node coordinates are laid out row-major over the 4x3 grid
        assert_array_equal(root.variables['x'][:].flat,
                           np.array([0., 1., 2., 0., 1., 2., 0., 1., 2.,
                                     0., 1., 2., ]))
        assert_array_equal(root.variables['y'][:].flat,
                           np.array([0., 0., 0., 1., 1., 1., 2., 2., 2.,
                                     3., 3., 3., ]))
        assert_array_equal(root.variables['topographic__elevation'][:].flat,
                           field.at_node['topographic__elevation'])

        root.close()
Example #17
0
def test_decimate():
    """Test decimation of digitizer headshapes with too many points."""
    # load headshape and convert to meters
    # NOTE(review): rr is scaled by 100 for the "mm" array but divided
    # by 1000 for meters; the mismatched factors suggest rr is not in mm
    # to begin with -- confirm units of _get_ico_surface output.
    hsp_mm = _get_ico_surface(5)['rr'] * 100
    hsp_m = hsp_mm / 1000.

    # save headshape to a file in mm in temporary directory
    tempdir = _TempDir()
    sphere_hsp_path = op.join(tempdir, 'test_sphere.txt')
    np.savetxt(sphere_hsp_path, hsp_mm)

    # read in raw data using spherical hsp, and extract new hsp
    with warnings.catch_warnings(record=True) as w:
        raw = read_raw_kit(sqd_path, mrk_path, elp_txt_path, sphere_hsp_path)
    # a warning about too many points must have been emitted
    assert_true(any('more than' in str(ww.message) for ww in w))
    # collect headshape from raw (should now be in m)
    hsp_dec = np.array([dig['r'] for dig in raw.info['dig']])[8:]

    # with 10242 points and _decimate_points set to resolution of 5 mm, hsp_dec
    # should be a bit over 5000 points. If not, something is wrong or
    # decimation resolution has been purposefully changed
    assert_true(len(hsp_dec) > 5000)

    # should have similar size, distance from center
    dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0))**2, axis=1))
    dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0))**2, axis=1))
    hsp_rad = np.mean(dist)
    hsp_dec_rad = np.mean(dist_dec)
    assert_almost_equal(hsp_rad, hsp_dec_rad, places=3)
Example #18
0
 def test_incremental(self):
     """A blank line must terminate an open cell magic."""
     sp = self.sp
     sp.push('%%cellm line2\n')
     # cell magic is still open, more input is accepted
     nt.assert_true(sp.push_accepts_more()) #1
     sp.push('\n')
     # In this case, a blank line should end the cell magic
     nt.assert_false(sp.push_accepts_more()) #2
Example #19
0
def test_miyawaki2008():
    """Fetch the (mocked) Miyawaki 2008 dataset and sanity-check its
    layout: run counts, mask type, ROI count and requested URLs."""
    bunch = func.fetch_miyawaki2008(data_dir=tmpdir, verbose=0)
    # 32 functional runs, each with a matching label file
    assert_equal(len(bunch.func), 32)
    assert_equal(len(bunch.label), 32)
    assert_true(isinstance(bunch.mask, _basestring))
    assert_equal(len(bunch.mask_roi), 38)
    # exactly one URL should have been requested from the mock
    assert_equal(len(mock_url_request.urls), 1)
Example #20
0
def test_ica_reset():
    """Test ICA resetting"""
    raw = Raw(raw_fname).crop(0.5, stop, False)
    raw.load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]

    # attributes that only exist after fit() has been called
    run_time_attrs = (
        '_pre_whitener',
        'unmixing_matrix_',
        'mixing_matrix_',
        'n_components_',
        'n_samples_',
        'pca_components_',
        'pca_explained_variance_',
        'pca_mean_'
    )
    # max_iter=1 keeps the test fast; convergence warnings are ignored
    with warnings.catch_warnings(record=True):
        ica = ICA(
            n_components=3, max_pca_components=3, n_pca_components=3,
            method='fastica', max_iter=1).fit(raw, picks=picks)

    # fit() must populate every run-time attribute; _reset() removes all
    assert_true(all(hasattr(ica, attr) for attr in run_time_attrs))
    ica._reset()
    assert_true(not any(hasattr(ica, attr) for attr in run_time_attrs))
Example #21
0
def testR(d=None, size=500):
    """Fit ``response ~ d.formula`` in R and compare coefficient names.

    Parameters
    ----------
    d : design, optional
        Categorical design; a fresh ``simple()`` design when None.
    size : int
        Number of rows to simulate.

    Returns
    -------
    d, X, nR, nF : the design, simulated data, and the sorted term-name
        lists from R and from the Python formula (asserted equal).
    """
    # BUG FIX: the original default `d=simple()` was evaluated once at
    # function-definition time and shared across every call (mutable
    # default-argument pitfall); build a fresh design per call instead.
    if d is None:
        d = simple()

    X = random_from_categorical_formula(d, size)

    X = ML.rec_append_fields(X, 'response', np.random.standard_normal(size))
    fname = tempfile.mktemp()
    ML.rec2csv(X, fname)
    # fit the linear model in R and capture the coefficient names
    Rstr = '''
    data = read.table("%s", sep=',', header=T)
    cur.lm = lm(response ~ %s, data)
    COEF = coef(cur.lm)
    ''' % (fname, d.Rstr)
    rpy2.robjects.r(Rstr)
    remove(fname)
    nR = list(np.array(rpy2.robjects.r("names(COEF)")))

    nt.assert_true('(Intercept)' in nR)
    nR.remove("(Intercept)")
    # normalize Python term names to R's interaction syntax
    nF = [str(t).replace("_","").replace("*",":") for t in d.formula.terms]

    # sort factors within interactions so ordering does not matter
    nR = sorted([sorted(n.split(":")) for n in nR])

    nt.assert_true('1' in nF)
    nF.remove('1')

    nF = sorted([sorted(n.split(":")) for n in nF])
    nt.assert_equal(nR, nF)

    return d, X, nR, nF
Example #22
0
 def test_validate_name(self):
   """Creating a document directory whose name contains an invalid
   character must be rejected with status -1 and an explanation."""
   # Test invalid names
   invalid_name = '/invalid'
   response = self.client.post('/desktop/api2/doc/mkdir', {'parent_uuid': json.dumps(self.home_dir.uuid), 'name': json.dumps(invalid_name)})
   data = json.loads(response.content)
   assert_equal(-1, data['status'], data)
   assert_true('invalid character' in data['message'])
def given_a_created_product_with_name_group1(step, product_id):
    """BDD step: create a product named *product_id* through the API and
    store the returned product name in ``world.product_id``."""
    body = dict_to_xml(default_product(name=product_id))
    response = api_utils.add_new_product(headers=world.headers, body=body)
    # include the response body in the failure message for debugging
    assert_true(response.ok, response.content)
    # BUG FIX: the bare `print response.content` statement is a
    # SyntaxError on Python 3; the call form works on both 2 and 3.
    print(response.content)
    world.product_id = response.json()[PRODUCT_NAME]
Example #24
0
def step_impl(context):
    """Selenium/behave step: select from just after the title's start
    label to past its first letter, then record the expected selection
    and caret position on ``context`` for later steps to verify."""
    driver = context.driver
    util = context.util

    element, parent, parent_text = get_element_parent_and_parent_text(
        driver, ".__start_label._title_label")

    # This is where our selection will end
    end = util.element_screen_center(element)
    end["left"] += 2  # Move it off-center for this test

    element.click()
    wedutil.wait_for_caret_to_be_in(util, parent)

    # From the label to before the first letter and then past the
    # first letter.
    ActionChains(driver)\
        .send_keys(*[Keys.ARROW_RIGHT] * 2)\
        .perform()

    # We need to get the location of the caret.
    start = wedutil.caret_selection_pos(driver)

    select_text(context, start, end)

    assert_true(util.is_something_selected(), "something must be selected")

    # expose what later steps should check: the first character only
    context.expected_selection = parent_text[0:1]
    context.selection_parent = parent
    context.caret_screen_position = wedutil.caret_screen_pos(driver)
Example #25
0
 def test_registered_classes_can_be_set_as_attrs(self):
     """Assigning a plain dict to a registered app-data attribute must
     wrap it in the registered container class and store it under the
     registered key."""
     app_registry.register('dummy', DummyAppDataContainer)
     article = Article()
     article.app_data.dummy = {'answer': 42}
     tools.assert_true(
         isinstance(article.app_data.dummy, DummyAppDataContainer))
     tools.assert_equals(DummyAppDataContainer(article, {'answer': 42}),
                         article.app_data.dummy)
     tools.assert_equals({'dummy': {'answer': 42}}, article.app_data)
Example #26
0
def step_impl(context, direction):
    """Selenium/behave step: select two characters of the title with
    shifted arrow keys, either forwards (empty direction) or backwards,
    and record the expected selection on ``context``."""
    direction = direction.strip()
    driver = context.driver
    util = context.util

    if direction == "":
        keys = (
            # From the label to before the first letter and then past the
            # first letter.
            [Keys.ARROW_RIGHT] * 2 +
            # This moves two caracters to the right with shift down.
            [Keys.SHIFT] + [Keys.ARROW_RIGHT] * 2 + [Keys.SHIFT])
    elif direction == "backwards":
        keys = (
            # From the label to before the first letter and then past the
            # first letter, and then two more to the right.
            [Keys.ARROW_RIGHT] * (2 + 2) +
            # This moves two caracters to the left with shift down.
            [Keys.SHIFT] + [Keys.ARROW_LEFT] * 2 + [Keys.SHIFT])
    else:
        raise ValueError("unexpected direction: " + direction)

    element, _, parent_text = get_element_parent_and_parent_text(
        driver, ".__start_label._title_label")

    ActionChains(driver)\
        .click(element)\
        .perform()

    util.send_keys(element, keys)

    assert_true(util.is_something_selected(), "something must be selected")

    # either way, the characters at indices 1 and 2 end up selected
    context.expected_selection = parent_text[1:3]
Example #27
0
def step_impl(context, what):
    """Selenium/behave step: select the entire text of the element named
    by *what* using shifted arrow keys, verify the selection, and record
    it on ``context`` for later steps."""
    driver = context.driver
    util = context.util

    # "an element" is an alias for the first title element
    if what == "an element":
        what = "the first title element"

    if what == "the first title element":
        selector = ".__start_label._title_label"
    elif what == 'the first paragraph in "body"':
        selector = ".body .__start_label._p_label"
    else:
        raise ValueError("unknown value for what: " + what)

    element, parent, parent_text = get_element_parent_and_parent_text(
        driver, selector)

    ActionChains(driver)\
        .click(element) \
        .perform()

    util.send_keys(element,
                   # From the label to before the first letter.
                   [Keys.ARROW_RIGHT] +
                   # This select the whole text of the element.
                   [Keys.SHIFT] + [Keys.ARROW_RIGHT] * len(parent_text) +
                   [Keys.SHIFT])

    assert_true(util.is_something_selected(), "something must be selected")
    text = util.get_selection_text()
    assert_equal(text, parent_text, "expected selection")

    # record what later steps should verify
    context.expected_selection = text
    context.selection_parent = parent
    context.caret_screen_position = wedutil.caret_screen_pos(driver)
def given_a_created_product_with_attributes_and_name_group1(step, product_id):
    """BDD step: create a product named *product_id* carrying two default
    metadata entries, and remember its name in ``world.product_id``."""
    metadata_entries = create_default_metadata_or_attributes_list(2)
    payload = dict_to_xml(
        default_product(name=product_id, metadata=metadata_entries))
    response = api_utils.add_new_product(headers=world.headers, body=payload)
    # include the response body in the failure message for debugging
    assert_true(response.ok, response.content)
    world.product_id = response.json()[PRODUCT_NAME]
Example #29
0
def step_impl(context, what):
    """Selenium/behave step: click the end label selected by *what* and
    record the clicked element, its parent, and the parent's text on
    ``context`` for later verification."""
    driver = context.driver
    util = context.util

    if what in ("an element's label", "the end label of an element"):
        selector = ".__end_label._title_label"
    elif what == "the end label of the last paragraph":
        selector = ".body .__end_label._p_label:last"
    elif what == 'the end label of the first "addrLine" element':
        selector = ".__end_label._addrLine_label"
    else:
        raise Exception("unknown choice: " + what)

    # Faster than using 4 Selenium operations.
    button, parent, button_class, parent_text = driver.execute_script("""
    var selector = arguments[0];

    var button = jQuery(selector)[0];
    var parent = button.parentNode;
    var parent_text = jQuery(parent).contents().filter(function() {
       return this.nodeType == Node.TEXT_NODE;
    }).text();
    return [button, parent, button.className, parent_text];
    """, selector)
    context.clicked_element = button
    context.clicked_element_parent = parent
    context.clicked_element_parent_initial_text = parent_text
    # the label must not already be in the clicked state before we click
    assert_true("_label_clicked" not in button_class.split())
    ActionChains(driver)\
        .click(button)\
        .perform()
    context.context_menu_for = None
Example #30
0
def test_ica_rank_reduction():
    """Test recovery of full data when no source is rejected"""
    # Most basic recovery
    raw = Raw(raw_fname).crop(0.5, stop, False)
    raw.load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    n_components = 5
    max_pca_components = len(picks)
    for n_pca_components in [6, 10]:
        with warnings.catch_warnings(record=True):  # non-convergence
            warnings.simplefilter('always')
            ica = ICA(n_components=n_components,
                      max_pca_components=max_pca_components,
                      n_pca_components=n_pca_components,
                      method='fastica', max_iter=1).fit(raw, picks=picks)

        # the raw data starts at full rank (one per picked channel)
        rank_before = raw.estimate_rank(picks=picks)
        assert_equal(rank_before, len(picks))
        raw_clean = ica.apply(raw, copy=True)
        rank_after = raw_clean.estimate_rank(picks=picks)
        # interaction between ICA rejection and PCA components difficult
        # to predict. rank_after often seems to be 1 higher than
        # n_pca_components
        assert_true(n_components < n_pca_components <= rank_after <=
                    rank_before)
Example #31
0
  def test_copy(self):
    """Copying one path and then several paths at once through the
    filebrowser /copy endpoint must leave the sources intact and create
    the corresponding targets under the destination."""
    prefix = self.cluster.fs_prefix + '/test-copy'

    PATH_1 = '%s/1' % prefix
    PATH_2 = '%s/2' % prefix
    SUB_PATH1_1 = '%s/1' % PATH_1
    SUB_PATH1_2 = '%s/2' % PATH_1
    SUB_PATH1_3 = '%s/3' % PATH_1
    SUB_PATH2_1 = '%s/1' % PATH_2
    SUB_PATH2_2 = '%s/2' % PATH_2
    SUB_PATH2_3 = '%s/3' % PATH_2
    self.cluster.fs.mkdir(prefix)
    self.cluster.fs.mkdir(PATH_1)
    self.cluster.fs.mkdir(PATH_2)
    self.cluster.fs.mkdir(SUB_PATH1_1)
    self.cluster.fs.mkdir(SUB_PATH1_2)
    self.cluster.fs.mkdir(SUB_PATH1_3)

    # initial layout: sources exist under PATH_1, PATH_2 is empty
    assert_true(self.cluster.fs.exists(SUB_PATH1_1))
    assert_true(self.cluster.fs.exists(SUB_PATH1_2))
    assert_true(self.cluster.fs.exists(SUB_PATH1_3))
    assert_false(self.cluster.fs.exists(SUB_PATH2_1))
    assert_false(self.cluster.fs.exists(SUB_PATH2_2))
    assert_false(self.cluster.fs.exists(SUB_PATH2_3))

    # copy a single directory; the source must remain in place
    self.c.post('/filebrowser/copy', dict(src_path=[SUB_PATH1_1], dest_path=PATH_2))
    assert_true(self.cluster.fs.exists(SUB_PATH1_1))
    assert_true(self.cluster.fs.exists(SUB_PATH1_2))
    assert_true(self.cluster.fs.exists(SUB_PATH1_3))
    assert_true(self.cluster.fs.exists(SUB_PATH2_1))
    assert_false(self.cluster.fs.exists(SUB_PATH2_2))
    assert_false(self.cluster.fs.exists(SUB_PATH2_3))

    # copy multiple directories in one request
    self.c.post('/filebrowser/copy', dict(src_path=[SUB_PATH1_2, SUB_PATH1_3], dest_path=PATH_2))
    assert_true(self.cluster.fs.exists(SUB_PATH1_1))
    assert_true(self.cluster.fs.exists(SUB_PATH1_2))
    assert_true(self.cluster.fs.exists(SUB_PATH1_3))
    assert_true(self.cluster.fs.exists(SUB_PATH2_1))
    assert_true(self.cluster.fs.exists(SUB_PATH2_2))
    assert_true(self.cluster.fs.exists(SUB_PATH2_3))
Example #32
0
def test_Entity():
    """Test the basics of creating and accessing properties on an entity.

    Exercises attribute-style, item-style, and properties/annotations
    access; modification, creation, and wholesale replacement of
    annotations; and unicode values.  Runs twice: once on a directly
    constructed Entity and once on one rebuilt via Entity.create.
    """
    for i in range(2):
        e = Entity(name='Test object',
                   description='I hope this works',
                   annotations=dict(foo=123,
                                    nerds=['chris', 'jen', 'janey'],
                                    annotations='How confusing!'),
                   properties=dict(
                       annotations='/repo/v1/entity/syn1234/annotations',
                       md5='cdef636522577fc8fb2de4d95875b27c',
                       parentId='syn1234'),
                   concreteType='org.sagebionetworks.repo.model.Data')

        # Should be able to create an Entity from an Entity
        if i == 1:
            e = Entity.create(e)

        # the same property is reachable four equivalent ways
        assert_equals(e.parentId, 'syn1234')
        assert_equals(e['parentId'], 'syn1234')
        assert_equals(e.properties['parentId'], 'syn1234')
        assert_equals(e.properties.parentId, 'syn1234')

        # annotations are reachable the same four ways
        assert_equals(e.foo, 123)
        assert_equals(e['foo'], 123)
        assert_equals(e.annotations['foo'], 123)
        assert_equals(e.annotations.foo, 123)

        assert_true(hasattr(e, 'parentId'))
        assert_true(hasattr(e, 'foo'))
        assert_false(hasattr(e, 'qwerqwer'))

        # Annotations is a bit funny, because there is a property call
        # 'annotations', which will be masked by a member of the object
        # called 'annotations'. Because annotations are open-ended, we
        # might even have an annotations called 'annotations', which gets
        # really confusing.
        # NOTE(review): collections.Mapping moved to collections.abc in
        # Python 3.3 and was removed in 3.10 -- update if targeting 3.10+.
        assert_is_instance(e.annotations, collections.Mapping)
        assert_is_instance(e['annotations'], collections.Mapping)
        assert_equals(e.properties['annotations'],
                      '/repo/v1/entity/syn1234/annotations')
        assert_equals(e.properties.annotations,
                      '/repo/v1/entity/syn1234/annotations')
        assert_equals(e.annotations.annotations, 'How confusing!')
        assert_equals(e.annotations['annotations'], 'How confusing!')
        assert_equals(e.nerds, ['chris', 'jen', 'janey'])
        assert_true(
            all([
                k in e for k in [
                    'name', 'description', 'foo', 'nerds', 'annotations',
                    'md5', 'parentId'
                ]
            ]))

        # Test modifying properties
        e.description = 'Working, so far'
        assert_equals(e['description'], 'Working, so far')
        e['description'] = 'Wiz-bang flapdoodle'
        assert_equals(e.description, 'Wiz-bang flapdoodle')

        # Test modifying annotations
        e.foo = 999
        assert_equals(e.annotations['foo'], 999)
        e['foo'] = 12345
        assert_equals(e.annotations.foo, 12345)

        # Test creating a new annotation
        e['bar'] = 888
        assert_equals(e.annotations['bar'], 888)
        e['bat'] = 7788
        assert_equals(e.annotations['bat'], 7788)

        # Test replacing annotations object
        e.annotations = {
            'splat': 'a totally new set of annotations',
            'foo': 456
        }
        assert_equals(e.foo, 456)
        assert_equals(e['foo'], 456)
        assert_is_instance(e.annotations, collections.Mapping)
        assert_is_instance(e['annotations'], collections.Mapping)
        assert_equals(e.annotations.foo, 456)
        # the 'annotations' *property* must be untouched by the replace
        assert_equals(e.properties['annotations'],
                      '/repo/v1/entity/syn1234/annotations')
        assert_equals(e.properties.annotations,
                      '/repo/v1/entity/syn1234/annotations')

        # test unicode properties
        e.train = '時刻表には記載されない 月への列車が来ると聞いて'
        e.band = "Motörhead"
        e.lunch = "すし"
Example #33
0
def test_attrs():
    """A File built with extra keyword args must expose them alongside
    the standard ``parentId`` and ``path`` attributes."""
    file_entity = File('foo.xyz', parent='syn1234', foo='bar')
    for attribute in ('parentId', 'foo', 'path'):
        assert_true(hasattr(file_entity, attribute))
Example #34
0
 def test_known_attribute(self):
     """An attribute declared on CourseDescriptor (show_chat) must be
     parsed into a field and excluded from xml_attributes."""
     assert_true(hasattr(CourseDescriptor, 'show_chat'))
     course = self.process_xml(CourseFactory.build(show_chat='true'))
     assert_true(course.show_chat)
     # known fields must not leak into the catch-all xml_attributes dict
     assert_not_in('show_chat', course.xml_attributes)
Example #35
0
 def test_generates_tokens(self):
     """Tokenizing a simple expression must yield only Token or OpToken
     instances."""
     test_string = '1 + 1'
     # isinstance accepts a tuple of classes; clearer and cheaper than
     # two chained isinstance calls joined with `or`.
     assert_true(all(isinstance(item, (Token, OpToken))
                     for item in Tokenizer(test_string)))
Example #36
0
def test_copy_files():
    """Workflow submission copies workspace/deployment files correctly.

    Exercises Submission._copy_files() against a shared pseudo-HDFS:
      * workflow.xml and job.properties are written into the deployment dir,
      * node jars are copied into the lib/ subdirectory without removing
        the sources,
      * on a re-copy, jars already under the deployment dir's lib/
        (udf4, udf6) keep their fileId while the others are refreshed,
    and Submission._create_file() writes a plain file into the dir.

    NOTE(review): requires a live shared cluster fixture; everything is
    cleaned up (best-effort) in the finally block.
    """
    cluster = pseudo_hdfs4.shared_cluster()

    try:
        c = make_logged_in_client()
        user = User.objects.get(username='******')

        prefix = '/tmp/test_copy_files'

        if cluster.fs.exists(prefix):
            cluster.fs.rmtree(prefix)

        # Jars in various locations
        deployment_dir = '%s/workspace' % prefix
        external_deployment_dir = '%s/deployment' % prefix
        jar_1 = '%s/udf1.jar' % prefix
        jar_2 = '%s/lib/udf2.jar' % prefix
        jar_3 = '%s/udf3.jar' % deployment_dir
        jar_4 = '%s/lib/udf4.jar' % deployment_dir  # Doesn't move
        jar_5 = 'udf5.jar'
        jar_6 = 'lib/udf6.jar'  # Doesn't move

        cluster.fs.mkdir(prefix)
        cluster.fs.create(jar_1)
        cluster.fs.create(jar_2)
        cluster.fs.create(jar_3)
        cluster.fs.create(jar_4)
        cluster.fs.create(deployment_dir + '/' + jar_5)
        cluster.fs.create(deployment_dir + '/' + jar_6)

        # Minimal stand-in for a workflow job: one node per jar location.
        class MockJob():
            XML_FILE_NAME = 'workflow.xml'

            def __init__(self):
                self.deployment_dir = deployment_dir
                self.nodes = [
                    Node({
                        'id': '1',
                        'type': 'mapreduce',
                        'properties': {
                            'jar_path': jar_1
                        }
                    }),
                    Node({
                        'id': '2',
                        'type': 'mapreduce',
                        'properties': {
                            'jar_path': jar_2
                        }
                    }),
                    Node({
                        'id': '3',
                        'type': 'java',
                        'properties': {
                            'jar_path': jar_3
                        }
                    }),
                    Node({
                        'id': '4',
                        'type': 'java',
                        'properties': {
                            'jar_path': jar_4
                        }
                    }),

                    # Workspace relative paths
                    Node({
                        'id': '5',
                        'type': 'java',
                        'properties': {
                            'jar_path': jar_5
                        }
                    }),
                    Node({
                        'id': '6',
                        'type': 'java',
                        'properties': {
                            'jar_path': jar_6
                        }
                    })
                ]

        submission = Submission(user,
                                job=MockJob(),
                                fs=cluster.fs,
                                jt=cluster.jt)

        submission._copy_files(deployment_dir, "<xml>My XML</xml>",
                               {'prop1': 'val1'})
        submission._copy_files(external_deployment_dir, "<xml>My XML</xml>",
                               {'prop1': 'val1'})

        assert_true(cluster.fs.exists(deployment_dir + '/workflow.xml'),
                    deployment_dir)
        assert_true(cluster.fs.exists(deployment_dir + '/job.properties'),
                    deployment_dir)

        # All sources still there
        assert_true(cluster.fs.exists(jar_1))
        assert_true(cluster.fs.exists(jar_2))
        assert_true(cluster.fs.exists(jar_3))
        assert_true(cluster.fs.exists(jar_4))
        assert_true(cluster.fs.exists(deployment_dir + '/' + jar_5))
        assert_true(cluster.fs.exists(deployment_dir + '/' + jar_6))

        # Lib
        deployment_dir = deployment_dir + '/lib'
        external_deployment_dir = external_deployment_dir + '/lib'

        list_dir_workspace = cluster.fs.listdir(deployment_dir)
        list_dir_deployement = cluster.fs.listdir(external_deployment_dir)

        # All destinations there
        assert_true(cluster.fs.exists(deployment_dir + '/udf1.jar'),
                    list_dir_workspace)
        assert_true(cluster.fs.exists(deployment_dir + '/udf2.jar'),
                    list_dir_workspace)
        assert_true(cluster.fs.exists(deployment_dir + '/udf3.jar'),
                    list_dir_workspace)
        assert_true(cluster.fs.exists(deployment_dir + '/udf4.jar'),
                    list_dir_workspace)
        assert_true(cluster.fs.exists(deployment_dir + '/udf5.jar'),
                    list_dir_workspace)
        assert_true(cluster.fs.exists(deployment_dir + '/udf6.jar'),
                    list_dir_workspace)

        assert_true(cluster.fs.exists(external_deployment_dir + '/udf1.jar'),
                    list_dir_deployement)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf2.jar'),
                    list_dir_deployement)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf3.jar'),
                    list_dir_deployement)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf4.jar'),
                    list_dir_deployement)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf5.jar'),
                    list_dir_deployement)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf6.jar'),
                    list_dir_deployement)

        # Record fileIds, re-copy, then check which jars were refreshed.
        stats_udf1 = cluster.fs.stats(deployment_dir + '/udf1.jar')
        stats_udf2 = cluster.fs.stats(deployment_dir + '/udf2.jar')
        stats_udf3 = cluster.fs.stats(deployment_dir + '/udf3.jar')
        stats_udf4 = cluster.fs.stats(deployment_dir + '/udf4.jar')
        stats_udf5 = cluster.fs.stats(deployment_dir + '/udf5.jar')
        stats_udf6 = cluster.fs.stats(deployment_dir + '/udf6.jar')

        submission._copy_files('%s/workspace' % prefix, "<xml>My XML</xml>",
                               {'prop1': 'val1'})

        # udf4/udf6 live under the deployment dir's lib/ already ("Doesn't
        # move" above), so their fileId must be unchanged; the rest re-copy.
        assert_not_equal(
            stats_udf1['fileId'],
            cluster.fs.stats(deployment_dir + '/udf1.jar')['fileId'])
        assert_not_equal(
            stats_udf2['fileId'],
            cluster.fs.stats(deployment_dir + '/udf2.jar')['fileId'])
        assert_not_equal(
            stats_udf3['fileId'],
            cluster.fs.stats(deployment_dir + '/udf3.jar')['fileId'])
        assert_equal(stats_udf4['fileId'],
                     cluster.fs.stats(deployment_dir + '/udf4.jar')['fileId'])
        assert_not_equal(
            stats_udf5['fileId'],
            cluster.fs.stats(deployment_dir + '/udf5.jar')['fileId'])
        assert_equal(stats_udf6['fileId'],
                     cluster.fs.stats(deployment_dir + '/udf6.jar')['fileId'])

        # Test _create_file()
        submission._create_file(deployment_dir, 'test.txt', data='Test data')
        assert_true(cluster.fs.exists(deployment_dir + '/test.txt'),
                    list_dir_workspace)

    finally:
        # Best-effort cleanup.  A bare `except:` here would also swallow
        # KeyboardInterrupt/SystemExit; catch Exception and log with lazy
        # %-style logger args instead of eager string formatting.
        try:
            cluster.fs.rmtree(prefix)
        except Exception:
            LOG.exception('failed to remove %s', prefix)
Example #37
0
  def test_listdir(self):
    """Exercise /filebrowser directory listing end to end.

    Covers: missing home directory handling, mkdir/listing of unicode and
    special-character names, URL/path conflicts, default_to_home redirect,
    and home-relative views.

    NOTE(review): relies on self.cluster (shared HDFS fixture) and self.c
    (logged-in test client) being set up outside this method.
    """
    # Delete user's home if there's already something there
    home = self.cluster.fs.do_as_user('test', self.cluster.fs.get_home_dir)
    if self.cluster.fs.exists(home):
      self.cluster.fs.do_as_superuser(self.cluster.fs.rmtree, home)

    response = self.c.get('/filebrowser/')
    # Since we deleted the home directory... home_directory context should be None.
    assert_false(response.context['home_directory'], response.context['home_directory'])

    # Recreate the home dir and hand ownership to the 'test' user.
    self.cluster.fs.do_as_superuser(self.cluster.fs.mkdir, home)
    self.cluster.fs.do_as_superuser(self.cluster.fs.chown, home, 'test', 'test')

    # These paths contain non-ascii characters. Your editor will need the
    # corresponding font library to display them correctly.
    #
    # We test that mkdir can handle unicode strings as well as byte strings.
    # And even when the byte string can't be decoded properly (big5), the listdir
    # still succeeds.
    orig_paths = [
      u'greek-Ελληνικά',
      u'chinese-漢語',
      'listdir%20.,<>~`!@$%^&()_-+="',
    ]

    prefix = home + '/test-filebrowser/'
    for path in orig_paths:
      self.c.post('/filebrowser/mkdir', dict(path=prefix, name=path))

    # Read the parent dir
    response = self.c.get('/filebrowser/view' + prefix)

    dir_listing = response.context['files']
    # +2 accounts for the '.' and '..' entries in the listing.
    assert_equal(len(orig_paths) + 2, len(dir_listing))

    for dirent in dir_listing:
      path = dirent['name']
      if path in ('.', '..'):
        continue

      assert_true(path in orig_paths)

      # Drill down into the subdirectory
      url = urlparse.urlsplit(dirent['url'])[2]
      resp = self.c.get(url)

      # We are actually reading a directory
      assert_equal('.', resp.context['files'][1]['name'])
      assert_equal('..', resp.context['files'][0]['name'])

    # Test's home directory now exists. Should be returned.
    response = self.c.get('/filebrowser/view' + prefix)
    assert_equal(response.context['home_directory'], home)

    # Test URL conflicts with filenames
    stat_dir = '%sstat/dir' % prefix
    self.cluster.fs.do_as_user('test', self.cluster.fs.mkdir, stat_dir)
    response = self.c.get('/filebrowser/view%s' % stat_dir)
    assert_equal(stat_dir, response.context['path'])

    # default_to_home should redirect (Location header) to the home dir.
    response = self.c.get('/filebrowser/view/test-filebrowser/?default_to_home')
    assert_true(re.search('%s$' % home, response['Location']))

    # Test path relative to home directory
    self.cluster.fs.do_as_user('test', self.cluster.fs.mkdir, '%s/test_dir' % home)
    response = self.c.get('/filebrowser/home_relative_view/test_dir')
    assert_equal('%s/test_dir' % home, response.context['path'])
def test_arithmetic():
    """Test evoked arithmetic."""
    ev = read_evokeds(fname, condition=0)
    # Synthetic evokeds sharing the real file's info/times: all-ones with
    # nave=20 and all-minus-ones with nave=10.
    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)

    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
    # data should be added according to their `nave` weights
    # nave = ev1.nave + ev2.nave
    ev = combine_evoked([ev1, ev2], weights='nave')
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    # (20 * 1 + 10 * -1) / (20 + 10) == 1/3
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))

    # with same trial counts, a bunch of things should be equivalent
    for weights in ('nave', 'equal', [0.5, 0.5]):
        ev = combine_evoked([ev1, ev1], weights=weights)
        assert_allclose(ev.data, ev1.data)
        assert_equal(ev.nave, 2 * ev1.nave)
        ev = combine_evoked([ev1, -ev1], weights=weights)
        assert_allclose(ev.data, 0., atol=1e-20)
        assert_equal(ev.nave, 2 * ev1.nave)
    ev = combine_evoked([ev1, -ev1], weights='equal')
    assert_allclose(ev.data, 0., atol=1e-20)
    assert_equal(ev.nave, 2 * ev1.nave)
    ev = combine_evoked([ev1, -ev2], weights='equal')
    # effective nave for equal weighting of unequal trial counts
    expected = int(round(1. / (0.25 / ev1.nave + 0.25 / ev2.nave)))
    assert_equal(expected, 27)  # this is reasonable
    assert_equal(ev.nave, expected)

    # default comment behavior if evoked.comment is None
    old_comment1 = ev1.comment
    old_comment2 = ev2.comment
    ev1.comment = None
    ev = combine_evoked([ev1, -ev2], weights=[1, -1])
    assert_equal(ev.comment.count('unknown'), 2)
    assert_true('-unknown' in ev.comment)
    assert_true(' + ' in ev.comment)
    ev1.comment = old_comment1
    ev2.comment = old_comment2

    # equal weighting
    ev = combine_evoked([ev1, ev2], weights='equal')
    assert_allclose(ev.data, np.zeros_like(ev1.data))

    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
    ev = combine_evoked([ev1, ev2], weights=[1, 0])
    assert_equal(ev.nave, ev1.nave)
    assert_allclose(ev.data, ev1.data)

    # simple subtraction (like in oddball)
    ev = combine_evoked([ev1, ev2], weights=[1, -1])
    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))

    # invalid weight specifications must raise
    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])

    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    # drop a different channel from each so the intersection is ch_names
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
    assert_raises(ValueError, grand_average, [1, evoked1])
Example #39
0
 def test_form_dict_keys(self):
     """Every key of every result dict must be one of the known keys."""
     for result in self.std_results:
         for key in sorted(result):
             assert_true(key in self.known_keys)
def test_suppress_dj_errors():
    """DataJoint errors remain suppressible with python-native blobs off."""
    schema.schema.jobs.delete()
    with dj.config(enable_python_native_blobs=False):
        schema.ErrorClass.populate(reserve_jobs=True, suppress_errors=True)
    n_logged = len(schema.DjExceptionName())
    n_jobs = len(schema.schema.jobs)
    # Every reserved job produced a recorded exception, and some did occur.
    assert_true(n_logged == n_jobs > 0)
Example #41
0
 def test_form_main_list(self):
     """The parsed standard results are returned as a plain list."""
     results = self.std_results
     assert_true(isinstance(results, list))
def test_io_evoked():
    """Test IO for evoked data (fif + gz) with integer and str args."""
    tempdir = _TempDir()
    ave = read_evokeds(fname, 0)

    # round-trip: write then read back and compare all public attributes
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]

    # This not being assert_array_equal due to windows rounding
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave.times, ave2.times)
    assert_equal(ave.nave, ave2.nave)
    assert_equal(ave._aspect_kind, ave2._aspect_kind)
    assert_equal(ave.kind, ave2.kind)
    assert_equal(ave.last, ave2.last)
    assert_equal(ave.first, ave2.first)
    assert_true(repr(ave))

    # test compressed i/o
    ave2 = read_evokeds(fname_gz, 0)
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))

    # test str access
    condition = 'Left Auditory'
    # 'stderr' / 'standard_error' are invalid `kind` values and must raise
    assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
    assert_raises(ValueError,
                  read_evokeds,
                  fname,
                  condition,
                  kind='standard_error')
    ave3 = read_evokeds(fname, condition)
    assert_array_almost_equal(ave.data, ave3.data, 19)

    # test read_evokeds and write_evokeds
    # select the same two conditions three ways: slice, int list, name list
    aves1 = read_evokeds(fname)[1::2]
    aves2 = read_evokeds(fname, [1, 3])
    aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual'])
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
    aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
    for aves in [aves2, aves3, aves4]:
        for [av1, av2] in zip(aves1, aves):
            assert_array_almost_equal(av1.data, av2.data)
            assert_array_almost_equal(av1.times, av2.times)
            assert_equal(av1.nave, av2.nave)
            assert_equal(av1.kind, av2.kind)
            assert_equal(av1._aspect_kind, av2._aspect_kind)
            assert_equal(av1.last, av2.last)
            assert_equal(av1.first, av2.first)
            assert_equal(av1.comment, av2.comment)

    # test warnings on bad filenames
    fname2 = op.join(tempdir, 'test-bad-name.fif')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        write_evokeds(fname2, ave)
        read_evokeds(fname2)
    # both the write and the read should warn about the naming convention
    assert_naming(w, 'test_evoked.py', 2)

    # constructor
    # Evoked cannot be constructed directly from a filename -> TypeError
    assert_raises(TypeError, Evoked, fname)

    # MaxShield
    fname_ms = op.join(tempdir, 'test-ave.fif')
    assert_true(ave.info['maxshield'] is False)
    ave.info['maxshield'] = True
    ave.save(fname_ms)
    # reading MaxShield data without opting in must raise...
    assert_raises(ValueError, read_evokeds, fname_ms)
    # ...with allow_maxshield=True it reads but warns...
    with warnings.catch_warnings(record=True) as w:
        aves = read_evokeds(fname_ms, allow_maxshield=True)
    assert_true(all('Elekta' in str(ww.message) for ww in w))
    assert_true(all(ave.info['maxshield'] is True for ave in aves))
    # ...and allow_maxshield='yes' suppresses the warning entirely
    with warnings.catch_warnings(record=True) as w:
        aves = read_evokeds(fname_ms, allow_maxshield='yes')
    assert_equal(len(w), 0)
    assert_true(all(ave.info['maxshield'] is True for ave in aves))
Example #43
0
def test_coreg_model_with_fsaverage():
    """Test CoregModel with the fsaverage brain data."""
    tempdir = _TempDir()
    from mne.gui._coreg_gui import CoregModel

    # build a default 'fsaverage' subject inside the temp subjects dir
    mne.create_default_subject(subjects_dir=tempdir,
                               fs_home=op.join(subjects_dir, '..'))

    model = CoregModel()
    model.mri.use_high_res_head = False
    model.mri.subjects_dir = tempdir
    model.mri.subject = 'fsaverage'
    assert_true(model.mri.fid_ok)

    model.hsp.file = raw_path
    # baseline distances before any fitting, for later comparison
    lpa_distance = model.lpa_distance
    nasion_distance = model.nasion_distance
    rpa_distance = model.rpa_distance
    avg_point_distance = np.mean(model.point_distance)

    # test hsp point omission
    model.trans_y = -0.008
    model.fit_auricular_points()
    model.omit_hsp_points(0.02)
    assert_equal(model.hsp.n_omitted, 1)
    model.omit_hsp_points(reset=True)
    assert_equal(model.hsp.n_omitted, 0)
    model.omit_hsp_points(0.02, reset=True)
    assert_equal(model.hsp.n_omitted, 1)

    # scale with 1 parameter
    model.n_scale_params = 1

    # each fit should reduce the relevant summed squared distances
    model.fit_scale_auricular_points()
    old_x = lpa_distance**2 + rpa_distance**2
    new_x = model.lpa_distance**2 + model.rpa_distance**2
    assert_true(new_x < old_x)

    model.fit_scale_fiducials()
    old_x = lpa_distance**2 + rpa_distance**2 + nasion_distance**2
    new_x = (model.lpa_distance**2 + model.rpa_distance**2 +
             model.nasion_distance**2)
    assert_true(new_x < old_x)

    model.fit_scale_hsp_points()
    avg_point_distance_1param = np.mean(model.point_distance)
    assert_true(avg_point_distance_1param < avg_point_distance)

    # scaling job
    sdir, sfrom, sto, scale, skip_fiducials, bemsol = \
        model.get_scaling_job('scaled', False, True)
    assert_equal(sdir, tempdir)
    assert_equal(sfrom, 'fsaverage')
    assert_equal(sto, 'scaled')
    assert_equal(scale, model.scale)
    assert_equal(set(bemsol), set(('inner_skull-bem', )))
    # requesting no BEM solutions yields an empty list
    sdir, sfrom, sto, scale, skip_fiducials, bemsol = \
        model.get_scaling_job('scaled', False, False)
    assert_equal(bemsol, [])

    # scale with 3 parameters
    model.n_scale_params = 3
    model.fit_scale_hsp_points()
    # 3-parameter scaling should fit at least as well as 1-parameter
    assert_true(np.mean(model.point_distance) < avg_point_distance_1param)

    # test switching raw disables point omission
    assert_equal(model.hsp.n_omitted, 1)
    with warnings.catch_warnings(record=True):
        model.hsp.file = kit_raw_path
    assert_equal(model.hsp.n_omitted, 0)
Example #44
0
 def test_form_each_dict(self):
     """Each entry in the standard results must be a dict."""
     for entry in self.std_results:
         assert_true(isinstance(entry, dict))
Example #45
0
def test_live_jobtracker():
    """
  Checks that LiveJobTracker never raises
  exceptions for most of its calls.
  """
    minicluster = pseudo_hdfs4.shared_cluster()

    tracker = minicluster.jt
    # Each of these read-only calls must neither raise nor return a
    # falsy value.  Not covered: task_tracker, get_job_counters.
    for probe in (tracker.queues,
                  tracker.cluster_status,
                  tracker.all_task_trackers,
                  tracker.active_trackers,
                  tracker.blacklisted_trackers,
                  tracker.running_jobs,
                  tracker.completed_jobs,
                  tracker.failed_jobs,
                  tracker.all_jobs,
                  tracker.get_current_time):
        assert_true(probe())
Example #46
0
def test_coreg_model():
    """Test CoregModel."""
    from mne.gui._coreg_gui import CoregModel
    tempdir = _TempDir()
    trans_dst = op.join(tempdir, 'test-trans.fif')

    model = CoregModel()
    # saving before any data is loaded must fail
    assert_raises(RuntimeError, model.save_trans, 'blah.fif')

    model.mri.use_high_res_head = False

    model.mri.subjects_dir = subjects_dir
    model.mri.subject = 'sample'

    # fiducials are not OK until all three points are set
    assert_false(model.mri.fid_ok)
    model.mri.lpa = [[-0.06, 0, 0]]
    model.mri.nasion = [[0, 0.05, 0]]
    model.mri.rpa = [[0.08, 0, 0]]
    assert_true(model.mri.fid_ok)

    model.hsp.file = raw_path
    assert_allclose(model.hsp.lpa, [[-7.137e-2, 0, 5.122e-9]], 1e-4)
    assert_allclose(model.hsp.rpa, [[+7.527e-2, 0, 5.588e-9]], 1e-4)
    assert_allclose(model.hsp.nasion, [[+3.725e-9, 1.026e-1, 4.191e-9]], 1e-4)
    assert_true(model.has_fid_data)

    # baseline distances before fitting, for later comparison
    lpa_distance = model.lpa_distance
    nasion_distance = model.nasion_distance
    rpa_distance = model.rpa_distance
    avg_point_distance = np.mean(model.point_distance)

    # each fit should reduce the relevant summed squared distances
    model.fit_auricular_points()
    old_x = lpa_distance**2 + rpa_distance**2
    new_x = model.lpa_distance**2 + model.rpa_distance**2
    assert_true(new_x < old_x)

    model.fit_fiducials()
    old_x = lpa_distance**2 + rpa_distance**2 + nasion_distance**2
    new_x = (model.lpa_distance**2 + model.rpa_distance**2 +
             model.nasion_distance**2)
    assert_true(new_x < old_x)

    model.fit_hsp_points()
    assert_true(np.mean(model.point_distance) < avg_point_distance)

    # saved transform must round-trip through read_trans
    model.save_trans(trans_dst)
    trans = mne.read_trans(trans_dst)
    assert_allclose(trans['trans'], model.head_mri_trans)

    # test restoring trans
    x, y, z, rot_x, rot_y, rot_z = .1, .2, .05, 1.5, 0.1, -1.2
    model.trans_x = x
    model.trans_y = y
    model.trans_z = z
    model.rot_x = rot_x
    model.rot_y = rot_y
    model.rot_z = rot_z
    trans = model.head_mri_trans
    model.reset_traits(
        ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"])
    assert_equal(model.trans_x, 0)
    # set_trans must recover the individual translation/rotation params
    model.set_trans(trans)
    assert_almost_equal(model.trans_x, x)
    assert_almost_equal(model.trans_y, y)
    assert_almost_equal(model.trans_z, z)
    assert_almost_equal(model.rot_x, rot_x)
    assert_almost_equal(model.rot_y, rot_y)
    assert_almost_equal(model.rot_z, rot_z)

    # info
    assert_true(isinstance(model.fid_eval_str, string_types))
    assert_true(isinstance(model.points_eval_str, string_types))

    # scaling job
    sdir, sfrom, sto, scale, skip_fiducials, bemsol = \
        model.get_scaling_job('sample2', False, True)
    assert_equal(sdir, subjects_dir)
    assert_equal(sfrom, 'sample')
    assert_equal(sto, 'sample2')
    assert_equal(scale, model.scale)
    assert_equal(skip_fiducials, False)
    # find BEM files
    bems = set()
    for fname in os.listdir(op.join(subjects_dir, 'sample', 'bem')):
        match = re.match('sample-(.+-bem)\.fif', fname)
        if match:
            bems.add(match.group(1))
    assert_equal(set(bemsol), bems)
    sdir, sfrom, sto, scale, skip_fiducials, bemsol = \
        model.get_scaling_job('sample2', True, False)
    assert_equal(bemsol, [])
    assert_true(skip_fiducials)

    model.load_trans(fname_trans)
Example #47
0
def test_lcmv():
    """Test LCMV with evoked data and single trials
    """
    raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol = _get_data()

    # run the beamformer on both a surface and a volume forward operator
    for fwd in [forward, forward_vol]:
        stc = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01)
        stc.crop(0.02, None)

        # locate the vertex with maximal power and its peak latency
        stc_pow = np.sum(stc.data, axis=1)
        idx = np.argmax(stc_pow)
        max_stc = stc.data[idx]
        tmax = stc.times[np.argmax(max_stc)]

        # peak latency and amplitude should fall in a physiological range
        assert_true(0.09 < tmax < 0.105, tmax)
        assert_true(0.9 < np.max(max_stc) < 3., np.max(max_stc))

        if fwd is forward:
            # Test picking normal orientation (surface source space only)
            stc_normal = lcmv(evoked,
                              forward_surf_ori,
                              noise_cov,
                              data_cov,
                              reg=0.01,
                              pick_ori="normal")
            stc_normal.crop(0.02, None)

            stc_pow = np.sum(np.abs(stc_normal.data), axis=1)
            idx = np.argmax(stc_pow)
            max_stc = stc_normal.data[idx]
            tmax = stc_normal.times[np.argmax(max_stc)]

            assert_true(0.04 < tmax < 0.11, tmax)
            assert_true(0.4 < np.max(max_stc) < 2., np.max(max_stc))

            # The amplitude of normal orientation results should always be
            # smaller than free orientation results
            assert_true((np.abs(stc_normal.data) <= stc.data).all())

        # Test picking source orientation maximizing output source power
        stc_max_power = lcmv(evoked,
                             fwd,
                             noise_cov,
                             data_cov,
                             reg=0.01,
                             pick_ori="max-power")
        stc_max_power.crop(0.02, None)
        stc_pow = np.sum(stc_max_power.data, axis=1)
        idx = np.argmax(stc_pow)
        max_stc = stc_max_power.data[idx]
        tmax = stc.times[np.argmax(max_stc)]

        assert_true(0.09 < tmax < 0.11, tmax)
        assert_true(0.8 < np.max(max_stc) < 3., np.max(max_stc))

        # Maximum output source power orientation results should be similar to
        # free orientation results
        assert_true((stc_max_power.data - stc.data < 1).all())

    # Test if fixed forward operator is detected when picking normal or
    # max-power orientation
    assert_raises(ValueError,
                  lcmv,
                  evoked,
                  forward_fixed,
                  noise_cov,
                  data_cov,
                  reg=0.01,
                  pick_ori="normal")
    assert_raises(ValueError,
                  lcmv,
                  evoked,
                  forward_fixed,
                  noise_cov,
                  data_cov,
                  reg=0.01,
                  pick_ori="max-power")

    # Test if non-surface oriented forward operator is detected when picking
    # normal orientation
    assert_raises(ValueError,
                  lcmv,
                  evoked,
                  forward,
                  noise_cov,
                  data_cov,
                  reg=0.01,
                  pick_ori="normal")

    # Test if volume forward operator is detected when picking normal
    # orientation
    assert_raises(ValueError,
                  lcmv,
                  evoked,
                  forward_vol,
                  noise_cov,
                  data_cov,
                  reg=0.01,
                  pick_ori="normal")

    # Now test single trial using fixed orientation forward solution
    # so we can compare it to the evoked solution
    stcs = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01)
    # generator output must match the list output trial-for-trial
    stcs_ = lcmv_epochs(epochs,
                        forward_fixed,
                        noise_cov,
                        data_cov,
                        reg=0.01,
                        return_generator=True)
    assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)

    epochs.drop_bad_epochs()
    assert_true(len(epochs.events) == len(stcs))

    # average the single trial estimates
    stc_avg = np.zeros_like(stcs[0].data)
    for this_stc in stcs:
        stc_avg += this_stc.data
    stc_avg /= len(stcs)

    # compare it to the solution using evoked with fixed orientation
    stc_fixed = lcmv(evoked, forward_fixed, noise_cov, data_cov, reg=0.01)
    assert_array_almost_equal(stc_avg, stc_fixed.data)

    # use a label so we have few source vertices and delayed computation is
    # not used
    stcs_label = lcmv_epochs(epochs,
                             forward_fixed,
                             noise_cov,
                             data_cov,
                             reg=0.01,
                             label=label)

    assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)
Example #48
0
def test_rgba16():
    """A 16-bit RGBA TIFF loads identical to its reference .npy dump."""
    tiff_path = os.path.join(my_path, "tiff_files", "test_rgba16.tif")
    npy_path = os.path.join(my_path, "npy_files", "test_rgba16.npy")
    signal = hs.load(tiff_path)
    expected = np.load(npy_path)
    assert_true((signal.data == expected).all())
Example #49
0
 def test_signature_generation(self):
     """ Testing the signature generation from Auth class """
     payload = {'method': "falsemethodfortest", 'limit': "10"}
     computed = self.auth.sign(payload)
     # signing must yield a 32-character string (an MD5-sized hex digest
     # would fit this — presumably; verify against the Auth implementation)
     assert_true(isinstance(computed, basestring))
     assert_equal(len(computed), 32)
Example #50
0
def test_tf_lcmv():
    """Test time-frequency beamforming based on LCMV.

    Builds per-frequency-band noise covariances, compares tf_lcmv output
    against manually computed _lcmv_source_power results, and checks the
    error handling of tf_lcmv's input validation.
    """
    # Load the fixtures used throughout: label, events, raw data, forward op.
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)
    raw = mne.io.Raw(fname_raw, preload=True)
    forward = mne.read_forward_solution(fname_fwd)

    event_id, tmin, tmax = 1, -0.2, 0.2

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bads channels

    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info,
                           meg=True,
                           eeg=False,
                           stim=True,
                           eog=True,
                           exclude='bads',
                           selection=left_temporal_channels)

    # Read epochs (not preloaded: tf_lcmv needs access to the raw object)
    epochs = mne.Epochs(raw,
                        events,
                        event_id,
                        tmin,
                        tmax,
                        proj=True,
                        picks=picks,
                        baseline=None,
                        preload=False,
                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
    epochs.drop_bad_epochs()

    freq_bins = [(4, 12), (15, 40)]
    time_windows = [(-0.1, 0.1), (0.0, 0.2)]
    win_lengths = [0.2, 0.2]
    tstep = 0.1
    reg = 0.05

    # One regularized noise covariance per frequency band, computed on
    # band-pass filtered copies of the raw data.
    source_power = []
    noise_covs = []
    for (l_freq, h_freq), win_length in zip(freq_bins, win_lengths):
        raw_band = raw.copy()
        raw_band.filter(l_freq, h_freq, method='iir', n_jobs=1, picks=picks)
        epochs_band = mne.Epochs(raw_band,
                                 epochs.events,
                                 epochs.event_id,
                                 tmin=tmin,
                                 tmax=tmax,
                                 baseline=None,
                                 proj=True,
                                 picks=picks)
        with warnings.catch_warnings(record=True):  # not enough samples
            noise_cov = compute_covariance(epochs_band,
                                           tmin=tmin,
                                           tmax=tmin + win_length)
        noise_cov = mne.cov.regularize(noise_cov,
                                       epochs_band.info,
                                       mag=reg,
                                       grad=reg,
                                       eeg=reg,
                                       proj=True)
        noise_covs.append(noise_cov)
        del raw_band  # to save memory

        # Manually calculating source power in one frequency band and several
        # time windows to compare to tf_lcmv results and test overlapping
        if (l_freq, h_freq) == freq_bins[0]:
            for time_window in time_windows:
                with warnings.catch_warnings(record=True):
                    data_cov = compute_covariance(epochs_band,
                                                  tmin=time_window[0],
                                                  tmax=time_window[1])
                stc_source_power = _lcmv_source_power(epochs.info,
                                                      forward,
                                                      noise_cov,
                                                      data_cov,
                                                      reg=reg,
                                                      label=label)
                source_power.append(stc_source_power.data)

    with warnings.catch_warnings(record=True):
        stcs = tf_lcmv(epochs,
                       forward,
                       noise_covs,
                       tmin,
                       tmax,
                       tstep,
                       win_lengths,
                       freq_bins,
                       reg=reg,
                       label=label)

    # One source estimate per frequency bin, each with 4 time steps.
    assert_true(len(stcs) == len(freq_bins))
    assert_true(stcs[0].shape[1] == 4)

    # Averaging all time windows that overlap the time period 0 to 100 ms
    source_power = np.mean(source_power, axis=0)

    # Selecting the first frequency bin in tf_lcmv results
    stc = stcs[0]

    # Comparing tf_lcmv results with _lcmv_source_power results
    assert_array_almost_equal(stc.data[:, 2], source_power[:, 0])

    # Test if using unsupported max-power orientation is detected
    assert_raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep,
                  win_lengths,
                  freq_bins=freq_bins,
                  pick_ori='max-power')

    # Test if incorrect number of noise CSDs is detected
    # Test if incorrect number of noise covariances is detected
    assert_raises(ValueError, tf_lcmv, epochs, forward, [noise_covs[0]], tmin,
                  tmax, tstep, win_lengths, freq_bins)

    # Test if freq_bins and win_lengths incompatibility is detected
    assert_raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep,
                  win_lengths=[0, 1, 2],
                  freq_bins=freq_bins)

    # Test if time step exceeding window lengths is detected
    assert_raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep=0.15,
                  win_lengths=[0.2, 0.1],
                  freq_bins=freq_bins)

    # Test correct detection of preloaded epochs objects that do not contain
    # the underlying raw object
    epochs_preloaded = mne.Epochs(raw,
                                  events,
                                  event_id,
                                  tmin,
                                  tmax,
                                  proj=True,
                                  baseline=(None, 0),
                                  preload=True)
    assert_raises(ValueError, tf_lcmv, epochs_preloaded, forward, noise_covs,
                  tmin, tmax, tstep, win_lengths, freq_bins)

    with warnings.catch_warnings(record=True):  # not enough samples
        # Pass only one epoch to test if subtracting evoked
        # responses yields zeros
        stcs = tf_lcmv(epochs[0],
                       forward,
                       noise_covs,
                       tmin,
                       tmax,
                       tstep,
                       win_lengths,
                       freq_bins,
                       subtract_evoked=True,
                       reg=reg,
                       label=label)

    assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
Example #51
0
 def test_should_be_able_to_authenticate_an_existing_user_with_right_password(
         self):
     """Authentication with the correct password should succeed."""
     with app.test_request_context():
         ok, _user = User.query.authenticate(username='******',
                                             password='******')
         assert_true(ok)
Example #52
0
 def test_get_user_session(self):
     """Auth.get_session should return a 32-character string session id."""
     result = self.auth.get_session()
     assert_true(isinstance(result, basestring))
     assert_equal(len(result), 32)
Example #53
0
File: tests.py Project: Edisnel/hue
    def test_job_single_logs(self):
        """Fetch the single-job log view as JSON and verify every log type.

        Requires a live cluster; skipped otherwise. Checks that stdout,
        stderr and syslog sections are all present, and that syslog
        reports a non-zero length for a completed oozie job.
        """
        if not is_live_cluster():
            raise SkipTest

        response = TestJobBrowserWithHadoop.client.get(
            '/jobbrowser/jobs/%s/single_logs?format=json' %
            (TestJobBrowserWithHadoop.hadoop_job_id))
        json_resp = json.loads(response.content)

        assert_true('logs' in json_resp)
        assert_true('Log Type: stdout' in json_resp['logs'][1])
        assert_true('Log Type: stderr' in json_resp['logs'][2])
        assert_true('Log Type: syslog' in json_resp['logs'][3])

        # Verify that syslog contains log information for a completed oozie job
        match = re.search(
            r"^Log Type: syslog(.+)Log Length: (?P<log_length>\d+)(.+)$",
            json_resp['logs'][3], re.DOTALL)
        assert_true(match and match.group('log_length'),
                    'Failed to parse log length from syslog')
        # match.group() returns a string; comparing a str to an int is
        # vacuously true on Python 2, so convert before asserting > 0.
        log_length = int(match.group('log_length'))
        assert_true(log_length > 0,
                    'Log Length is 0, expected content in syslog.')
Example #54
0
 def test_get_token(self):
     """Auth.get_token should return a 32-character string token."""
     self.auth = Auth()
     result = self.auth.get_token()
     assert_true(isinstance(result, basestring))
     assert_equal(len(result), 32)
def test_check_user_positive():
    """Looking up the root user should report that it exists."""
    checker = CheckUser("root")
    ok, message, _uid, _gid = checker.check_if_user_exists()
    assert_true(ok)
    assert_equals('User root exists', message)
def test_receptive_field():
    """Test model prep and fitting."""
    from sklearn.linear_model import Ridge
    # Make sure estimator pulling works
    mod = Ridge()

    # Test the receptive field model
    # Define parameters for the model and simulate inputs + weights
    tmin, tmax = 0., 10.
    n_feats = 3
    X = rng.randn(n_feats, 10000)
    # One weight per (feature, delay) pair: (tmax - tmin + 1) delays per feature.
    w = rng.randn(int((tmax - tmin) + 1) * n_feats)

    # Delay inputs and cut off first 4 values since they'll be cut in the fit
    X_del = np.vstack(_delay_time_series(X, tmin, tmax, 1., axis=-1))
    y = np.dot(w, X_del)
    X = np.rollaxis(X, -1, 0)  # time to first dimension

    # Fit the model and test values
    feature_names = ['feature_%i' % ii for ii in [0, 1, 2]]
    rf = ReceptiveField(tmin, tmax, 1, feature_names, estimator=mod)
    rf.fit(X, y)
    assert_array_equal(rf.delays_, np.arange(tmin, tmax + 1))

    # Recovered predictions and coefficients should match the simulation.
    y_pred = rf.predict(X)
    assert_array_almost_equal(y[rf.keep_samples_],
                              y_pred.squeeze()[rf.keep_samples_], 2)
    scores = rf.score(X, y)
    assert_true(scores > .99)
    assert_array_almost_equal(rf.coef_.reshape(-1, order='F'), w, 2)
    # Make sure different input shapes work
    # NOTE(review): `np.newaxis:` in slice-start position is None, so
    # X[:, np.newaxis:, ] is just X[:, :] — possibly X[..., np.newaxis]
    # was intended; confirm against upstream.
    rf.fit(X[:, np.newaxis:, ], y[:, np.newaxis])
    rf.fit(X, y[:, np.newaxis])
    assert_raises(ValueError, rf.fit, X[..., np.newaxis], y)
    assert_raises(ValueError, rf.fit, X[:, 0], y)
    assert_raises(ValueError, rf.fit, X[..., np.newaxis],
                  np.tile(y[..., np.newaxis], [2, 1, 1]))
    # stim features must match length of input data
    assert_raises(ValueError, rf.fit, X[:, :1], y)
    # auto-naming features
    rf = ReceptiveField(tmin, tmax, 1, estimator=mod)
    rf.fit(X, y)
    assert_equal(rf.feature_names, ['feature_%s' % ii for ii in [0, 1, 2]])
    # X/y same n timepoints
    assert_raises(ValueError, rf.fit, X, y[:-2])
    # Float becomes ridge
    rf = ReceptiveField(tmin, tmax, 1, ['one', 'two', 'three'], estimator=0)
    str(rf)  # repr works before fit
    rf.fit(X, y)
    assert_true(isinstance(rf.estimator_, TimeDelayingRidge))
    str(rf)  # repr works after fit
    rf = ReceptiveField(tmin, tmax, 1, ['one'], estimator=0)
    rf.fit(X[:, [0]], y)
    str(rf)  # repr with one feature
    # Should only accept estimators or floats
    rf = ReceptiveField(tmin, tmax, 1, estimator='foo')
    assert_raises(ValueError, rf.fit, X, y)
    rf = ReceptiveField(tmin, tmax, 1, estimator=np.array([1, 2, 3]))
    assert_raises(ValueError, rf.fit, X, y)
    # tmin must be <= tmax
    rf = ReceptiveField(5, 4, 1)
    assert_raises(ValueError, rf.fit, X, y)
    # scorers
    for key, val in _SCORERS.items():
        rf = ReceptiveField(tmin, tmax, 1, ['one'], estimator=0, scoring=key)
        rf.fit(X[:, [0]], y)
        y_pred = rf.predict(X[:, [0]])
        assert_array_almost_equal(val(y[:, np.newaxis], y_pred),
                                  rf.score(X[:, [0]], y), 4)
    # Need 2D input
    assert_raises(ValueError, _SCORERS['corrcoef'], y.squeeze(), y_pred)
    # Need correct scorers
    rf = ReceptiveField(tmin, tmax, 1., scoring='foo')
    assert_raises(ValueError, rf.fit, X, y)
Example #57
0
def test_get_ipython_package_dir():
    """The reported IPython package location should be a real directory."""
    pkg_dir = path.get_ipython_package_dir()
    nt.assert_true(os.path.isdir(pkg_dir))
Example #58
0
File: tests.py Project: Edisnel/hue
    def test_failed_jobs(self):
        """Test jobs with genuine failure, not just killed.

        Submits a mapreduce design whose mapper/reducer classes do not
        exist, then checks the job shows up under the 'failed' filter and
        not under 'killed'. The checks after the mid-function SkipTest are
        intentionally disabled (not compatible with MR2).
        """
        if is_live_cluster():
            raise SkipTest('HUE-2902: Skipping because test is not reentrant')

        # Create design that will fail because the script file isn't there
        INPUT_DIR = TestJobBrowserWithHadoop.home_dir + '/input'
        OUTPUT_DIR = TestJobBrowserWithHadoop.home_dir + '/output'
        try:
            TestJobBrowserWithHadoop.cluster.fs.mkdir(
                TestJobBrowserWithHadoop.home_dir + "/jt-test_failed_jobs")
            TestJobBrowserWithHadoop.cluster.fs.mkdir(INPUT_DIR)
            TestJobBrowserWithHadoop.cluster.fs.rmtree(OUTPUT_DIR)
        except Exception:
            # Best-effort setup; a bare except here would also swallow
            # KeyboardInterrupt/SystemExit, so catch Exception only.
            LOG.exception('failed to teardown tests')

        job_name = '%s_%s' % (TestJobBrowserWithHadoop.username,
                              'test_failed_jobs-1')
        response = TestJobBrowserWithHadoop.client.post(
            reverse('jobsub.views.new_design',
                    kwargs={'node_type': 'mapreduce'}), {
                        'name': [job_name],
                        'description': ['description test_failed_jobs-1'],
                        'args':
                        '',
                        'jar_path':
                        '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
                        'prepares':
                        '[]',
                        'archives':
                        '[]',
                        'files':
                        '[]',
                        'job_properties': [
                            '[{"name":"mapred.input.dir","value":"%s"},\
            {"name":"mapred.output.dir","value":"%s"},\
            {"name":"mapred.mapper.class","value":"org.apache.hadoop.mapred.lib.dne"},\
            {"name":"mapred.combiner.class","value":"org.apache.hadoop.mapred.lib.dne"},\
            {"name":"mapred.reducer.class","value":"org.apache.hadoop.mapred.lib.dne"}]'
                            % (INPUT_DIR, OUTPUT_DIR)
                        ]
                    },
            HTTP_X_REQUESTED_WITH='XMLHttpRequest',
            follow=True)

        # Submit the job
        design_dict = json.loads(response.content)
        design_id = int(design_dict['id'])
        response = TestJobBrowserWithHadoop.client.post(
            reverse('oozie:submit_workflow', args=[design_id]),
            data={
                u'form-MAX_NUM_FORMS': [u''],
                u'form-INITIAL_FORMS': [u'1'],
                u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                u'form-0-value': [u'1'],
                u'form-TOTAL_FORMS': [u'1']
            },
            follow=True)
        oozie_jobid = response.context['oozie_workflow'].id
        job = OozieServerProvider.wait_until_completion(oozie_jobid)
        hadoop_job_id = get_hadoop_job_id(TestJobBrowserWithHadoop.oozie,
                                          oozie_jobid, 1)
        hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)

        # Select only killed jobs (should be absent)
        # Taking advantage of the fact new jobs are at the top of the list!
        response = TestJobBrowserWithHadoop.client.post(
            '/jobbrowser/jobs/', {
                'format': 'json',
                'state': 'killed'
            })
        assert_false(hadoop_job_id_short in response.content)

        # Select only failed jobs (should be present)
        # Map job should succeed. Reduce job should fail.
        response = TestJobBrowserWithHadoop.client.post(
            '/jobbrowser/jobs/', {
                'format': 'json',
                'state': 'failed'
            })
        assert_true(hadoop_job_id_short in response.content)

        raise SkipTest  # Not compatible with MR2

        # ------------------------------------------------------------------
        # Everything below is unreachable (disabled for MR2); kept for
        # reference in case the MR1 task views are re-enabled.
        # ------------------------------------------------------------------

        # The single job view should have the failed task table
        response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s' %
                                                       (hadoop_job_id, ))
        html = response.content.lower()
        assert_true('failed task' in html, html)

        # The map task should say success (empty input)
        map_task_id = TestJobBrowserWithHadoop.hadoop_job_id.replace(
            'job', 'task') + '_m_000000'
        response = TestJobBrowserWithHadoop.client.get(
            '/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, map_task_id))
        assert_true('succeed' in response.content)
        assert_true('failed' not in response.content)

        # The reduce task should say failed
        reduce_task_id = hadoop_job_id.replace('job', 'task') + '_r_000000'
        response = TestJobBrowserWithHadoop.client.get(
            '/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, reduce_task_id))
        assert_true('succeed' not in response.content)
        assert_true('failed' in response.content)

        # Selecting by failed state should include the failed map
        response = TestJobBrowserWithHadoop.client.get(
            '/jobbrowser/jobs/%s/tasks?taskstate=failed' % (hadoop_job_id, ))
        assert_true('r_000000' in response.content)
        assert_true('m_000000' not in response.content)
Example #59
0
def issue512(sim):
    """
    Test to ensure that StepCurrentSource times are handled similarly across
    all simulators. Multiple combinations of step times tested for:
    1) dt = 0.1 ms, min_delay = 0.1 ms
    2) dt = 0.01 ms, min_delay = 0.01 ms
    Note: exact matches of times not appropriate owing to floating point
    rounding errors. If absolute difference <1e-9, then considered equal.
    """
    def get_len(data):
        # Backend-specific length: NEST stores times/amplitudes lazily.
        if "pyNN.nest" in str(sim):
            # as NEST uses LazyArray
            return len(data.evaluate())
        else:
            return len(data)

    # 1) dt = 0.1 ms, min_delay = 0.1 ms
    dt = 0.1
    sim.setup(timestep=dt, min_delay=dt)
    cells = sim.Population(
        1, sim.IF_curr_exp(v_thresh=-55.0, tau_refrac=5.0, v_rest=-60.0))
    # 1.1) Negative time value
    assert_raises(ValueError,
                  sim.StepCurrentSource,
                  times=[0.4, -0.6, 0.8],
                  amplitudes=[0.5, -0.5, 0.5])
    # 1.2) Time values not monotonically increasing
    assert_raises(ValueError,
                  sim.StepCurrentSource,
                  times=[0.4, 0.2, 0.8],
                  amplitudes=[0.5, -0.5, 0.5])
    # 1.3) Check mapping of time values and removal of duplicates
    # (0.41 and 0.42 both round to the same 0.1 ms grid point, so only the
    # later entry survives)
    step = sim.StepCurrentSource(times=[0.41, 0.42, 0.85],
                                 amplitudes=[0.5, -0.5, 0.5])
    assert_equal(get_len(step.times), 2)
    assert_equal(get_len(step.amplitudes), 2)
    if "pyNN.brian" in str(sim):
        # Brian requires time in seconds (s)
        assert_true(abs(step.times[0] - 0.4 * 1e-3) < 1e-9)
        assert_true(abs(step.times[1] - 0.9 * 1e-3) < 1e-9)
        # Brian requires amplitudes in amperes (A)
        assert_true(step.amplitudes[0] == -0.5 * 1e-9)
        assert_true(step.amplitudes[1] == 0.5 * 1e-9)
    else:
        # NEST requires amplitudes in picoamperes (pA) but stored
        # as LazyArray and so needn't manually adjust; use nA
        # NEURON requires amplitudes in nanoamperes (nA)
        assert_true(step.amplitudes[0] == -0.5)
        assert_true(step.amplitudes[1] == 0.5)
        # NEST and NEURON require time in ms
        # But NEST has time stamps reduced by min_delay
        if "pyNN.nest" in str(sim):
            assert_true(abs(step.times[0] - 0.3) < 1e-9)
            assert_true(abs(step.times[1] - 0.8) < 1e-9)
        else:  # neuron
            assert_true(abs(step.times[0] - 0.4) < 1e-9)
            assert_true(abs(step.times[1] - 0.9) < 1e-9)

    # 2) dt = 0.01 ms, min_delay = 0.01 ms
    dt = 0.01
    sim.setup(timestep=dt, min_delay=dt)
    cells = sim.Population(
        1, sim.IF_curr_exp(v_thresh=-55.0, tau_refrac=5.0, v_rest=-60.0))
    # 2.1) Negative time value
    assert_raises(ValueError,
                  sim.StepCurrentSource,
                  times=[0.4, -0.6, 0.8],
                  amplitudes=[0.5, -0.5, 0.5])
    # 2.2) Time values not monotonically increasing
    assert_raises(ValueError,
                  sim.StepCurrentSource,
                  times=[0.5, 0.4999, 0.8],
                  amplitudes=[0.5, -0.5, 0.5])
    # 2.3) Check mapping of time values and removal of duplicates
    step = sim.StepCurrentSource(times=[0.451, 0.452, 0.85],
                                 amplitudes=[0.5, -0.5, 0.5])
    assert_equal(get_len(step.times), 2)
    assert_equal(get_len(step.amplitudes), 2)
    if "pyNN.brian" in str(sim):
        # Brian requires time in seconds (s)
        assert_true(abs(step.times[0] - 0.45 * 1e-3) < 1e-9)
        assert_true(abs(step.times[1] - 0.85 * 1e-3) < 1e-9)
        # Brian requires amplitudes in amperes (A)
        assert_true(step.amplitudes[0] == -0.5 * 1e-9)
        assert_true(step.amplitudes[1] == 0.5 * 1e-9)
    else:
        # NEST requires amplitudes in picoamperes (pA) but stored
        # as LazyArray and so needn't manually adjust; use nA
        # NEURON requires amplitudes in nanoamperes (nA)
        assert_true(step.amplitudes[0] == -0.5)
        assert_true(step.amplitudes[1] == 0.5)
        # NEST and NEURON require time in ms
        # But NEST has time stamps reduced by min_delay
        if "pyNN.nest" in str(sim):
            assert_true(abs(step.times[0] - 0.44) < 1e-9)
            assert_true(abs(step.times[1] - 0.84) < 1e-9)
        else:  # neuron
            assert_true(abs(step.times[0] - 0.45) < 1e-9)
            assert_true(abs(step.times[1] - 0.85) < 1e-9)
Example #60
0
def test_get_ipython_module_path():
    """Resolving a module dotted name should yield an existing file path."""
    module_path = path.get_ipython_module_path('IPython.terminal.ipapp')
    nt.assert_true(os.path.isfile(module_path))