Code Example #1
def test_within_avg():
    args = Namespace(**args_dict)
    avg_test_val = 3
    args.v_avg = avg_test_val
    roster = run(NFL, args)
    for player in roster.players:
        ntools.assert_less(abs(player.v_avg), avg_test_val)
Code Example #2
File: test_testing.py  Project: Afey/scikit-learn
def test_assert_less():
    # Check that the nose implementation of assert_less gives the
    # same thing as the scikit's
    assert_less(0, 1)
    _assert_less(0, 1)
    assert_raises(AssertionError, assert_less, 1, 0)
    assert_raises(AssertionError, _assert_less, 1, 0)
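For context, a scikit-learn-style fallback for environments whose nose lacks assert_less is just a comparison wrapped in an assert. A minimal sketch (the real helper's message formatting may differ):

def _assert_less(a, b, msg=None):
    # Illustrative fallback for nose versions without assert_less.
    message = '%r is not lower than %r' % (a, b)
    if msg is not None:
        message += ': ' + msg
    assert a < b, message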
Code Example #3
def _test_models(Model, size):
    model = Model()

    if Model.__name__ == 'LowEntropy':
        raise SkipTest('FIXME LowEntropy.score_counts is not normalized')

    for i, EXAMPLE in enumerate(Model.EXAMPLES):
        print 'Example {}'.format(i)
        model.load(EXAMPLE)
        samples = []
        probs_dict = {}
        for _ in xrange(SAMPLE_COUNT):
            value = model.sample_assignments(size)
            assignments = dict(enumerate(value))
            counts = count_assignments(assignments)
            prob = math.exp(model.score_counts(counts))
            sample = canonicalize(value)
            samples.append(sample)
            probs_dict[sample] = prob

        total = sum(probs_dict.values())
        assert_less(
            abs(total - 1),
            1e-2,
            'not normalized: {}'.format(total))

        gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
        print '{} gof = {:0.3g}'.format(Model.__name__, gof)
        assert_greater(gof, MIN_GOODNESS_OF_FIT)
Code Example #4
def test_compressed_encrypt_then_decrypt_string():
    plaintext = "X" * 4096
    key = "this is my key"
    ciphertext = crypto.encrypt_string(plaintext, key, compress=True)
    assert_less(len(ciphertext), len(plaintext) / 10)
    plaintext_after = crypto.decrypt_string(ciphertext, key)
    assert_equal(plaintext, plaintext_after)
Code Example #5
File: test_project.py  Project: alexras/pylsdj
def test_read_write_project():
    sample_song_compressed = os.path.join(
        SCRIPT_DIR, "test_data", "sample_song_compressed.json")

    with open(sample_song_compressed, "r") as fp:
        song_data_compressed = json.load(fp)

    song_data = filepack.decompress(song_data_compressed)
    song_name = "UNTOLDST"
    song_version = 23

    # 0xbadf00d for size in blocks is synthetic, since we don't really care
    # about its size for the purposes of this test
    bogus_size_blks = 0xbadf00d

    proj = Project(
        song_name, song_version, bogus_size_blks, song_data)

    assert_equal(proj.name, song_name)
    assert_equal(proj.version, song_version)

    raw_data = proj.get_raw_data()

    recompressed = filepack.compress(raw_data)

    assert_less(math.fabs(len(recompressed) - len(song_data_compressed)), 512)

    # Do comparison based on parsed object, since the actual input data can
    # contain noise
    proj_from_raw_data = Project(
        song_name, song_version, bogus_size_blks, raw_data)

    assert_equal(proj_from_raw_data._song_data, proj._song_data)
Code Example #6
def assert_normal(x, y, sigma, stddevs=4.0):
    '''
    Assert that the difference between two values is within a few standard
    deviations of the predicted [normally distributed] error of zero.
    '''
    assert_less(x, y + sigma * stddevs)
    assert_less(y, x + sigma * stddevs)
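A usage sketch for assert_normal above (numbers invented for illustration; assumes numpy and the assert_less it relies on are available): compare a sample mean to its expected value, with sigma set to the standard error of the mean.

import numpy as np

samples = np.random.RandomState(0).normal(loc=5.0, scale=2.0, size=10000)
sigma = 2.0 / np.sqrt(len(samples))  # standard error of the mean
assert_normal(samples.mean(), 5.0, sigma)  # |mean - 5.0| within ~4 * sigma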
Code Example #7
File: test_root_metrics.py  Project: cyclus/cymetric
def test_decom_schedule(db, fname, backend):
    r = root_metrics.decom_schedule(db=db)
    obs = r()
    if obs is None:
        return
    assert_less(0, len(obs))
    assert_equal('DecomSchedule', r.name)
Code Example #8
File: test_root_metrics.py  Project: cyclus/cymetric
def test_agent_exit(db, fname, backend):
    r = root_metrics.agent_exit(db=db)
    obs = r()
    if obs is None:
        return
    assert_less(0, len(obs))
    assert_equal('AgentExit', r.name)
Code Example #9
File: test_query_math.py  Project: fritzo/loom
def test_score_none(root, encoding, **unused):
    with loom.query.get_server(root, debug=True) as server:
        preql = loom.preql.PreQL(server, encoding)
        fnames = preql.feature_names
        assert_less(
            abs(server.score([None for _ in fnames])),
            SCORE_TOLERANCE)
Code Example #10
File: test_util.py  Project: ericmjonas/distributions
def test_scores_to_probs():
    scores = [-10000, 10000, 10001, 9999, 0, 5, 6, 6, 7]
    probs = scores_to_probs(scores)
    assert_less(abs(sum(probs) - 1), 1e-6)
    for prob in probs:
        assert_less_equal(0, prob)
        assert_less_equal(prob, 1)
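The scores above span [-10000, 10001], so naive exponentiation would overflow; a function passing this test almost certainly normalizes in log space. A minimal sketch of that standard technique (the library's actual scores_to_probs may differ):

import numpy as np

def scores_to_probs_sketch(scores):
    scores = np.asarray(scores, dtype=float)
    scores -= scores.max()       # shift so the largest score is 0 (no overflow)
    probs = np.exp(scores)
    return probs / probs.sum()   # normalize to sum to 1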
Code Example #11
File: test_root_metrics.py  Project: cyclus/cymetric
def test_products(db, fname, backend):
    r = root_metrics.products(db=db)
    obs = r()
    if obs is None:
        return
    assert_less(0, len(obs))
    assert_equal('Products', r.name)
Code Example #12
    def testSine(self, method):
        # Test with a rounded sine function. Data should be reduced
        sx = np.arange(1000) * .02
        rsine = np.round(np.sin(sx) * 10.) / 10.
        rx, ry = unstair(sx, rsine, method=method)
        assert_less(rx.size, sx.size)
        assert_less(ry.size, rsine.size)
Code Example #13
def test_ttest_ind():
    "Test testnd.ttest_ind()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds)
    repr(res)
    assert_less(res.p_uncorrected.min(), 0.05)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # cluster
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, tail=1, samples=1)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # nd
    res = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, pmin=0.05, samples=2)
    eq_(res._cdist.n_clusters, 10)
Code Example #14
File: test_concordance.py  Project: HuttonICS/pyani
    def test_aniblastall_concordance(self):
        """ANIblastall results concordant with JSpecies."""
        # Perform ANIblastall on the input directory contents
        outdir = os.path.join(self.outdir, "blastall")
        os.makedirs(outdir, exist_ok=True)
        fragfiles, fraglengths = anib.fragment_fasta_files(
            self.infiles, outdir, self.fragsize
        )
        jobgraph = anib.make_job_graph(
            self.infiles, fragfiles, anib.make_blastcmd_builder("ANIblastall", outdir)
        )
        assert_equal(0, run_mp.run_dependency_graph(jobgraph))
        results = anib.process_blast(
            outdir, self.orglengths, fraglengths, mode="ANIblastall"
        )
        result_pid = results.percentage_identity
        result_pid.to_csv(os.path.join(self.outdir, "pyani_aniblastall.tab"), sep="\t")

        # Compare JSpecies output to results
        result_pid = result_pid.sort_index(axis=0).sort_index(axis=1) * 100.0
        diffmat = result_pid.values - self.target["ANIb"].values
        aniblastall_diff = pd.DataFrame(
            diffmat, index=result_pid.index, columns=result_pid.columns
        )
        aniblastall_diff.to_csv(
            os.path.join(self.outdir, "pyani_aniblastall_diff.tab"), sep="\t"
        )
        assert_less(aniblastall_diff.abs().values.max(), self.tolerance["ANIblastall"])
Code Example #15
def test_info_serialliar():
    fib_tracker = [0]
    inspector.info(SerialLiar(fib_tracker))

    # Nested attribute access should be cut off at 100 levels deep to avoid
    # infinite loops: https://github.com/ipython/ipython/issues/9122
    nt.assert_less(fib_tracker[0], 9000)
Code Example #16
File: test_performance.py  Project: Akhilan/Mailpile
    def __init__(self):
        self.mp = get_shared_mailpile()
        self.mp.set("sys.postinglist_kb=%s" % postinglist_kb)
        self.mp.set("prefs.num_results=50")
        self.mp.set("prefs.default_order=rev-date")
        results = self.mp.search(*query)
        assert_less(float(results.as_dict()["elapsed"]), 0.2)
Code Example #17
File: test_utils.py  Project: diogo149/hooky
def check_updates_node(constructor):
    np.random.seed(42)
    x = np.random.randn(5).astype(fX)
    w = np.random.randn(5) * 20
    b = np.random.randn() + 35
    y = (np.dot(x, w) + b).astype(fX)[np.newaxis]

    graph = hth.RootGraph(
        [hth.AddSequential("model",
                           nodes=[hth.InputNode("x", shape=(5,)),
                                  hth.AffineNode("pred", num_units=1)]),
         hth.AddOne("target",
                    node=hth.InputNode("y", shape=(1,))),
         hth.AddOne("cost",
                    node=hth.SquaredErrorCostNode("mse"),
                    inputs={"target": ("target", "y"),
                            "pred": ("model", "pred")}),
         hth.AddOne("updates",
                    node=constructor("node"),
                    inputs={"cost": ("cost", "mse")}),
         hth.AddOne("fn",
                    node=hth.TheanoDictFunctionDSLNode(
                        "dsl",
                        inputs={"x": ("model", "x"),
                                "y": ("target", "y")},
                        outputs={"cost": ("cost", "mse")},
                        updates_path=("updates", "node"))),
         hth.UseDatumCallFunction("call", node_path=("fn", "dsl"))])

    prev_cost = graph(in_dict={"x": x, "y": y})["cost"]
    for _ in range(5):
        cost = graph(in_dict={"x": x, "y": y})["cost"]
        print prev_cost, cost
        nt.assert_less(cost, prev_cost)
        prev_cost = cost
Code Example #18
def test_constraint_removal():
    digits = load_digits()
    X, y = digits.data, digits.target
    y = 2 * (y % 2) - 1  # even vs odd as +1 vs -1
    X = X / 16.
    pbl = BinarySVMModel(n_features=X.shape[1])
    clf_no_removal = OneSlackSSVM(model=pbl, max_iter=500, verbose=1, C=10,
                                  inactive_window=0, tol=0.01)
    clf_no_removal.fit(X, y)
    clf = OneSlackSSVM(model=pbl, max_iter=500, verbose=1, C=10, tol=0.01,
                       inactive_threshold=1e-8)
    clf.fit(X, y)

    # results are mostly equal
    # if we decrease tol, they will get more similar
    assert_less(np.mean(clf.predict(X) != clf_no_removal.predict(X)), 0.02)

    # without removal, have as many constraints as iterations
    # +1 for true y constraint
    assert_equal(len(clf_no_removal.objective_curve_) + 1,
                 len(clf_no_removal.constraints_))

    # with removal, there are less constraints than iterations
    assert_less(len(clf.constraints_),
                len(clf.objective_curve_))
Code Example #19
File: test_concordance.py  Project: HuttonICS/pyani
    def test_anim_concordance(self):
        """ANIm results concordant with JSpecies."""
        # Perform ANIm on the input directory contents
        # We have to separate nucmer/delta-filter command generation
        # because Travis-CI doesn't play nicely with changes we made
        # for local SGE/OGE integration.
        # This might be avoidable with a scheduler flag passed to
        # jobgroup generation in the anim.py module. That's a TODO.
        ncmds, fcmds = anim.generate_nucmer_commands(self.infiles, self.outdir)
        run_mp.multiprocessing_run(ncmds)

        # delta-filter commands need to be treated with care for
        # Travis-CI. Our cluster won't take redirection or semicolon
        # separation in individual commands, but the wrapper we wrote
        # for this (delta_filter_wrapper.py) can't be called under
        # Travis-CI. So we must deconstruct the commands below
        dfcmds = [
            " > ".join([" ".join(fcmd.split()[1:-1]), fcmd.split()[-1]])
            for fcmd in fcmds
        ]
        run_mp.multiprocessing_run(dfcmds)

        results = anim.process_deltadir(self.deltadir, self.orglengths)
        result_pid = results.percentage_identity
        result_pid.to_csv(os.path.join(self.outdir, "pyani_anim.tab"), sep="\t")

        # Compare JSpecies output to results
        result_pid = result_pid.sort_index(axis=0).sort_index(axis=1) * 100.0
        diffmat = result_pid.values - self.target["ANIm"].values
        anim_diff = pd.DataFrame(
            diffmat, index=result_pid.index, columns=result_pid.columns
        )
        anim_diff.to_csv(os.path.join(self.outdir, "pyani_anim_diff.tab"), sep="\t")
        assert_less(anim_diff.abs().values.max(), self.tolerance["ANIm"])
Code Example #20
def test_score_counts_is_normalized(Model, EXAMPLE, sample_count):

    for sample_size in iter_valid_sizes(EXAMPLE, max_size=10):
        model = Model()
        model.load(EXAMPLE)

        if Model.__name__ == 'LowEntropy' and sample_size < model.dataset_size:
            print 'WARNING LowEntropy.score_counts normalization is imprecise'
            print '  when sample_size < dataset_size'
            tol = 0.5
        else:
            tol = 0.01

        probs_dict = {}
        for _ in xrange(sample_count):
            value = model.sample_assignments(sample_size)
            sample = canonicalize(value)
            if sample not in probs_dict:
                assignments = dict(enumerate(value))
                counts = count_assignments(assignments)
                prob = math.exp(model.score_counts(counts))
                probs_dict[sample] = prob

        total = sum(probs_dict.values())
        assert_less(abs(total - 1), tol, 'not normalized: {}'.format(total))
Code Example #21
File: test_read.py  Project: tlevine/friendly_brief
def check_amici(brief, expectation):
    def standardize(nonstandard:str) -> str:
        return re.sub(r'[ .]', '', nonstandard.strip().lower())
    observation = list(f.amici(brief))
    for expected_amicus in expectation:
        for observed_amicus in observation:
            if standardize(expected_amicus) in standardize(observed_amicus):
                break
        else:
            msg = '''The expected amicus "%s" should be found in the following brief.

    %s

But it is not among the following values output from the amici function.

    %s'''
            raise AssertionError(msg % (expected_amicus, brief, pformat(observation)))

    lengths = (len(observation), len(expectation))
    if len(observation) < len(expectation): # - 1: # Failures at non-Oxford comma are okay.
        msg = 'The amici were not broken up enough; only %d amici were reported, but there are supposed to be %d:\n' + '\n* '.join(observation)
        raise AssertionError(msg % lengths)
    if len(observation) > len(expectation) + 1:
        msg = 'The amici were too broken up; %d amici were reported, but there are only supposed to be %d:\n' + '\n* '.join(observation)
        raise AssertionError(msg % lengths)

    for observed_amicus in observation:
        n.assert_less(len(observed_amicus), 80, msg = observation)
Code Example #22
def test():
    year = 2013
    reform = landais_piketty_saez.build_reform(base.tax_benefit_system)
    scenario = reform.new_scenario().init_single_entity(
        axes = [
            dict(
                count = 10,
                max = 30000,
                min = 0,
                name = 'sali',
                ),
            ],
        period = periods.period('year', year),
        parent1 = dict(birth = datetime.date(year - 40, 1, 1)),
        parent2 = dict(birth = datetime.date(year - 40, 1, 1)),
        enfants = [
            dict(birth = datetime.date(year - 9, 1, 1)),
            dict(birth = datetime.date(year - 9, 1, 1)),
            ],
        )

    reference_simulation = scenario.new_simulation(debug = True, reference = True)

    error_margin = 0.01
    af = reference_simulation.calculate('af')
    expected_af = [1532.16] * 10
    assert_less(max(abs(expected_af - af)), error_margin)
    revdisp = reference_simulation.calculate('revdisp')

    reform_simulation = scenario.new_simulation(debug = True)
    reform_af = reform_simulation.calculate('af')
    assert_less(max(abs(expected_af - reform_af)), error_margin)
    reform_revdisp = reform_simulation.calculate('revdisp')
Code Example #23
File: util.py  Project: ericmjonas/distributions
def assert_close(lhs, rhs, tol=TOL, err_msg=None):
    if isinstance(lhs, dict):
        assert_true(
            isinstance(rhs, dict),
            'type mismatch: {} vs {}'.format(type(lhs), type(rhs)))
        assert_equal(set(lhs.keys()), set(rhs.keys()))
        for key, val in lhs.iteritems():
            msg = '{}[{}]'.format(err_msg or '', key)
            assert_close(val, rhs[key], tol, msg)
    elif isinstance(lhs, float) or isinstance(lhs, numpy.float64):
        assert_true(
            isinstance(rhs, float) or isinstance(rhs, numpy.float64),
            'type mismatch: {} vs {}'.format(type(lhs), type(rhs)))
        diff = abs(lhs - rhs)
        norm = 1 + abs(lhs) + abs(rhs)
        msg = '{} off by {}% = {}'.format(err_msg, 100 * diff / norm, diff)
        assert_less(diff, tol * norm, msg)
    elif isinstance(lhs, numpy.ndarray) or isinstance(lhs, list):
        assert_true(
            isinstance(rhs, numpy.ndarray) or isinstance(rhs, list),
            'type mismatch: {} vs {}'.format(type(lhs), type(rhs)))
        decimal = int(round(-math.log10(tol)))
        assert_array_almost_equal(
            lhs,
            rhs,
            decimal=decimal,
            err_msg=(err_msg or ''))
    else:
        assert_equal(lhs, rhs, err_msg)
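An illustrative call (data invented for this sketch): dicts are compared key by key, floats within a relative tolerance, and array-likes via assert_array_almost_equal.

assert_close({'mu': 1.0000004, 'xs': [1.0, 2.0]},
             {'mu': 1.0, 'xs': [1.0, 2.0]},
             tol=1e-3)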
Code Example #24
def valid_hypergraph(hypergraph):
    """
    Check the assumptions about the hypergraph.
    """

    root_count = 0
    terminal = True
    children = set()

    # Check that terminal nodes are first.
    print len(hypergraph.nodes)
    print len(hypergraph.edges)
    for node in hypergraph.nodes:
        print node.id

    for node in hypergraph.nodes:
        if not terminal and len(node.edges) == 0:
            assert False
        if len(node.edges) != 0:
            terminal = False

        # Check ordering.
        for edge in node.edges:
            for tail_node in edge.tail:
                nt.assert_less(tail_node.id, node.id)
                children.add(tail_node.id)

    # Only 1 root.
    nt.assert_equal(len(children), len(hypergraph.nodes) - 1)
Code Example #25
File: 2_usb_to_can.py  Project: n2aws/panda
def test_gmlan():
  p = connect_wo_esp()

  if p.legacy:
    return

  # enable output mode
  p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)

  # enable CAN loopback mode
  p.set_can_loopback(True)

  p.set_can_speed_kbps(1, SPEED_NORMAL)
  p.set_can_speed_kbps(2, SPEED_NORMAL)
  p.set_can_speed_kbps(3, SPEED_GMLAN)
 
  # set gmlan on CAN2
  for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3, Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
    p.set_gmlan(bus)
    comp_kbps_gmlan = time_many_sends(p, 3)
    assert_greater(comp_kbps_gmlan, 0.8 * SPEED_GMLAN)
    assert_less(comp_kbps_gmlan, 1.0 * SPEED_GMLAN)

    p.set_gmlan(None)
    comp_kbps_normal = time_many_sends(p, bus)
    assert_greater(comp_kbps_normal, 0.8 * SPEED_NORMAL)
    assert_less(comp_kbps_normal, 1.0 * SPEED_NORMAL)

    print("%d: %.2f kbps vs %.2f kbps" % (bus, comp_kbps_gmlan, comp_kbps_normal))
Code Example #26
    def _get_example_window(self, row, col):
        '''
        Returns a model-input-sized subwindow of self.examples_pixels.

        Parameters
        ----------
        row, col: int
          The grid row and column of the subwindow to extract.
        '''

        # input_format = self.model.input_node.output_format
        # input_shape = numpy.asarray(
        #     [input_format.shape[input_format.axes.index(a)]
        #      for a in ('0', '1')])
        input_shape = self.original_image_size
        gutter = 10  # space between images in self.status_pixels
        row_col = numpy.asarray([row, col])
        min_corner = gutter + (input_shape + gutter) * row_col
        max_corner = min_corner + input_shape
        assert_less(max_corner[0], self.examples_pixels.shape[0])
        assert_less(max_corner[1], self.examples_pixels.shape[1])

        return self.examples_pixels[min_corner[0]:max_corner[0],
                                    min_corner[1]:max_corner[1],
                                    :]
Code Example #27
File: test_scans.py  Project: klauer/bluesky
def test_center():
    assert_true(not RE._run_is_open)
    det = SynGauss('det', motor, 'motor', 0, 1000, 1, 'poisson', True)
    d = {}
    cen = Center([det], 'det', motor, 0.1, 1.1, 0.01, d)
    RE(cen)
    assert_less(abs(d['center']), 0.1)
Code Example #28
File: test_plsa.py  Project: gpfreitas/topik
def test_cal_likelihood():
    dz = _rand_mat(len(test_vectorized_output), ntopics)
    zw = _rand_mat(ntopics, test_vectorized_output.global_term_count)
    p_dw = np.zeros((len(test_vectorized_output), test_vectorized_output.global_term_count))
    p_dw = _cal_p_dw(words_in_docs, word_cts_in_docs, range(ntopics), zw, dz, 0.8, p_dw)
    likelihood = _cal_likelihood(words_in_docs, word_cts_in_docs, p_dw)
    nt.assert_less(likelihood, 0)
Code Example #29
File: 2_usb_to_can.py  Project: n2aws/panda
def test_gmlan_bad_toggle():
  p = connect_wo_esp()

  if p.legacy:
    return

  # enable output mode
  p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)

  # enable CAN loopback mode
  p.set_can_loopback(True)

  # GMLAN_CAN2
  for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
    p.set_gmlan(bus)
    comp_kbps_gmlan = time_many_sends(p, 3)
    assert_greater(comp_kbps_gmlan, 0.6 * SPEED_GMLAN)
    assert_less(comp_kbps_gmlan, 1.0 * SPEED_GMLAN)

  # normal
  for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
    p.set_gmlan(None)
    comp_kbps_normal = time_many_sends(p, bus)
    assert_greater(comp_kbps_normal, 0.6 * SPEED_NORMAL)
    assert_less(comp_kbps_normal, 1.0 * SPEED_NORMAL)
Code Example #30
File: test_synapse_types.py  Project: antolikjan/PyNN
def test_simple_stochastic_synapse(sim, plot_figure=False):
    # in this test we connect
    sim.setup(min_delay=0.5)
    t_stop = 1000.0
    spike_times = np.arange(2.5, t_stop, 5.0)
    source = sim.Population(1, sim.SpikeSourceArray(spike_times=spike_times))
    neurons = sim.Population(4, sim.IF_cond_exp(tau_syn_E=1.0))
    synapse_type = sim.SimpleStochasticSynapse(weight=0.5,
                                               p=np.array([[0.0, 0.5, 0.5, 1.0]]))
    connections = sim.Projection(source, neurons, sim.AllToAllConnector(),
                                 synapse_type=synapse_type)
    source.record('spikes')
    neurons.record('gsyn_exc')
    sim.run(t_stop)

    data = neurons.get_data().segments[0]
    gsyn = data.analogsignals[0].rescale('uS')
    if plot_figure:
        import matplotlib.pyplot as plt
        for i in range(neurons.size):
            plt.subplot(neurons.size, 1, i+1)
            plt.plot(gsyn.times, gsyn[:, i])
        plt.savefig("test_simple_stochastic_synapse_%s.png" % sim.__name__)
    print(data.analogsignals[0].units)
    crossings = []
    for i in range(neurons.size):
        crossings.append(
                gsyn.times[:-1][np.logical_and(gsyn.magnitude[:-1, i] < 0.4, 0.4 < gsyn.magnitude[1:, i])])
    assert_equal(crossings[0].size, 0)
    assert_less(crossings[1].size, 0.6*spike_times.size)
    assert_greater(crossings[1].size, 0.4*spike_times.size)
    assert_equal(crossings[3].size, spike_times.size)
    assert_not_equal(crossings[1], crossings[2])
    print(crossings[1].size / spike_times.size)
    return data
Code Example #31
File: test_evaluator.py  Project: opotowsky/cymetric
def test_eval(db, fname, backend):
    df = evaluator.eval('Materials', db)
    assert_less(0, len(df))
Code Example #32
def check_materr(row):
    maxrelerr = row[4]
    if maxrelerr < 0.01:
        return
    weightederr = row[7]
    assert_less(weightederr, 0.1)
Code Example #33
    def test_with_too_large_threshold(self):
        threshold = 25
        for s in self.scoresheet.ranked_items(threshold=threshold):
            assert_less(len(s), threshold)
Code Example #34
File: test_hypergeom.py  Project: qqqube/permute
def less():
    assert_less(hypergeom([0, 1, 0, 1, 0, 1, 0, 1], 5, 2, 10**5, 'less')[0], 1)
Code Example #35
File: test_gmm.py  Project: hayoonlee96/gmr
def test_estimate_moments():
    """Test moments estimated from samples and sampling from GMM."""
    global X
    global random_state

    gmm = GMM(n_components=2, random_state=random_state)
    gmm.from_samples(X)
    assert_less(np.linalg.norm(gmm.means[0] - means[0]), 0.005)
    assert_less(np.linalg.norm(gmm.covariances[0] - covariances[0]), 0.01)
    assert_less(np.linalg.norm(gmm.means[1] - means[1]), 0.01)
    assert_less(np.linalg.norm(gmm.covariances[1] - covariances[1]), 0.03)

    X = gmm.sample(n_samples=100000)

    gmm = GMM(n_components=2, random_state=random_state)
    gmm.from_samples(X)
    assert_less(np.linalg.norm(gmm.means[0] - means[0]), 0.01)
    assert_less(np.linalg.norm(gmm.covariances[0] - covariances[0]), 0.03)
    assert_less(np.linalg.norm(gmm.means[1] - means[1]), 0.01)
    assert_less(np.linalg.norm(gmm.covariances[1] - covariances[1]), 0.04)
Code Example #36
def test_clusterdist():
    "Test _ClusterDist class"
    shape = (10, 6, 6, 4)
    locs = [[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]]
    x = np.random.normal(0, 1, shape)
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 6), Scalar('dim2', range(6),
                                              'unit'), sensor)
    y = NDVar(x, dims)

    # test connecting sensors
    logging.info("TEST:  connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, :2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    print(repr(cdist))
    cdist.add_original(pmap)
    print(repr(cdist))
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))
    assert_equal(cdist.parameter_map.dims, y.dims[1:])

    # test connecting many sensors
    logging.info("TEST:  connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))

    # test keeping sensors separate
    logging.info("TEST:  keeping sensors separate")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, 0] = True
    bin_map[:3, :3, 2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 1, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 2)

    # criteria
    ds = datasets.get_uts(True)
    res = testnd.ttest_rel('utsnd',
                           'A',
                           match='rm',
                           ds=ds,
                           samples=0,
                           pmin=0.05)
    assert_less(res.clusters['duration'].min(), 0.01)
    eq_(res.clusters['n_sensors'].min(), 1)
    res = testnd.ttest_rel('utsnd',
                           'A',
                           match='rm',
                           ds=ds,
                           samples=0,
                           pmin=0.05,
                           mintime=0.02,
                           minsensor=2)
    assert_greater_equal(res.clusters['duration'].min(), 0.02)
    eq_(res.clusters['n_sensors'].min(), 2)

    # 1d
    res1d = testnd.ttest_rel('utsnd.sub(time=0.1)',
                             'A',
                             match='rm',
                             ds=ds,
                             samples=0,
                             pmin=0.05)
    assert_dataobj_equal(res1d.p_uncorrected, res.p_uncorrected.sub(time=0.1))

    # TFCE
    logging.info("TEST:  TFCE")
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    time = UTS(-0.1, 0.1, 4)
    scalar = Scalar('scalar', range(10), 'unit')
    dims = ('case', time, sensor, scalar)
    np.random.seed(0)
    y = NDVar(np.random.normal(0, 1, (10, 4, 4, 10)), dims)
    cdist = _ClusterDist(y, 3, None)
    cdist.add_original(y.x[0])
    cdist.finalize()
    assert_equal(cdist.dist.shape, (3, ))
    # I/O
    string = pickle.dumps(cdist, pickle.HIGHEST_PROTOCOL)
    cdist_ = pickle.loads(string)
    assert_equal(repr(cdist_), repr(cdist))
    # find peaks
    x = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [7, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [5, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 7, 5, 5, 0, 0],
                   [0, 0, 0, 0, 5, 4, 4, 4, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 4, 0, 0],
                   [0, 0, 0, 0, 7, 0, 0, 3, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
    tgt = np.equal(x, 7)
    peaks = find_peaks(x, cdist._connectivity)
    logging.debug(' detected: \n%s' % (peaks.astype(int)))
    logging.debug(' target: \n%s' % (tgt.astype(int)))
    assert_array_equal(peaks, tgt)
    # testnd permutation result
    res = testnd.ttest_1samp(y, tfce=True, samples=3)
    if sys.version_info[0] == 3:
        target = [96.84232967, 205.83207424, 425.65942084]
    else:
        target = [77.5852307, 119.1976153, 217.6270428]
    assert_allclose(np.sort(res._cdist.dist), target)

    # parc with TFCE on unconnected dimension
    configure(False)
    x = np.random.normal(0, 1, (10, 5, 2, 4))
    time = UTS(-0.1, 0.1, 5)
    categorial = Categorial('categorial', ('a', 'b'))
    y = NDVar(x, ('case', time, categorial, sensor))
    y0 = NDVar(x[:, :, 0], ('case', time, sensor))
    y1 = NDVar(x[:, :, 1], ('case', time, sensor))
    res = testnd.ttest_1samp(y, tfce=True, samples=3)
    res_parc = testnd.ttest_1samp(y, tfce=True, samples=3, parc='categorial')
    res0 = testnd.ttest_1samp(y0, tfce=True, samples=3)
    res1 = testnd.ttest_1samp(y1, tfce=True, samples=3)
    # cdist
    eq_(res._cdist.shape, (4, 2, 5))
    # T-maps don't depend on connectivity
    assert_array_equal(res.t.x[:, 0], res0.t.x)
    assert_array_equal(res.t.x[:, 1], res1.t.x)
    assert_array_equal(res_parc.t.x[:, 0], res0.t.x)
    assert_array_equal(res_parc.t.x[:, 1], res1.t.x)
    # TFCE-maps should always be the same because they're unconnected
    assert_array_equal(res.tfce_map.x[:, 0], res0.tfce_map.x)
    assert_array_equal(res.tfce_map.x[:, 1], res1.tfce_map.x)
    assert_array_equal(res_parc.tfce_map.x[:, 0], res0.tfce_map.x)
    assert_array_equal(res_parc.tfce_map.x[:, 1], res1.tfce_map.x)
    # Probability-maps should depend on what is taken into account
    p_a = res0.compute_probability_map().x
    p_b = res1.compute_probability_map().x
    assert_array_equal(res_parc.compute_probability_map(categorial='a').x, p_a)
    assert_array_equal(res_parc.compute_probability_map(categorial='b').x, p_b)
    p_parc = res_parc.compute_probability_map()
    assert_array_equal(p_parc.x, res.compute_probability_map().x)
    ok_(np.all(p_parc.sub(categorial='a').x >= p_a))
    ok_(np.all(p_parc.sub(categorial='b').x >= p_b))
    configure(True)
Code Example #37
def test_root_decrease():
    "Tests that the root isotope is not being skipped"
    phi = 1e12 * np.ones(175)
    inp = Material({'FE56': 1.0}, mass=1.0)
    obs = tm.transmute(inp, t=100.0, phi=phi, tol=1e-7)
    assert_less(obs['FE56'], 1.0)
Code Example #38
def features(words, i):
    assert_true(isinstance(i, int))
    assert_less(-1, i)
    assert_less(i, len(words))

    yield words[i].lower()
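An illustrative call (made-up input): the generator validates the index bounds, then yields the lowercased token at that position.

print(list(features(['The', 'CAT'], 1)))  # -> ['cat']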
Code Example #39
File: test_hypergeom.py  Project: qqqube/permute
def lessWTS():
    assert_less(hypergeom([0, 1, 0, 1, 0, 1, 0, 1], 5, 2, 10**5, 'less', True)[0], 1)
Code Example #40
def test_walk():
    walked = []
    firstfile = utils.make_bogus_data_file()
    schedule_for_cleanup(firstfile)
    project_entity = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(project_entity.id)
    folder_entity = syn.store(
        Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(folder_entity.id)
    second_folder = syn.store(
        Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(second_folder.id)
    file_entity = syn.store(File(firstfile, parent=project_entity))
    schedule_for_cleanup(file_entity.id)

    walked.append(((project_entity.name, project_entity.id), [
        (folder_entity.name, folder_entity.id),
        (second_folder.name, second_folder.id)
    ], [(file_entity.name, file_entity.id)]))

    nested_folder = syn.store(
        Folder(name=str(uuid.uuid4()), parent=folder_entity))
    schedule_for_cleanup(nested_folder.id)
    secondfile = utils.make_bogus_data_file()
    schedule_for_cleanup(secondfile)
    second_file = syn.store(File(secondfile, parent=nested_folder))
    schedule_for_cleanup(second_file.id)
    thirdfile = utils.make_bogus_data_file()
    schedule_for_cleanup(thirdfile)
    third_file = syn.store(File(thirdfile, parent=second_folder))
    schedule_for_cleanup(third_file.id)

    walked.append(((os.path.join(project_entity.name,
                                 folder_entity.name), folder_entity.id),
                   [(nested_folder.name, nested_folder.id)], []))
    walked.append(
        ((os.path.join(os.path.join(project_entity.name, folder_entity.name),
                       nested_folder.name), nested_folder.id), [],
         [(second_file.name, second_file.id)]))
    walked.append(((os.path.join(project_entity.name, second_folder.name),
                    second_folder.id), [], [(third_file.name, third_file.id)]))

    #walk() uses query() which returns results that will be eventually consistent with synapse but not immediately after creating the entities
    start_time = time.time()
    while syn.query("select id from entity where id=='%s'" %
                    third_file.id).get('totalNumberOfResults') <= 0:
        assert_less(time.time() - start_time, QUERY_TIMEOUT_SEC)
        time.sleep(2)

    temp = synapseutils.walk(syn, project_entity.id)
    temp = list(temp)
    #Must sort the tuples returned, because order matters for the assert
    #Folders are returned in a different ordering depending on the name
    for i in walked:
        for x in i:
            if type(x) == list:
                x.sort()
    for i in temp:
        for x in i:
            if type(x) == list:
                x.sort()
        assert i in walked

    print("CHECK: synapseutils.walk on a file should return empty generator")
    temp = synapseutils.walk(syn, second_file.id)
    assert list(temp) == []
Code Example #41
def test_copy():
    """Tests the copy function"""
    # Create a Project
    project_entity = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(project_entity.id)
    acl = syn.setPermissions(
        project_entity,
        other_user['principalId'],
        accessType=['READ', 'CREATE', 'UPDATE', 'DOWNLOAD'])
    # Create two Folders in Project
    folder_entity = syn.store(
        Folder(name=str(uuid.uuid4()), parent=project_entity))
    second_folder = syn.store(
        Folder(name=str(uuid.uuid4()), parent=project_entity))
    third_folder = syn.store(
        Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(folder_entity.id)
    schedule_for_cleanup(second_folder.id)
    schedule_for_cleanup(third_folder.id)

    # Annotations and provenance
    repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
    annots = {'test': ['hello_world']}
    prov = Activity(name="test", used=repo_url)
    # Create, upload, and set annotations/provenance on a file in Folder
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    file_entity = syn.store(File(filename, parent=folder_entity))
    externalURL_entity = syn.store(
        File(repo_url, name='rand', parent=folder_entity, synapseStore=False))
    syn.setAnnotations(file_entity, annots)
    syn.setAnnotations(externalURL_entity, annots)
    syn.setProvenance(externalURL_entity.id, prov)
    schedule_for_cleanup(file_entity.id)
    schedule_for_cleanup(externalURL_entity.id)
    # ------------------------------------
    # TEST COPY FILE
    # ------------------------------------
    output = synapseutils.copy(syn,
                               file_entity.id,
                               destinationId=project_entity.id)
    output_URL = synapseutils.copy(syn,
                                   externalURL_entity.id,
                                   destinationId=project_entity.id,
                                   skipCopyAnnotations=True)

    #Verify that our copied files are identical
    copied_ent = syn.get(output[file_entity.id])
    copied_URL_ent = syn.get(output_URL[externalURL_entity.id],
                             downloadFile=False)

    copied_ent_annot = syn.getAnnotations(copied_ent)
    copied_url_annot = syn.getAnnotations(copied_URL_ent)
    copied_prov = syn.getProvenance(copied_ent)
    copied_url_prov = syn.getProvenance(copied_URL_ent)
    schedule_for_cleanup(copied_ent.id)
    schedule_for_cleanup(copied_URL_ent.id)

    # TEST: setProvenance = Traceback
    print("Test: setProvenance = Traceback")
    assert copied_prov['used'][0]['reference']['targetId'] == file_entity.id
    assert copied_url_prov['used'][0]['reference'][
        'targetId'] == externalURL_entity.id

    # TEST: Make sure copied files are the same
    assert copied_ent_annot == annots
    assert copied_ent.dataFileHandleId == file_entity.dataFileHandleId

    # TEST: Make sure copied URLs are the same
    assert copied_url_annot == {}
    assert copied_URL_ent.externalURL == repo_url
    assert copied_URL_ent.name == 'rand'
    assert copied_URL_ent.dataFileHandleId == externalURL_entity.dataFileHandleId

    # TEST: Throw error if file is copied to a folder/project that has a file with the same filename
    assert_raises(ValueError,
                  synapseutils.copy,
                  syn,
                  project_entity.id,
                  destinationId=project_entity.id)
    assert_raises(ValueError,
                  synapseutils.copy,
                  syn,
                  file_entity.id,
                  destinationId=project_entity.id)
    assert_raises(ValueError,
                  synapseutils.copy,
                  syn,
                  file_entity.id,
                  destinationId=third_folder.id,
                  setProvenance="gib")
    assert_raises(ValueError,
                  synapseutils.copy,
                  syn,
                  file_entity.id,
                  destinationId=file_entity.id)

    print("Test: setProvenance = None")
    output = synapseutils.copy(syn,
                               file_entity.id,
                               destinationId=second_folder.id,
                               setProvenance=None)
    assert_raises(SynapseHTTPError, syn.getProvenance, output[file_entity.id])
    schedule_for_cleanup(output[file_entity.id])

    print("Test: setProvenance = Existing")
    output_URL = synapseutils.copy(syn,
                                   externalURL_entity.id,
                                   destinationId=second_folder.id,
                                   setProvenance="existing")
    output_prov = syn.getProvenance(output_URL[externalURL_entity.id])
    schedule_for_cleanup(output_URL[externalURL_entity.id])
    assert output_prov['name'] == prov['name']
    assert output_prov['used'] == prov['used']

    if 'username' not in other_user or 'password' not in other_user:
        sys.stderr.write(
            '\nWarning: no test-authentication configured. skipping testing copy function when trying to copy file made by another user.\n'
        )
        return

    try:
        print(
            "Test: Other user copy should result in different data file handle"
        )
        syn_other = synapseclient.Synapse(skip_checks=True)
        syn_other.login(other_user['username'], other_user['password'])

        output = synapseutils.copy(syn_other,
                                   file_entity.id,
                                   destinationId=third_folder.id)
        new_copied_ent = syn.get(output[file_entity.id])
        new_copied_ent_annot = syn.getAnnotations(new_copied_ent)
        schedule_for_cleanup(new_copied_ent.id)

        copied_URL_ent.externalURL = "https://www.google.com"
        copied_URL_ent = syn.store(copied_URL_ent)
        output = synapseutils.copy(syn_other,
                                   copied_URL_ent.id,
                                   destinationId=third_folder.id,
                                   version=1)
        new_copied_URL = syn.get(output[copied_URL_ent.id], downloadFile=False)
        schedule_for_cleanup(new_copied_URL.id)

        assert new_copied_ent_annot == annots
        assert new_copied_ent.dataFileHandleId != copied_ent.dataFileHandleId
        #Test if copying different versions gets you the correct file
        assert new_copied_URL.versionNumber == 1
        assert new_copied_URL.externalURL == repo_url
        assert new_copied_URL.dataFileHandleId != copied_URL_ent.dataFileHandleId
    finally:
        syn_other.logout()

    # ------------------------------------
    # TEST COPY LINKS
    # ------------------------------------
    print("Test: Copy Links")
    second_file = utils.make_bogus_data_file()
    #schedule_for_cleanup(filename)
    second_file_entity = syn.store(File(second_file, parent=project_entity))
    link_entity = Link(second_file_entity.id, parent=folder_entity.id)
    link_entity = syn.store(link_entity)

    #function under test uses queries which are eventually consistent but not immediately after creating the entities
    start_time = time.time()
    while syn.query("select id from entity where id=='%s'" %
                    link_entity.id).get('totalNumberOfResults') <= 0:
        assert_less(time.time() - start_time, QUERY_TIMEOUT_SEC)
        time.sleep(2)

    copied_link = synapseutils.copy(syn,
                                    link_entity.id,
                                    destinationId=second_folder.id)
    old = syn.get(link_entity.id, followLink=False)
    new = syn.get(copied_link[link_entity.id], followLink=False)
    assert old.linksTo['targetId'] == new.linksTo['targetId']
    assert old.linksTo['targetVersionNumber'] == new.linksTo[
        'targetVersionNumber']

    schedule_for_cleanup(second_file_entity.id)
    schedule_for_cleanup(link_entity.id)
    schedule_for_cleanup(copied_link[link_entity.id])

    time.sleep(3)

    assert_raises(ValueError,
                  synapseutils.copy,
                  syn,
                  link_entity.id,
                  destinationId=second_folder.id)

    # ------------------------------------
    # TEST COPY TABLE
    # ------------------------------------
    second_project = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(second_project.id)
    print("Test: Copy Tables")
    cols = [
        Column(name='n', columnType='DOUBLE', maximumSize=50),
        Column(name='c', columnType='STRING', maximumSize=50),
        Column(name='i', columnType='INTEGER')
    ]
    data = [[2.1, 'foo', 10], [2.2, 'bar', 20], [2.3, 'baz', 30]]

    schema = syn.store(
        Schema(name='Testing', columns=cols, parent=project_entity.id))
    row_reference_set = syn.store(
        RowSet(columns=cols, schema=schema, rows=[Row(r) for r in data]))

    table_map = synapseutils.copy(syn,
                                  schema.id,
                                  destinationId=second_project.id)
    copied_table = syn.tableQuery('select * from %s' % table_map[schema.id])
    rows = copied_table.asRowSet()['rows']
    # TEST: Check if all values are the same
    for i, row in enumerate(rows):
        assert row['values'] == data[i]

    assert_raises(ValueError,
                  synapseutils.copy,
                  syn,
                  schema.id,
                  destinationId=second_project.id)

    schedule_for_cleanup(schema.id)
    schedule_for_cleanup(table_map[schema.id])

    # ------------------------------------
    # TEST COPY FOLDER
    # ------------------------------------
    print("Test: Copy Folder")
    mapping = synapseutils.copy(syn,
                                folder_entity.id,
                                destinationId=second_project.id)
    for i in mapping:
        old = syn.get(i, downloadFile=False)
        new = syn.get(mapping[i], downloadFile=False)
        assert old.name == new.name
        assert old.annotations == new.annotations
        assert old.concreteType == new.concreteType

    assert_raises(ValueError,
                  synapseutils.copy,
                  syn,
                  folder_entity.id,
                  destinationId=second_project.id)
    # TEST: Throw error if excludeTypes isn't in file, link and table or isn't a list
    assert_raises(ValueError,
                  synapseutils.copy,
                  syn,
                  second_folder.id,
                  destinationId=second_project.id,
                  excludeTypes=["foo"])
    assert_raises(ValueError,
                  synapseutils.copy,
                  syn,
                  second_folder.id,
                  destinationId=second_project.id,
                  excludeTypes="file")
    # TEST: excludeType = ["file"], only the folder is created
    second = synapseutils.copy(syn,
                               second_folder.id,
                               destinationId=second_project.id,
                               excludeTypes=["file", "table", "link"])

    copied_folder = syn.get(second[second_folder.id])
    assert copied_folder.name == second_folder.name
    assert len(second) == 1
    # TEST: Make sure error is thrown if foldername already exists
    start_time = time.time()
    while syn.query("select id from entity where id=='%s'" %
                    copied_folder.id).get('totalNumberOfResults') <= 0:
        assert_less(time.time() - start_time, QUERY_TIMEOUT_SEC)
        time.sleep(2)

    assert_raises(ValueError,
                  synapseutils.copy,
                  syn,
                  second_folder.id,
                  destinationId=second_project.id)

    # ------------------------------------
    # TEST COPY PROJECT
    # ------------------------------------
    print("Test: Copy Project")
    third_project = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(third_project.id)

    mapping = synapseutils.copy(syn,
                                project_entity.id,
                                destinationId=third_project.id)
    for i in mapping:
        old = syn.get(i, downloadFile=False)
        new = syn.get(mapping[i], downloadFile=False)
        if not isinstance(old, Project):
            assert old.name == new.name
        assert old.annotations == new.annotations
        assert old.concreteType == new.concreteType

    # TEST: Can't copy project to a folder
    assert_raises(ValueError,
                  synapseutils.copy,
                  syn,
                  project_entity.id,
                  destinationId=second_folder.id)
Code Example #42
def test_encode_transfer_encoding():
    body = "long line " * 100
    encoded_body = encode_transfer_encoding('base64', body)
    # according to RFC 5322, each line "SHOULD be no more than 78 characters"
    assert_less(max([len(l) for l in encoded_body.splitlines()]), 79)
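For reference, standard MIME base64 wraps at 76 characters, which is what keeps each encoded line under RFC 5322's 78-character guidance. A minimal sketch of such an encoder (the project's encode_transfer_encoding may well differ):

import base64

def encode_base64_sketch(body, maxlinelen=76):
    raw = base64.b64encode(body.encode('utf-8'))
    lines = [raw[i:i + maxlinelen] for i in range(0, len(raw), maxlinelen)]
    return b'\n'.join(lines).decode('ascii')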
Code Example #43
def test_latency(p_send, p_recv):
  p_send.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
  p_recv.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
  p_send.set_can_loopback(False)
  p_recv.set_can_loopback(False)

  assert not p_send.legacy
  assert not p_recv.legacy

  p_send.set_can_speed_kbps(0, 100)
  p_recv.set_can_speed_kbps(0, 100)
  time.sleep(0.05)

  p_send.can_send_many([(0x1ba, 0, "testmsg", 0)]*10)
  time.sleep(0.05)
  p_recv.can_recv()
  p_send.can_recv()

  busses = [0,1,2]

  for bus in busses:
    for speed in [100, 250, 500, 750, 1000]:
      p_send.set_can_speed_kbps(bus, speed)
      p_recv.set_can_speed_kbps(bus, speed)
      time.sleep(0.1)

      #clear can buffers
      clear_can_buffers(p_send)
      clear_can_buffers(p_recv)

      latencies = []
      comp_kbps_list = []
      saturation_pcts = []

      num_messages = 100

      for i in range(num_messages):
        st = time.time()
        p_send.can_send(0x1ab, "message", bus)
        r = []
        while len(r) < 1 and (time.time() - st) < 5:
          r = p_recv.can_recv()
        et = time.time()
        r_echo = []
        while len(r_echo) < 1 and (time.time() - st) < 10:
          r_echo = p_send.can_recv()

        if len(r) == 0 or len(r_echo) == 0:
          print("r: {}, r_echo: {}".format(r, r_echo))

        assert_equal(len(r),1)
        assert_equal(len(r_echo),1)

        et = (et - st)*1000.0
        comp_kbps = (1+11+1+1+1+4+8*8+15+1+1+1+7) / et
        latency = et - ((1+11+1+1+1+4+8*8+15+1+1+1+7) / speed)

        assert_less(latency, 5.0)

        saturation_pct = (comp_kbps/speed) * 100.0
        latencies.append(latency)
        comp_kbps_list.append(comp_kbps)
        saturation_pcts.append(saturation_pct)

      average_latency = sum(latencies)/num_messages
      assert_less(average_latency, 1.0)
      average_comp_kbps = sum(comp_kbps_list)/num_messages
      average_saturation_pct = sum(saturation_pcts)/num_messages

      print("two pandas bus {}, {} message average at speed {:4d}, latency is {:5.3f}ms, comp speed is {:7.2f}, percent {:6.2f}"\
            .format(bus, num_messages, speed, average_latency, average_comp_kbps, average_saturation_pct))
Code Example #44
    def test_demo_task12ax(self):
        fer = run_config_get_fer("demos/demo-theano-task12ax.config")
        assert_less(fer, 0.01)
Code Example #45
    def test_plots_tight(self):
        frame = plots_to_frame(self.figures, bbox_inches='tight')
        assert_less(frame.shape[1:3], (384, 512))
Code Example #46
def test_conds_comp(db, fname, backend):
    conds = [('NucId', '==', 922350000), ('MassFrac', '>', 0.0072)]
    df = db.query("Compositions", conds)
    assert_less(0, len(df))
    for row in df['MassFrac']:
        assert_less(0.0072, row)
Code Example #47
    def test_smoothed_l1tv(self):

        import numpy as np

        from parsimony.functions import CombinedFunction
        import parsimony.algorithms.proximal as proximal
        import parsimony.functions as functions
        import parsimony.functions.penalties as penalties
        import parsimony.functions.nesterov.tv as tv
        import parsimony.functions.nesterov.l1tv as l1tv
        import parsimony.utils.start_vectors as start_vectors
        import parsimony.datasets.simulate as simulate

        np.random.seed(42)

        px = 10
        py = 1
        pz = 1
        shape = (pz, py, px)
        n, p = 5, np.prod(shape)

        l = 0.618
        k = 0.01
        g = 1.1

        start_vector = start_vectors.RandomStartVector(normalise=True)
        beta = start_vector.get_vector(p)

        alpha = 1.0
        Sigma = alpha * np.eye(p, p) \
            + (1.0 - alpha) * np.random.randn(p, p)
        mean = np.zeros(p)
        M = np.random.multivariate_normal(mean, Sigma, n)
        e = np.random.randn(n, 1)

        snr = 100.0

        mu = 5e-3

        A = tv.linear_operator_from_shape(shape)
        #        X, y, beta_star = l1_l2_tvmu.load(l=l, k=k, g=g, beta=beta, M=M, e=e,
        #                                        A=A, mu=mu, snr=snr)

        funs = [
            simulate.grad.L1(l),
            simulate.grad.L2Squared(k),
            simulate.grad.TotalVariation(g, A)
        ]
        lr = simulate.LinearRegressionData(funs,
                                           M,
                                           e,
                                           snr=snr,
                                           intercept=False)

        X, y, beta_star = lr.load(beta)

        eps = 1e-8
        max_iter = 810

        alg = proximal.FISTA(eps=eps, max_iter=max_iter)

        function = CombinedFunction()
        function.add_loss(functions.losses.LinearRegression(X, y, mean=False))
        function.add_penalty(penalties.L2Squared(l=k))
        A = l1tv.linear_operator_from_shape(shape, p)
        function.add_prox(l1tv.L1TV(l, g, A=A, mu=mu, penalty_start=0))
        #        A = tv.linear_operator_from_shape(shape)
        #        function.add_penalty(tv.TotalVariation(l=g, A=A, mu=mu,
        #                                               penalty_start=0))
        #        function.add_prox(penalties.L1(l=l))

        beta_start = start_vector.get_vector(p)
        beta = alg.run(function, beta_start)

        berr = np.linalg.norm(beta - beta_star)
        #        print "berr:", berr
        #        assert berr < 5e-1
        assert_less(berr, 5e-1, "The found regression vector is not correct.")

        f_parsimony = function.f(beta)
        f_star = function.f(beta_star)
        ferr = abs(f_parsimony - f_star)
        #        print "ferr:", ferr
        #        assert ferr < 5e-3
        assert_less(ferr, 5e-3, "The found regression vector is not correct.")
Code Example #48
def test_cmaes_minimize():
    _, f = fmin(lambda x: np.linalg.norm(x), cma_type="standard",
                x0=np.zeros(2), random_state=0, maxfun=300)
    assert_less(f, 1e-5)
Code Example #49
def test_udp_doesnt_drop(serial=None):
    connect_wifi(serial)

    p = Panda(serial)
    p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
    p.set_can_loopback(True)

    pwifi = PandaWifiStreaming()
    while 1:
        if len(pwifi.can_recv()) == 0:
            break

    for msg_count in [1, 100]:
        saturation_pcts = []
        for i in range({1: 0x80, 100: 0x20}[msg_count]):
            pwifi.kick()

            speed = 500
            p.set_can_speed_kbps(0, speed)
            comp_kbps = time_many_sends(p,
                                        0,
                                        pwifi,
                                        msg_count=msg_count,
                                        msg_id=0x100 + i)
            saturation_pct = (comp_kbps / speed) * 100.0

            if msg_count == 1:
                sys.stdout.write(".")
                sys.stdout.flush()
            else:
                print(
                    "UDP WIFI loopback %d messages at speed %d, comp speed is %.2f, percent %.2f"
                    % (msg_count, speed, comp_kbps, saturation_pct))
                assert_greater(saturation_pct,
                               20)  #sometimes the wifi can be slow...
                assert_less(saturation_pct, 100)
                saturation_pcts.append(saturation_pct)
        if len(saturation_pcts) > 0:
            assert_greater(sum(saturation_pcts) / len(saturation_pcts), 60)

    time.sleep(5)
    usb_ok_cnt = 0
    REQ_USB_OK_CNT = 500
    st = time.time()
    msg_id = 0x1bb
    bus = 0
    last_missing_msg = 0
    while usb_ok_cnt < REQ_USB_OK_CNT and (time.time() - st) < 40:
        p.can_send(msg_id, "message", bus)
        time.sleep(0.01)
        r = [1]
        missing = True
        while len(r) > 0:
            r = p.can_recv()
            r = filter(lambda x: x[3] == bus and x[0] == msg_id, r)
            if len(r) > 0:
                missing = False
                usb_ok_cnt += len(r)
            if missing:
                last_missing_msg = time.time()
    et = time.time() - st
    last_missing_msg = last_missing_msg - st
    print(
        "waited {} for panda to recv can on usb, {} msgs, last missing at {}".
        format(et, usb_ok_cnt, last_missing_msg))
    assert usb_ok_cnt >= REQ_USB_OK_CNT, "Unable to recv can on USB after UDP"
Code Example #50
    def test_nonsmooth(self):

        import numpy as np

        import parsimony.utils.consts as consts
        from parsimony.functions import CombinedFunction
        import parsimony.algorithms.proximal as proximal
        import parsimony.functions.losses as losses
        import parsimony.functions.nesterov as nesterov
        import parsimony.utils.start_vectors as start_vectors
        import parsimony.datasets.simulate.l1_l2_tv as l1_l2_tv

        start_vector = start_vectors.RandomStartVector(normalise=True)

        np.random.seed(42)

        n, p = 75, 100

        alpha = 0.9
        V = np.random.randn(p, p)
        Sigma = alpha * np.eye(p, p) \
            + (1.0 - alpha) * np.dot(V.T, V)
        mean = np.zeros(p)
        M = np.random.multivariate_normal(mean, Sigma, n)
        e = np.random.randn(n, 1)

        beta_start = start_vector.get_vector(p)
        beta_start[np.abs(beta_start) < 0.1] = 0.0

        l = 0.618
        k = 0.0
        g = 0.0

        A = np.eye(p)
        A = [A, A, A]
        snr = 100.0
        X, y, beta_star = l1_l2_tv.load(l, k, g, beta_start, M, e, A, snr=snr)

        beta = beta_start

        for mu in [5e-2, 5e-3, 5e-4, 5e-5]:
            function = CombinedFunction()
            function.add_loss(losses.LinearRegression(X, y, mean=False))

            A = nesterov.l1.linear_operator_from_variables(p, penalty_start=0)
            function.add_penalty(nesterov.l1.L1(l, A=A, mu=mu,
                                                penalty_start=0))

            fista = proximal.FISTA(eps=consts.TOLERANCE, max_iter=2300)
            beta = fista.run(function, beta)

        berr = np.linalg.norm(beta - beta_star)
        #        print "berr:", berr
        #        assert berr < 5e-2
        assert_less(berr, 5e-2, "The found regression vector is not correct.")

        # Test proximal operator
        beta = beta_start
        function = CombinedFunction()
        function.add_loss(losses.LinearRegression(X, y, mean=False))
        A = nesterov.l1.linear_operator_from_variables(p, penalty_start=0)
        #        function.add_penalty(nesterov.l1.L1(l, A=A, mu=mu_min,
        #                                            penalty_start=penalty_start))
        function.add_prox(nesterov.l1.L1(l, A=A, mu=5e-5, penalty_start=0))

        fista = proximal.FISTA(eps=consts.TOLERANCE, max_iter=2000)
        beta = fista.run(function, beta)

        berr = np.linalg.norm(beta - beta_star)
        #        print "berr:", berr
        #        assert berr < 5e-0
        assert_less(berr, 5e-0, "The found regression vector is not correct.")
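
The test above assembles the l1 problem through parsimony's CombinedFunction; as a reference for what FISTA is doing underneath, a self-contained textbook sketch for l1-penalised least squares (a generic implementation, not parsimony's internals):

import numpy as np

def fista_lasso(X, y, lam, max_iter=2000):
    # FISTA: gradient step on the smooth squared loss, soft-thresholding
    # as the proximal operator of lam * ||beta||_1, plus Nesterov momentum.
    L = np.linalg.norm(X, 2) ** 2          # Lipschitz constant of the gradient
    beta = np.zeros((X.shape[1], 1))
    z, t = beta.copy(), 1.0
    for _ in range(max_iter):
        w = z - X.T.dot(X.dot(z) - y) / L  # gradient step on the smooth part
        beta_new = np.sign(w) * np.maximum(np.abs(w) - lam / L, 0.0)
        t_new = (1.0 + np.sqrt(1.0 + 4.0 * t * t)) / 2.0
        z = beta_new + ((t - 1.0) / t_new) * (beta_new - beta)
        beta, t = beta_new, t_new
    return beta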
Code example #51
def test_bipop_cmaes():
    _, f = fmin(lambda x: np.linalg.norm(x), cma_type="bipop",
                x0=np.zeros(2), random_state=0, maxfun=300)
    assert_less(f, 1e-5)
Code example #52
File: test_util.py Project: demidov91/happybase
def check(s_hex, expected):
    s = decode(s_hex, 'hex')
    v = util.bytes_increment(s)
    v_hex = encode(v, 'hex')
    assert_equal(expected, v_hex)
    assert_less(s, v)
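
What util.bytes_increment is being tested for is the standard row-key trick: return the shortest byte string that sorts strictly after every string prefixed by the input. A minimal sketch of that semantics (inferred from the test, not copied from happybase):

def bytes_increment(s):
    s = bytearray(s)
    while s and s[-1] == 0xff:   # trailing 0xff bytes cannot be incremented
        del s[-1]
    if not s:
        return None              # an all-0xff input has no upper bound
    s[-1] += 1
    return bytes(s)

assert bytes_increment(b'\x00\x01') == b'\x00\x02'
assert bytes_increment(b'\x00\xff') == b'\x01'
assert bytes_increment(b'\xff') is None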
Code example #53
def test_cmaes_minimize_many_params():
    _, f = fmin(lambda x: np.linalg.norm(x), cma_type="standard",
                x0=np.zeros(30), random_state=0, maxfun=500)
    assert_less(f, 1.0)
Code example #54
def test_cmaes_minimize_eval_initial_x():
    _, f = fmin(lambda x: np.linalg.norm(x), cma_type="standard",
                x0=np.ones(2), random_state=0, maxfun=300, eval_initial_x=True)
    assert_less(f, 1e-5)
Code example #55
def test_ttest_rel():
    "Test testnd.ttest_rel()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_rel('uts',
                           'A%B', ('a1', 'b1'), ('a0', 'b0'),
                           'rm',
                           ds=ds,
                           samples=100)
    repr(res)

    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # collapsing cells
    res2 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_less(res2.p_uncorrected.min(), 0.05)
    assert_equal(res2.n, res.n)

    # reproducibility
    res3 = testnd.ttest_rel('uts',
                            'A%B', ('a1', 'b1'), ('a0', 'b0'),
                            'rm',
                            ds=ds,
                            samples=100)
    assert_dataset_equal(res3.find_clusters(maps=True), res.clusters)
    eelbrain._stats.testnd.MULTIPROCESSING = 0
    res4 = testnd.ttest_rel('uts',
                            'A%B', ('a1', 'b1'), ('a0', 'b0'),
                            'rm',
                            ds=ds,
                            samples=100)
    assert_dataset_equal(res4.find_clusters(maps=True), res.clusters)
    eelbrain._stats.testnd.MULTIPROCESSING = 1
    sds = ds.sub("B=='b0'")
    # thresholded, UTS
    eelbrain._stats.testnd.MULTIPROCESSING = 0
    res0 = testnd.ttest_rel('uts',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    eelbrain._stats.testnd.MULTIPROCESSING = 1
    res1 = testnd.ttest_rel('uts',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # thresholded, UTSND
    eelbrain._stats.testnd.MULTIPROCESSING = 0
    res0 = testnd.ttest_rel('utsnd',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    eelbrain._stats.testnd.MULTIPROCESSING = 1
    res1 = testnd.ttest_rel('utsnd',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # TFCE, UTS
    eelbrain._stats.testnd.MULTIPROCESSING = 0
    res0 = testnd.ttest_rel('uts',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            tfce=True,
                            samples=10)
    tgt = res0.compute_probability_map()
    eelbrain._stats.testnd.MULTIPROCESSING = 1
    res1 = testnd.ttest_rel('uts',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            tfce=True,
                            samples=10)
    assert_dataobj_equal(res1.compute_probability_map(), tgt)
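
For orientation, the statistic that testnd.ttest_rel permutes at each data point is an ordinary related-samples t statistic; a minimal scipy sketch of that building block (synthetic data, not the eelbrain dataset):

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
a = rng.normal(0.0, 1.0, 15)       # condition a0, one value per subject
b = a + rng.normal(1.0, 0.5, 15)   # condition a1: same subjects, shifted
t, p = stats.ttest_rel(b, a)
assert p < 0.05                    # the shift is large relative to its spread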
Code example #56
def test_create_and_update_file_view():

    # Create a folder
    folder = Folder(str(uuid.uuid4()),
                    parent=project,
                    description='creating a file-view')
    folder = syn.store(folder)

    # Create dummy file with annotations in our folder
    path = utils.make_bogus_data_file()
    file_annotations = dict(fileFormat='jpg',
                            dataType='image',
                            artist='Banksy',
                            medium='print',
                            title='Girl With Balloon')
    schedule_for_cleanup(path)
    a_file = File(path, parent=folder, annotations=file_annotations)
    a_file = syn.store(a_file)
    schedule_for_cleanup(a_file)

    # Add new columns for the annotations on this file and get their IDs
    my_added_cols = [
        syn.store(synapseclient.Column(name=k, columnType="STRING"))
        for k in file_annotations.keys()
    ]
    my_added_cols_ids = [c['id'] for c in my_added_cols]
    view_default_ids = [
        c['id'] for c in syn._get_default_entity_view_columns(
            EntityViewType.FILE.value)
    ]
    col_ids = my_added_cols_ids + view_default_ids
    scopeIds = [folder['id'].lstrip('syn')]

    # Create an empty entity-view with defined scope as folder

    entity_view = EntityViewSchema(name=str(uuid.uuid4()),
                                   scopeIds=scopeIds,
                                   addDefaultViewColumns=True,
                                   addAnnotationColumns=False,
                                   type='file',
                                   columns=my_added_cols,
                                   parent=project)

    entity_view = syn.store(entity_view)
    schedule_for_cleanup(entity_view)

    assert_equals(set(scopeIds), set(entity_view.scopeIds))
    assert_equals(set(col_ids), set(entity_view.columnIds))
    assert_equals(EntityViewType.FILE.value, entity_view.viewTypeMask)

    # get the current view-schema
    view = syn.tableQuery("select * from %s" % entity_view.id)
    schedule_for_cleanup(view.filepath)

    view_dict = list(
        csv.DictReader(io.open(view.filepath, encoding="utf-8", newline='')))

    # check that all of the annotations were retrieved from the view
    assert_true(
        set(file_annotations.keys()).issubset(set(view_dict[0].keys())))

    updated_a_file = syn.get(a_file.id, downloadFile=False)

    # Check that the values are the same as what was set
    # Both in the view and on the entity itself
    for k, v in file_annotations.items():
        assert_equals(view_dict[0][k], v)
        assert_equals(updated_a_file.annotations[k][0], v)

    # Make a change to the view and store
    view_dict[0]['fileFormat'] = 'PNG'

    with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as temp:
        schedule_for_cleanup(temp.name)
        temp_filename = temp.name

    with io.open(temp_filename, mode='w', encoding="utf-8",
                 newline='') as temp_file:
        dw = csv.DictWriter(temp_file,
                            fieldnames=view_dict[0].keys(),
                            quoting=csv.QUOTE_NONNUMERIC,
                            lineterminator=str(os.linesep))
        dw.writeheader()
        dw.writerows(view_dict)
        temp_file.flush()
    syn.store(synapseclient.Table(entity_view.id, temp_filename))
    new_view_dict = list(
        csv.DictReader(io.open(temp_filename, encoding="utf-8", newline='')))
    assert_equals(new_view_dict[0]['fileFormat'], 'PNG')

    # query for the change
    start_time = time.time()

    new_view_results = syn.tableQuery("select * from %s" % entity_view.id)
    schedule_for_cleanup(new_view_results.filepath)
    new_view_dict = list(
        csv.DictReader(
            io.open(new_view_results.filepath, encoding="utf-8", newline='')))
    # query until change is seen.
    while new_view_dict[0]['fileFormat'] != 'PNG':
        # check timeout
        assert_less(time.time() - start_time, QUERY_TIMEOUT_SEC)
        # query again
        new_view_results = syn.tableQuery("select * from %s" % entity_view.id)
        new_view_dict = list(
            csv.DictReader(
                io.open(new_view_results.filepath,
                        encoding="utf-8",
                        newline='')))
    # paranoid check
    assert_equals(new_view_dict[0]['fileFormat'], 'PNG')
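
The query-until-change loop above is a common pattern for eventually consistent stores; a generic sketch of it (function names are hypothetical):

import time

def wait_until(predicate, timeout_sec, interval=1.0):
    # Re-evaluate the predicate until it holds, failing loudly on timeout.
    deadline = time.time() + timeout_sec
    while not predicate():
        assert time.time() < deadline, "timed out waiting for change"
        time.sleep(interval)

# e.g. wait_until(lambda: fetch_view()[0]['fileFormat'] == 'PNG', QUERY_TIMEOUT_SEC)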
Code example #57
def test_ttest_rel():
    "Test testnd.ttest_rel()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_rel('uts',
                           'A%B', ('a1', 'b1'), ('a0', 'b0'),
                           'rm',
                           ds=ds,
                           samples=100)
    eq_(
        repr(res), "<ttest_rel 'uts', 'A x B', ('a1', 'b1'), ('a0', 'b0'), "
        "'rm' (n=15), samples=100, p=.000>")

    # alternate argspec
    ds1 = Dataset()
    ds1['a1b1'] = ds.eval("uts[A%B == ('a1', 'b1')]")
    ds1['a0b0'] = ds.eval("uts[A%B == ('a0', 'b0')]")
    res1 = testnd.ttest_rel('a1b1', 'a0b0', ds=ds1, samples=100)
    assert_dataobj_equal(res1.t, res.t)
    eq_(repr(res1), "<ttest_rel 'a1b1', 'a0b0' (n=15), samples=100, p=.000>")

    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # collapsing cells
    res2 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_less(res2.p_uncorrected.min(), 0.05)
    assert_equal(res2.n, res.n)

    # reproducibility
    res3 = testnd.ttest_rel('uts',
                            'A%B', ('a1', 'b1'), ('a0', 'b0'),
                            'rm',
                            ds=ds,
                            samples=100)
    assert_dataset_equal(res3.find_clusters(maps=True), res.clusters)
    configure(n_workers=0)
    res4 = testnd.ttest_rel('uts',
                            'A%B', ('a1', 'b1'), ('a0', 'b0'),
                            'rm',
                            ds=ds,
                            samples=100)
    assert_dataset_equal(res4.find_clusters(maps=True), res.clusters)
    configure(n_workers=True)
    sds = ds.sub("B=='b0'")
    # thresholded, UTS
    configure(n_workers=0)
    res0 = testnd.ttest_rel('uts',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('uts',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # thresholded, UTSND
    configure(n_workers=0)
    res0 = testnd.ttest_rel('utsnd',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('utsnd',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # TFCE, UTS
    configure(n_workers=0)
    res0 = testnd.ttest_rel('uts',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            tfce=True,
                            samples=10)
    tgt = res0.compute_probability_map()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('uts',
                            'A',
                            'a1',
                            'a0',
                            'rm',
                            ds=sds,
                            tfce=True,
                            samples=10)
    assert_dataobj_equal(res1.compute_probability_map(), tgt)

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds)
    eq_(res.t.x[1, 10], 0)
Code example #58
    def test_epoch(self):
        d = self.t('2008-04-03 16:06+0300')
        assert_less(M.epoch, d)
Code example #59
def test_ttest_1samp():
    "Test testnd.ttest_1samp()"
    ds = datasets.get_uts(True)

    # no clusters
    res0 = testnd.ttest_1samp('uts', sub="A == 'a0'", ds=ds)
    assert_less(res0.p_uncorrected.min(), 0.05)
    repr0 = repr(res0)
    assert_in("'uts'", repr0)
    assert_not_in('clusters', repr0)
    assert_not_in('mintime', repr0)

    # sub as array
    res1 = testnd.ttest_1samp('uts', sub=ds.eval("A == 'a0'"), ds=ds)
    repr1 = repr(res1)
    assert_not_equal(repr1, repr0)

    # clusters without resampling
    res1 = testnd.ttest_1samp('uts',
                              sub="A == 'a0'",
                              ds=ds,
                              samples=0,
                              pmin=0.05,
                              tstart=0,
                              tstop=0.6,
                              mintime=0.05)
    assert_equal(res1.clusters.n_cases, 1)
    assert_not_in('p', res1.clusters)
    repr1 = repr(res1)
    assert_in('clusters', repr1)
    assert_in('samples', repr1)
    assert_in('mintime', repr1)

    # persistence
    string = pickle.dumps(res1, pickle.HIGHEST_PROTOCOL)
    res1_ = pickle.loads(string)
    assert_equal(repr(res1_), repr1)
    assert_dataobj_equal(res1.p_uncorrected, res1_.p_uncorrected)

    # clusters with resampling
    res2 = testnd.ttest_1samp('uts',
                              sub="A == 'a0'",
                              ds=ds,
                              samples=10,
                              pmin=0.05,
                              tstart=0,
                              tstop=0.6,
                              mintime=0.05)
    assert_equal(res2.clusters.n_cases, 1)
    assert_equal(res2.samples, 10)
    assert_in('p', res2.clusters)
    repr2 = repr(res2)
    assert_in('samples', repr2)

    # clusters with permutations
    dss = ds.sub("logical_and(A=='a0', B=='b0')")[:8]
    res3 = testnd.ttest_1samp('uts',
                              sub="A == 'a0'",
                              ds=dss,
                              samples=10000,
                              pmin=0.05,
                              tstart=0,
                              tstop=0.6,
                              mintime=0.05)
    assert_equal(res3.clusters.n_cases, 2)
    assert_equal(res3.samples, -1)
    assert_less(res3.clusters['p'].x.min(), 0.05)
    repr3 = repr(res3)
    assert_in('samples', repr3)

    # nd
    dss = ds.sub("A == 'a0'")
    res = testnd.ttest_1samp('utsnd', ds=dss, samples=1)
    res = testnd.ttest_1samp('utsnd', ds=dss, pmin=0.05, samples=1)
    res = testnd.ttest_1samp('utsnd', ds=dss, tfce=True, samples=1)

    # TFCE properties
    res = testnd.ttest_1samp('utsnd', sub="A == 'a0'", ds=ds, samples=1)
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res = pickle.loads(string)
    tfce_clusters = res.find_clusters(pmin=0.05)
    peaks = res.find_peaks()
    assert_equal(tfce_clusters.eval("p.min()"), peaks.eval("p.min()"))
    masked = res.masked_parameter_map(pmin=0.05)
    assert_array_equal(masked.abs().x <= res.t.abs().x, True)

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    ds['utsnd'].x[:, 2, 10] = 0.1
    res = testnd.ttest_1samp('utsnd', ds=ds)
    eq_(res.t.x[1, 10], 0.)
    assert_greater(res.t.x[2, 10], 1e10)
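
Analogously, the univariate core of testnd.ttest_1samp is a one-sample t-test against zero; a minimal scipy sketch (synthetic data):

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
x = rng.normal(1.0, 1.0, 40)     # synthetic sample with a true nonzero mean
t, p = stats.ttest_1samp(x, 0.0)
assert p < 0.05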
Code example #60
def test_clusterdist():
    "Test _ClusterDist class"
    shape = (10, 6, 6, 4)
    locs = [[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]]
    x = np.random.normal(0, 1, shape)
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 6), Ordered('dim2', range(6),
                                               'unit'), sensor)
    y = NDVar(x, dims)

    # test connecting sensors
    logger.info("TEST:  connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, :2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    print(repr(cdist))
    cdist.add_original(pmap)
    print(repr(cdist))
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))
    assert_equal(cdist.parameter_map.dims, y.dims[1:])

    # test connecting many sensors
    logger.info("TEST:  connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))

    # test keeping sensors separate
    logger.info("TEST:  keeping sensors separate")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, 0] = True
    bin_map[:3, :3, 2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 1, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 2)

    # criteria
    ds = datasets.get_uts(True)
    res = testnd.ttest_rel('utsnd',
                           'A',
                           match='rm',
                           ds=ds,
                           samples=0,
                           pmin=0.05)
    assert_less(res.clusters['duration'].min(), 0.01)
    eq_(res.clusters['n_sensors'].min(), 1)
    res = testnd.ttest_rel('utsnd',
                           'A',
                           match='rm',
                           ds=ds,
                           samples=0,
                           pmin=0.05,
                           mintime=0.02,
                           minsensor=2)
    assert_greater_equal(res.clusters['duration'].min(), 0.02)
    eq_(res.clusters['n_sensors'].min(), 2)

    # TFCE
    logger.info("TEST:  TFCE")
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1,
                        4), sensor, Ordered('dim2', range(10), 'unit'))
    y = NDVar(np.random.normal(0, 1, (10, 4, 4, 10)), dims)
    cdist = _ClusterDist(y, 3, None)
    cdist.add_original(y.x[0])
    cdist.finalize()
    assert_equal(cdist.dist.shape, (3, ))
    # I/O
    string = pickle.dumps(cdist, pickle.HIGHEST_PROTOCOL)
    cdist_ = pickle.loads(string)
    assert_equal(repr(cdist_), repr(cdist))
    # find peaks
    x = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [7, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [5, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 7, 5, 5, 0, 0],
                   [0, 0, 0, 0, 5, 4, 4, 4, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 4, 0, 0],
                   [0, 0, 0, 0, 7, 0, 0, 3, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
    tgt = np.equal(x, 7)
    peaks = cdist._find_peaks(x)
    logging.debug(' detected: \n%s' % (peaks.astype(int)))
    logging.debug(' target: \n%s' % (tgt.astype(int)))
    assert_array_equal(peaks, tgt)

    mps = False, True
    thresholds = (None, 'tfce')
    for mp, threshold in product(mps, thresholds):
        logger.info("TEST:  multiprocessing=%r, threshold=%r" %
                    (mp, threshold))
        _testnd.multiprocessing = mp

        # test keeping dimension
        cdist = _ClusterDist(y, 5, threshold, dist_dim='sensor')
        print(repr(cdist))
        cdist.add_original(y.x[0])
        print(repr(cdist))
        assert_equal(cdist.dist.shape, (5, 4))

        # test keeping time bins
        cdist = _ClusterDist(y, 5, threshold, dist_tstep=0.2)
        cdist.add_original(y.x[0])
        assert_equal(cdist.dist.shape, (5, 2))
        assert_raises(ValueError,
                      _ClusterDist,
                      y,
                      5,
                      threshold,
                      dist_tstep=0.3)

        # test keeping dimension and time bins
        cdist = _ClusterDist(y,
                             5,
                             threshold,
                             dist_dim='sensor',
                             dist_tstep=0.2)
        cdist.add_original(y.x[0])
        assert_equal(cdist.dist.shape, (5, 4, 2))

        # test keeping 2 dimensions and time bins
        cdist = _ClusterDist(y,
                             5,
                             threshold,
                             dist_dim=('sensor', 'dim2'),
                             dist_tstep=0.2)
        cdist.add_original(y.x[0])
        assert_equal(cdist.dist.shape, (5, 4, 2, 10))
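
Finally, the _find_peaks target above marks cells that dominate their neighbourhood; a generic grid-only sketch of local-maximum detection (this ignores the sensor adjacency graph and the plateau handling that the eelbrain implementation adds, so it illustrates the idea rather than reimplementing it):

import numpy as np
from scipy import ndimage

def find_grid_peaks(x):
    # A cell is a candidate peak if no neighbour (including diagonals)
    # exceeds it and its own value is positive.
    footprint = np.ones((3,) * x.ndim, dtype=bool)
    neighbourhood_max = ndimage.maximum_filter(x, footprint=footprint,
                                               mode='constant', cval=-np.inf)
    return (x == neighbourhood_max) & (x > 0)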