Code example #1
def test_find_repetitive_in_range():
    pos = 0
    neg = 0
    # some should be positive, others negative
    for txid in known_juncs:
        expected   = txid in cross_hash_seqs
        if expected:
            pos += 1
        else:
            neg += 1
        my_query_juncs = query_juncs.get(txid,[])
        for query_junc in my_query_juncs:
            minus_range, plus_range = find_match_range(query_junc,seqs,20)
            yield check_find_repetitive_in_range, query_junc, minus_range, plus_range, expected
    
    # all negative
    for txid in unmatched_query_juncs:
        my_query_juncs = unmatched_query_juncs.get(txid,[])
        for query_junc in my_query_juncs:
            minus_range, plus_range = find_match_range(query_junc,seqs,20)
            yield check_find_repetitive_in_range, query_junc, minus_range, plus_range, False
    
    # make sure we found a bunch of each type
    assert_greater(pos,0)
    assert_greater(neg,0)
Code example #2
File: test_testing.py Project: Afey/scikit-learn
def test_assert_greater():
    # Check that the nose implementation of assert_greater gives the
    # same result as the scikit's
    assert_greater(1, 0)
    _assert_greater(1, 0)
    assert_raises(AssertionError, assert_greater, 0, 1)
    assert_raises(AssertionError, _assert_greater, 0, 1)
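For reference, nose's assert_greater can be reproduced in a few lines of plain Python; scikit-learn's `_assert_greater` fallback (used when nose's version is unavailable) is along these lines. This is a sketch of the idea, not necessarily the project's exact code:

def _assert_greater(a, b, msg=None):
    # Fail with a readable message, mirroring nose.tools.assert_greater.
    message = "%r is not greater than %r" % (a, b)
    if msg is not None:
        message += ": " + msg
    assert a > b, message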
Code example #3
File: test_chinese.py Project: dragon788/wordfreq
def test_tokens():
    # Let's test on some Chinese text that has unusual combinations of
    # syllables, because it is about an American vice-president.
    #
    # (He was the Chinese Wikipedia's featured article of the day when I
    # wrote this test.)

    hobart = '加勒特·霍巴特'  # Garret Hobart, or "jiā lè tè huò bā tè".

    # He was the sixth American vice president to die in office.
    fact_simplified  = '他是历史上第六位在任期内去世的美国副总统。'
    fact_traditional = '他是歷史上第六位在任期內去世的美國副總統。'

    # His name breaks into five pieces, with the only piece staying together
    # being the one that means 'Bart'. The dot is not included as a token.
    eq_(
        tokenize(hobart, 'zh'),
        ['加', '勒', '特', '霍', '巴特']
    )

    eq_(
        tokenize(fact_simplified, 'zh'),
        [
         # he / is / in history / #6 / counter for people
         '他', '是',  '历史上', '第六', '位',
         # during / term of office / in / die
         '在', '任期', '内', '去世',
         # of / U.S. / deputy / president
         '的', '美国', '副', '总统'
        ]
    )

    # You match the same tokens if you look it up in Traditional Chinese.
    eq_(tokenize(fact_simplified, 'zh'), tokenize(fact_traditional, 'zh'))
    assert_greater(word_frequency(fact_traditional, 'zh'), 0)
Code example #4
File: test_models.py Project: dlovell/distributions
def test_joint(module, EXAMPLE):
    # \cite{geweke04getting}
    seed_all(0)
    SIZE = 10
    SKIP = 100
    shared = module.Shared.from_dict(EXAMPLE['shared'])
    shared.realize()
    marginal_conditional_samples = defaultdict(lambda: [])
    successive_conditional_samples = defaultdict(lambda: [])
    cond_group = sample_marginal_conditional(module, shared, SIZE)
    for _ in xrange(SAMPLE_COUNT):
        marg_group = sample_marginal_conditional(module, shared, SIZE)
        _append_ss(marg_group, marginal_conditional_samples)

        for __ in range(SKIP):
            cond_group = sample_successive_conditional(
                module,
                shared,
                cond_group,
                SIZE)
        _append_ss(cond_group, successive_conditional_samples)
    for key in marginal_conditional_samples.keys():
        gof = scipy.stats.ttest_ind(
            marginal_conditional_samples[key],
            successive_conditional_samples[key])[1]
        print '{}:{} gof = {:0.3g}'.format(module.__name__, key, gof)
        if not numpy.isfinite(gof):
            raise SkipTest('Test fails with gof = {}'.format(gof))
        assert_greater(gof, MIN_GOODNESS_OF_FIT)
Code example #5
File: test_utils.py Project: lesteve/duecredit
def test_external_versions_basic():
    ev = ExternalVersions()
    assert_equal(ev._versions, {})
    assert_equal(ev["duecredit"], __version__)
    # and it could be compared
    assert_greater_equal(ev["duecredit"], __version__)
    assert_greater(ev["duecredit"], "0.1")

    # For non-existing one we get None
    assert_equal(ev["duecreditnonexisting"], None)
    # and nothing gets added to _versions for nonexisting
    assert_equal(set(ev._versions.keys()), {"duecredit"})

    # but if it is a module without version, we get it set to UNKNOWN
    assert_equal(ev["os"], ev.UNKNOWN)
    # And get a record on that inside
    assert_equal(ev._versions.get("os"), ev.UNKNOWN)
    # And that thing is "True", i.e. present
    assert ev["os"]
    # but not comparable with anything besides itself (was above)
    assert_raises(TypeError, cmp, ev["os"], "0")
    assert_raises(TypeError, assert_greater, ev["os"], "0")

    # And we can get versions based on modules themselves
    from duecredit.tests import mod

    assert_equal(ev[mod], mod.__version__)
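The lookup behavior exercised above can be condensed into a small sketch (assumed behavior, not duecredit's actual implementation): a package that cannot be imported maps to None, and an importable module without `__version__` maps to the UNKNOWN sentinel.

import importlib

UNKNOWN = "UNKNOWN"

def module_version(name):
    # None for packages that cannot be imported at all.
    try:
        mod = importlib.import_module(name)
    except ImportError:
        return None
    # Sentinel for modules that import fine but expose no version.
    return getattr(mod, "__version__", UNKNOWN)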
Code example #6
File: test_query_math.py Project: fritzo/loom
def _check_marginal_samples_match_scores(server, row, fi):
    row = loom.query.protobuf_to_data_row(row.diff)
    row[fi] = None
    to_sample = [i == fi for i in range(len(row))]
    samples = server.sample(to_sample, row, SAMPLE_COUNT)
    val = samples[0][fi]
    base_score = server.score(row)
    if isinstance(val, bool) or isinstance(val, int):
        probs_dict = {}
        samples = [sample[fi] for sample in samples]
        for sample in set(samples):
            row[fi] = sample
            probs_dict[sample] = numpy.exp(
                server.score(row) - base_score)
        if len(probs_dict) == 1:
            assert_almost_equal(probs_dict[sample], 1., places=SCORE_PLACES)
            return
        if min(probs_dict.values()) < MIN_CATEGORICAL_PROB:
            return
        gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
    elif isinstance(val, float):
        probs = numpy.exp([
            server.score(sample) - base_score
            for sample in samples
        ])
        samples = [sample[fi] for sample in samples]
        gof = density_goodness_of_fit(samples, probs, plot=True)
    assert_greater(gof, MIN_GOODNESS_OF_FIT)
Code example #7
def test_def_rxtr_req_sample():
    s = RandomRequestPoint()
    assert_equal(1, s.n_commods.sample())
    assert_equal(1, s.n_request.sample())
    assert_equal(1, s.assem_per_req.sample())
    assert_false(s.assem_multi_commod.sample())
    assert_equal(0, s.req_multi_commods.sample())
    assert_false(s.exclusive.sample())
    assert_equal(0, s.n_req_constr.sample())
    assert_equal(1, s.n_supply.sample())
    assert_equal(0, s.sup_multi.sample())
    assert_equal(0, s.sup_multi_commods.sample())
    assert_equal(1, s.n_sup_constr.sample())
    assert_equal(1, s.sup_constr_val.sample())
    assert_true(s.connection.sample())
    s1 = RandomRequestPoint()
    assert_equal(s1, s)
    constr_avg = 0
    pref_avg = 0
    n = 5000
    for i in range(n):
        constr = s.constr_coeff.sample()
        constr_avg += constr
        assert_greater(constr,  0)
        assert_less_equal(constr,  2)
        pref = s.pref_coeff.sample()
        pref_avg += pref
        assert_greater(pref,  0)
        assert_less_equal(pref,  1)
    assert_almost_equal(1.0, constr_avg / n, places=1)
    assert_almost_equal(0.5, pref_avg / n, places=1)
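The averaging assertions at the end rely on the law of large numbers: with n = 5000 draws of a bounded variable, the sample mean sits very close to its expectation, so places=1 (within 0.05) is a safe tolerance. A self-contained illustration, assuming the coefficient is roughly uniform on its stated (0, 2] range, which the average of 1.0 suggests (numpy stands in for the project's sampler):

import numpy as np

rng = np.random.default_rng(0)
draws = rng.uniform(0.0, 2.0, size=5000)  # expectation 1.0
assert abs(draws.mean() - 1.0) < 0.05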
Code example #8
    def test_multiple_variants_are_roughly_equal(self):
        e = self.build_two_variant_experiment()

        assignments = self.generate_assignments(e)

        assert_greater(assignments[self.VARIANT1], 30)
        assert_greater(assignments[self.VARIANT2], 30)
Code example #9
    def test_create_tiids_from_aliases(self):

        aliases = [('url', 'http://starbucks.com'), ('url', 'http://www.plosmedicine.org/article/info:doi/10.1371/journal.pmed.0020124')]

        response = item_module.create_tiids_from_aliases(aliases, self.r)
        print response
        assert_greater(len(response.keys()), 0)
Code example #10
def test_constraint_removal():
    digits = load_digits()
    X, y = digits.data, digits.target
    y = 2 * (y % 2) - 1  # even vs odd as +1 vs -1
    X = X / 16.
    pbl = BinaryClf(n_features=X.shape[1])
    clf_no_removal = OneSlackSSVM(model=pbl, max_iter=500, C=1,
                                  inactive_window=0, tol=0.01)
    clf_no_removal.fit(X, y)
    clf = OneSlackSSVM(model=pbl, max_iter=500, C=1, tol=0.01,
                       inactive_threshold=1e-8)
    clf.fit(X, y)
    # check that we learned something
    assert_greater(clf.score(X, y), .92)

    # results are mostly equal
    # if we decrease tol, they will get more similar
    assert_less(np.mean(clf.predict(X) != clf_no_removal.predict(X)), 0.02)

    # without removal, have as many constraints as iterations
    assert_equal(len(clf_no_removal.objective_curve_),
                 len(clf_no_removal.constraints_))

    # with removal, there are fewer constraints than iterations
    assert_less(len(clf.constraints_),
                len(clf.objective_curve_))
Code example #11
File: geo.py Project: pantuza/lastfm
def test_get_metro_artist_chart(self):
    """ Testing Geo get Metro artist """
    metro = "madrid"
    country = "spain"
    chart = self.geo.get_metro_artist_chart(metro=metro, country=country)
    self.utils.assert_response_content(chart)
    assert_greater(len(chart["topartists"]["artist"]), 5)
Code example #12
File: test_rkdae.py Project: LongyanU/odelab
	def test_quasigraph(self, plot=False):
		sol = self.solver
		errz = []
		errl = []
		ks = np.arange(1,5)
		for k in ks:
			self.scheme.h = pow(2,-k)
			sol.initialize(u0=self.u0,time=1, name='{0}_{1}'.format(type(self).__name__, k))
			sol.run()
			zexact = sol.system.exact(sol.final_time(),self.u0)[0]
			lexact = sol.system.exact(sol.final_time(),self.u0)[2]
			df = sol.final()[0] - zexact
			logerrz = np.log2(np.abs(df))
			logerrl = np.log2(np.abs(sol.final()[2] - lexact))
			errz.append(logerrz)
			errl.append(logerrl)
		plt.clf()
		plt.subplot(1,2,1)
		plt.title('z')
		regz = order.linear_regression(ks,errz,do_plot=True)
		plt.plot(ks,errz,'o-')
		plt.legend()
		plt.subplot(1,2,2)
		plt.title(u'λ')
		regl = order.linear_regression(ks,errl,do_plot=True)
		plt.plot(ks,errl,'o-')
		plt.legend()
		oz = -regz[0]
		ol = -regl[0]
		nt.assert_greater(ol, self.expected_orders[0] - self.tol)
		nt.assert_greater(oz, self.expected_orders[1] - self.tol)
		return sol
Code example #13
    def test_create_missing_tiids_from_aliases(self):

        aliases_tiids_map = {('url', 'http://starbucks.com'): None, ('url', 'http://www.plosmedicine.org/article/info:doi/10.1371/journal.pmed.0020124'): u'test'}

        response = item_module.create_missing_tiids_from_aliases(aliases_tiids_map, self.r)
        print response
        assert_greater(len(aliases_tiids_map[('url', 'http://starbucks.com')]), 10)
Code example #14
File: test_single.py Project: chagge/xtas
def test_dbpedia_spotlight():
    en_text = (u"Will the efforts of artists like Moby"
               u" help to preserve the Arctic?")
    nl_text = (u"Ik kan me iets herrinneren over de burgemeester van"
               u" Amstelveen en het achterwerk van M\xe1xima."
               u" Verder was Koningsdag een zwart gat.")

    en_annotations = dbpedia_spotlight(en_text, lang='en')
    nl_annotations = dbpedia_spotlight(nl_text, lang='nl')

    # Expect `Arctic` and `Moby` to be found in en_text
    assert_equal(len(en_annotations), 2)
    for ann in en_annotations:
        assert_in(ann['name'], {'Arctic', 'Moby'})
        # The disambiguation candidates should be of type list
        assert_true(isinstance(ann['resource'], list))
        # In this case, the top candidate's uri == the name
        assert_equal(ann['name'], ann['resource'][0]['uri'])

    # Expect {"burgemeester", "Amstelveen", u"M\xe1xima",
    # "Koningsdag", "zwart gat"} to be found in nl_text
    assert_equal(len(nl_annotations), 5)
    sf_set = set([ann['name'] for ann in nl_annotations])
    assert_equal(sf_set, {u"burgemeester", u"Amstelveen", u"M\xe1xima",
                          u"Koningsdag", u"zwart gat"})
    for ann in nl_annotations:
        # The disambiguation candidates should be of type list
        assert_true(isinstance(ann['resource'], list))
        # There should be at least one candidate
        assert_greater(len(ann['resource']), 0)
Code example #15
File: test_synapse_types.py Project: antolikjan/PyNN
def test_simple_stochastic_synapse(sim, plot_figure=False):
    # in this test we connect one spike source to four neurons via
    # stochastic synapses with release probabilities 0.0, 0.5, 0.5 and 1.0
    sim.setup(min_delay=0.5)
    t_stop = 1000.0
    spike_times = np.arange(2.5, t_stop, 5.0)
    source = sim.Population(1, sim.SpikeSourceArray(spike_times=spike_times))
    neurons = sim.Population(4, sim.IF_cond_exp(tau_syn_E=1.0))
    synapse_type = sim.SimpleStochasticSynapse(weight=0.5,
                                               p=np.array([[0.0, 0.5, 0.5, 1.0]]))
    connections = sim.Projection(source, neurons, sim.AllToAllConnector(),
                                 synapse_type=synapse_type)
    source.record('spikes')
    neurons.record('gsyn_exc')
    sim.run(t_stop)

    data = neurons.get_data().segments[0]
    gsyn = data.analogsignals[0].rescale('uS')
    if plot_figure:
        import matplotlib.pyplot as plt
        for i in range(neurons.size):
            plt.subplot(neurons.size, 1, i+1)
            plt.plot(gsyn.times, gsyn[:, i])
        plt.savefig("test_simple_stochastic_synapse_%s.png" % sim.__name__)
    print(data.analogsignals[0].units)
    crossings = []
    for i in range(neurons.size):
        crossings.append(
                gsyn.times[:-1][np.logical_and(gsyn.magnitude[:-1, i] < 0.4, 0.4 < gsyn.magnitude[1:, i])])
    assert_equal(crossings[0].size, 0)
    assert_less(crossings[1].size, 0.6*spike_times.size)
    assert_greater(crossings[1].size, 0.4*spike_times.size)
    assert_equal(crossings[3].size, spike_times.size)
    assert_not_equal(crossings[1], crossings[2])
    print(crossings[1].size / spike_times.size)
    return data
Code example #16
def test_endianness():
    """When creating a named_bitfield, the first field is the most significant
    """
    nbf = named_bitfield('TestBitfield', [('a', 4), ('b', 4)])
    test1 = nbf(0, 15)
    test2 = nbf(15, 0)
    assert_greater(test2, test1)
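The ordering asserted here follows from the packing: with fields ('a', 4) then ('b', 4), 'a' occupies the most significant bits, so instances compare like the packed integer (a << 4) | b. A quick check with a hypothetical helper (packed_value is not part of the library):

def packed_value(a, b):
    # 'a' in the high 4 bits, 'b' in the low 4 bits.
    return (a << 4) | b

assert packed_value(15, 0) > packed_value(0, 15)  # 240 > 15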
Code example #17
def max_norm_arg(arg):
    arg = float(arg)
    if arg < 0.0:
        return numpy.inf
    else:
        assert_greater(arg, 0.0)
        return arg
Code example #18
File: test_suspenders.py Project: klauer/bluesky
def _test_suspender(suspender_class, sc_args, start_val, fail_val, resume_val, wait_time):

    if sys.platform == "darwin":
        # OSX event loop is different; resolve this later
        raise KnownFailureTest()
    my_suspender = suspender_class(RE, "BSTEST:VAL", *sc_args, sleep=wait_time)
    print(my_suspender._lock)
    pv = epics.PV("BSTEST:VAL")
    putter = partial(pv.put, wait=True)
    # make sure we start at good value!
    putter(start_val)
    # dumb scan
    scan = [Msg("checkpoint"), Msg("sleep", None, 0.2)]
    # paranoid
    assert_equal(RE.state, "idle")

    start = ttime.time()
    # queue up fail and resume conditions
    loop.call_later(0.1, putter, fail_val)
    loop.call_later(1, putter, resume_val)
    # start the scan
    RE(scan)
    stop = ttime.time()
    # paranoid clean up of pv call back
    my_suspender._pv.disconnect()
    # assert we waited at least the 1 s resume delay + the suspender sleep + the 0.2 s scan sleep
    print(stop - start)
    assert_greater(stop - start, 1 + wait_time + 0.2)
Code example #19
File: test_lbfgs.py Project: wolfgang-noichl/pylbfgs
def test_2d():
    def f(x, g, f_calls):
        #f_calls, = args
        assert_equal(x.shape, (2, 2))
        assert_equal(g.shape, x.shape)
        g[:] = 2 * x
        f_calls[0] += 1
        return (x ** 2).sum()

    def progress(x, g, fx, xnorm, gnorm, step, k, ls, *args):
        assert_equal(x.shape, (2, 2))
        assert_equal(g.shape, x.shape)

        assert_equal(np.sqrt((x ** 2).sum()), xnorm)
        assert_equal(np.sqrt((g ** 2).sum()), gnorm)

        p_calls[0] += 1
        return 0

    f_calls = [0]
    p_calls = [0]

    xmin = fmin_lbfgs(f, [[10., 100.], [44., 55.]], progress, args=[f_calls])
    assert_greater(f_calls[0], 0)
    assert_greater(p_calls[0], 0)
    assert_array_almost_equal(xmin, [[0, 0], [0, 0]])
Code example #20
def test_userpass_success(self):
    """AUTHENTICATION (REST): Username and password (correct credentials)."""
    mw = []
    headers = {'X-Rucio-Account': 'root', 'X-Rucio-Username': '******', 'X-Rucio-Password': '******'}
    r = TestApp(app.wsgifunc(*mw)).get('/userpass', headers=headers, expect_errors=True)
    assert_equal(r.status, 200)
    assert_greater(len(r.header('X-Rucio-Auth-Token')), 32)
Code example #21
File: test_api.py Project: VarnaSuresh/oneanddone
    def test_create_new_task(self):
        """
        Test Create new task
        """
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
        team = TaskTeamFactory.create()
        project = TaskProjectFactory.create()
        type = TaskTypeFactory.create()

        task_data = {"name": "Sample Task",
                     "short_description": "Task Desc",
                     "instructions": "Task Inst",
                     "prerequisites": "Task Prerequisite",
                     "execution_time": 30,
                     "is_draft": False,
                     "is_invalid": False,
                     "project": project.name,
                     "team": team.name,
                     "type": type.name,
                     "repeatable": False,
                     "start_date": None,
                     "end_date": None,
                     "difficulty": 1,
                     "why_this_matters": "Task matters",
                     "keyword_set": [{"name": "testing"}, {"name": "mozwebqa"}],
                     "taskattempt_set": [{"user": self.client_user.email, "state": 0}],
                     "owner": self.client_user.email}

        response = self.client.post(self.uri, task_data, format='json')
        self.assert_response_status(response, status.HTTP_201_CREATED)
        response_data = json.loads(response.content)
        assert_greater(response_data['id'], 0)
        del response_data['id']
        eq_(sorted(response_data), sorted(task_data))
Code example #22
def test_switch_to_ad3():
    # test if switching between qpbo and ad3 works

    if not get_installed(['qpbo']) or not get_installed(['ad3']):
        return
    X, Y = toy.generate_blocks_multinomial(n_samples=5, noise=1.5,
                                           seed=0)
    crf = GridCRF(n_states=3, inference_method='qpbo')

    ssvm = NSlackSSVM(crf, max_iter=10000)

    ssvm_with_switch = NSlackSSVM(crf, max_iter=10000, switch_to=('ad3'))
    ssvm.fit(X, Y)
    ssvm_with_switch.fit(X, Y)
    assert_equal(ssvm_with_switch.model.inference_method, 'ad3')
    # we check that the dual is higher with ad3 inference,
    # as it might use the relaxation; that is pretty much guaranteed
    assert_greater(ssvm_with_switch.objective_curve_[-1],
                   ssvm.objective_curve_[-1])
    print(ssvm_with_switch.objective_curve_[-1], ssvm.objective_curve_[-1])

    # test that convergence also results in switch
    ssvm_with_switch = NSlackSSVM(crf, max_iter=10000, switch_to=('ad3'),
                                  tol=10)
    ssvm_with_switch.fit(X, Y)
    assert_equal(ssvm_with_switch.model.inference_method, 'ad3')
Code example #23
    def test_post_with_aliases_already_in_db(self):
        items = [
            ["doi", "10.123"],
            ["doi", "10.124"],
            ["doi", "10.125"]
        ]
        resp = self.client.post(
            '/collection',
            data=json.dumps({"aliases": items, "title":"mah collection"}),
            content_type="application/json"
        )
        coll = json.loads(resp.data)["collection"]

        new_items = [
            ["doi", "10.123"], # duplicate
            ["doi", "10.124"], # duplicate
            ["doi", "10.999"]  # new
        ]

        resp2 = self.client.post(
            '/collection',
            data=json.dumps({"aliases": new_items, "title": "mah_collection"}),
            content_type="application/json"
        )
        new_coll = json.loads(resp2.data)["collection"]

        # 3+1 new items + 2 collections + 1 test_item + 1 api_user_doc + at least 7 design docs
        assert_greater(self.d.db.info()["doc_count"], 15)
Code example #24
File: test_spy.py Project: sylvainrocheleau/torabot
def test_spy_query():
    app = make()
    with app.app_context():
        d = mod(name).spy("language:chinese", 60)
        assert_greater(len(d.posts), 0)
        d = mod(name).spy("http://g.e-hentai.org/?f_search=language%3Achinese", 60)
        assert_greater(len(d.posts), 0)
Code example #25
File: test_graph_svm.py Project: DATAQC/pystruct
def test_standard_svm_blobs_2d_class_weight():
    # no edges, reduce to crammer-singer svm
    X, Y = make_blobs(n_samples=210, centers=3, random_state=1, cluster_std=3,
                      shuffle=False)
    X = np.hstack([X, np.ones((X.shape[0], 1))])
    X, Y = X[:170], Y[:170]

    X_graphs = [(x[np.newaxis, :], np.empty((0, 2), dtype=np.int)) for x in X]

    pbl = GraphCRF(n_features=3, n_states=3, inference_method='unary')
    svm = OneSlackSSVM(pbl, check_constraints=False, C=1000)

    svm.fit(X_graphs, Y[:, np.newaxis])

    weights = 1. / np.bincount(Y)
    weights *= len(weights) / np.sum(weights)

    pbl_class_weight = GraphCRF(n_features=3, n_states=3, class_weight=weights,
                                inference_method='unary')
    svm_class_weight = OneSlackSSVM(pbl_class_weight, C=10,
                                    check_constraints=False,
                                    break_on_bad=False)
    svm_class_weight.fit(X_graphs, Y[:, np.newaxis])

    assert_greater(f1_score(Y, np.hstack(svm_class_weight.predict(X_graphs))),
                   f1_score(Y, np.hstack(svm.predict(X_graphs))))
Code example #26
    def test_start_run(self):
        # Just instantiating the object shouldn't create anything
        assert_false(os.path.exists(self.result_file_path))

        self.run_results.start_run(self.scenario)

        # start_run opens the file for writing and dumps out the scenario.
        assert_equal(len(self.scenario.packb()), self._current_size())

        # This is a bit white-box, but that's what we're here for...
        assert_greater(self._current_size(), 0)

        with open(self.result_file_path, "rb") as f:
            unpacker = msgpack.Unpacker(file_like=f)
            got_scenario = Scenario.unpackb(unpacker)
            for attr in [
                "name",
                "_scenario_data",
                "user_count",
                "operation_count",
                "run_seconds",
                "container_base",
                "container_count",
                "containers",
                "container_concurrency",
                "sizes_by_name",
                "version",
                "bench_size_thresholds",
            ]:
                assert_equal(getattr(got_scenario, attr), getattr(self.scenario, attr))
Code example #27
def test_sample_matches_score_counts(Model, EXAMPLE, sample_count):
    for size in iter_valid_sizes(EXAMPLE, max_size=10):
        model = Model()
        model.load(EXAMPLE)

        samples = []
        probs_dict = {}
        for _ in xrange(sample_count):
            value = model.sample_assignments(size)
            sample = canonicalize(value)
            samples.append(sample)
            if sample not in probs_dict:
                assignments = dict(enumerate(value))
                counts = count_assignments(assignments)
                prob = math.exp(model.score_counts(counts))
                probs_dict[sample] = prob

        # renormalize here; test normalization separately
        total = sum(probs_dict.values())
        for key in probs_dict:
            probs_dict[key] /= total

        gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
        print '{} gof = {:0.3g}'.format(Model.__name__, gof)
        assert_greater(gof, MIN_GOODNESS_OF_FIT)
Code example #28
File: 2_usb_to_can.py Project: n2aws/panda
def test_gmlan_bad_toggle():
  p = connect_wo_esp()

  if p.legacy:
    return

  # enable output mode
  p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)

  # enable CAN loopback mode
  p.set_can_loopback(True)

  # GMLAN on CAN2 and CAN3
  for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
    p.set_gmlan(bus)
    comp_kbps_gmlan = time_many_sends(p, 3)
    assert_greater(comp_kbps_gmlan, 0.6 * SPEED_GMLAN)
    assert_less(comp_kbps_gmlan, 1.0 * SPEED_GMLAN)

  # normal
  for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
    p.set_gmlan(None)
    comp_kbps_normal = time_many_sends(p, bus)
    assert_greater(comp_kbps_normal, 0.6 * SPEED_NORMAL)
    assert_less(comp_kbps_normal, 1.0 * SPEED_NORMAL)
Code example #29
File: 2_usb_to_can.py Project: n2aws/panda
def test_gmlan():
  p = connect_wo_esp()

  if p.legacy:
    return

  # enable output mode
  p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)

  # enable CAN loopback mode
  p.set_can_loopback(True)

  p.set_can_speed_kbps(1, SPEED_NORMAL)
  p.set_can_speed_kbps(2, SPEED_NORMAL)
  p.set_can_speed_kbps(3, SPEED_GMLAN)
 
  # set gmlan on CAN2 and CAN3 alternately
  for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3, Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
    p.set_gmlan(bus)
    comp_kbps_gmlan = time_many_sends(p, 3)
    assert_greater(comp_kbps_gmlan, 0.8 * SPEED_GMLAN)
    assert_less(comp_kbps_gmlan, 1.0 * SPEED_GMLAN)

    p.set_gmlan(None)
    comp_kbps_normal = time_many_sends(p, bus)
    assert_greater(comp_kbps_normal, 0.8 * SPEED_NORMAL)
    assert_less(comp_kbps_normal, 1.0 * SPEED_NORMAL)

    print("%d: %.2f kbps vs %.2f kbps" % (bus, comp_kbps_gmlan, comp_kbps_normal))
Code example #30
def test_mask_contour():
    # test mask contour is created, learn more at:
    # https://github.com/amueller/word_cloud/pull/348#issuecomment-370883873
    mask = np.zeros((234, 456), dtype=np.int)
    mask[100:150, 300:400] = 255

    sm = WordCloud(mask=mask, contour_width=1, contour_color='blue')
    sm.generate(THIS)
    sm_array = np.array(sm)
    sm_total = sm_array[100:150, 300:400].sum()

    lg = WordCloud(mask=mask, contour_width=20, contour_color='blue')
    lg.generate(THIS)
    lg_array = np.array(lg)
    lg_total = lg_array[100:150, 300:400].sum()

    sc = WordCloud(mask=mask, contour_width=1, scale=2, contour_color='blue')
    sc.generate(THIS)
    sc_array = np.array(sc)
    sc_total = sc_array[100:150, 300:400].sum()

    # test `contour_width`
    assert_greater(lg_total, sm_total)

    # test contour varies with `scale`
    assert_greater(sc_total, sm_total)

    # test `contour_color`
    assert_true(all(sm_array[100, 300] == [0, 0, 255]))
Code example #31
def test_d2_3_eval_acc():
    global bi_text_test, bt_c2i, bt_i2c, bt_l2i, bt_i2l

    untrained = lang_id.LangID(input_vocab_n=len(bt_c2i),
                               embedding_dims=10,
                               hidden_dims=20,
                               lstm_layers=1,
                               output_class_n=2)

    # should be ≈50% accuracy
    acc_untrained, y_hat_untrained = lang_id.eval_acc(untrained, bi_text_test,
                                                      bt_c2i, bt_i2c, bt_l2i,
                                                      bt_i2l)
    assert_greater(acc_untrained, 0.4)
    assert_less(acc_untrained, 0.6)

    eq_(len(y_hat_untrained), len(bi_text_test))
Code example #32
    def target_timeout_test(self):
        """ joing with timeout should gracefully continue """
        def infinite_target(stop_event):
            while not stop_event.is_set():
                stop_event.wait(1)

        st = StoppableExceptionThread(target=infinite_target)
        st.start()
        t = time()
        st.join(timeout=1.0)
        assert_greater(1.01, time() - t) # allow 10 ms for overhead
        assert_equals(st.stopped, False)
        assert_equals(st.is_alive(), True)
        st.stop()
        st.join()
        assert_equals(st.stopped, True)
        assert_equals(st.is_alive(), False)
Code example #33
        def check_mongo_fields():
            def get_item(location):
                return self.draft_store._find_one(as_draft(location))

            def check_children(payload):
                for child in payload['definition']['children']:
                    assert_is_instance(child, basestring)

            refele = get_item(self.refloc)
            check_children(refele)
            assert_is_instance(refele['definition']['data']['reference_link'], basestring)
            assert_greater(len(refele['definition']['data']['reference_list']), 0)
            for ref in refele['definition']['data']['reference_list']:
                assert_is_instance(ref, basestring)
            assert_greater(len(refele['metadata']['reference_dict']), 0)
            for ref in refele['metadata']['reference_dict'].itervalues():
                assert_is_instance(ref, basestring)
Code example #34
File: test_interface.py Project: tarpn/aioax25
def test_transmit_waits_if_cts_reset():
    """
    Test the interface waits if CTS timer is reset.
    """
    my_port = DummyKISS()
    my_frame = AX25UnnumberedInformationFrame(destination='VK4BWI-4',
                                              source='VK4MSL',
                                              pid=0xf0,
                                              payload=b'testing')
    transmit_future = Future()

    my_interface = AX25Interface(my_port, cts_delay=0.250)

    def _on_transmit(interface, frame, **kwargs):
        try:
            eq_(len(kwargs), 0, msg='Too many arguments')
            assert_is(interface, my_interface, msg='Wrong interface')
            eq_(bytes(frame), bytes(my_frame), msg='Wrong frame')
            transmit_future.set_result(None)
        except Exception as e:
            transmit_future.set_exception(e)

    def _on_timeout():
        transmit_future.set_exception(AssertionError('Timed out'))

    # The time before transmission
    time_before = time.monotonic()

    # Set a timeout
    get_event_loop().call_later(1.0, _on_timeout)

    # Send the message
    my_interface.transmit(my_frame, _on_transmit)

    # Whilst that is pending, call reset_cts; this should delay transmission
    my_interface._reset_cts()

    yield from transmit_future

    eq_(len(my_port.sent), 1)
    (send_time, sent_frame) = my_port.sent.pop(0)

    eq_(bytes(sent_frame), bytes(my_frame))
    assert_less((time.monotonic() - send_time), 0.05)
    assert_greater(send_time - time_before, 0.25)
    assert_less(send_time - time_before, 1.05)
Code example #35
    def test_copy_samples_single_reverse(self):

        path_src = os.path.join(self.dir_tmp, 'x_lmdb')
        x = r.read_values(path_src)
        path_dst = os.path.join(self.dir_tmp, 'test_copy_samples_single_lmdb')
        assert_greater(len(x), 0, "This test needs non-empty data.")
        for i in range(len(x))[::-1]:
            if os.path.isdir(path_dst):
                shutil.rmtree(path_dst)
            c.copy_samples_lmdb(path_src, path_dst, [i])
            assert_true(os.path.isdir(path_dst),
                        "failed to save LMDB for i=%d" % (i, ))

            y = r.read_values(path_dst)
            assert_equal(len(y), 1, "Single element expected.")
            assert_true(np.all(x[i][0] == y[0][0]), "Wrong content copied.")
            assert_true(np.all(x[i][1] == y[0][1]), "Wrong content copied.")
Code example #36
    def run_with_exception_timeout_test(self):
        """ Subclass StopabbleExceptionThrad with long running `run_with_exception` """
        class InfiniteThread(StoppableExceptionThread):
            def run_with_exception(self):
                while not self._stop.is_set():
                    self._stop.wait(1.0)

        st = InfiniteThread()
        st.start()
        t = time()
        st.join(timeout=1.0)
        assert_greater(1.01, time() - t) # allow 10 ms for overhead
        assert_equals(st.stopped, False)
        assert_equals(st.is_alive(), True)
        st.stop()
        st.join()
        assert_equals(st.is_alive(), False)
Code example #37
        def check_xblock_fields():
            def check_children(xblock):
                for child in xblock.children:
                    assert_is_instance(child, UsageKey)

            course = self.draft_store.get_course(course_key)
            check_children(course)

            refele = self.draft_store.get_item(self.refloc)
            check_children(refele)
            assert_is_instance(refele.reference_link, UsageKey)
            assert_greater(len(refele.reference_list), 0)
            for ref in refele.reference_list:
                assert_is_instance(ref, UsageKey)
            assert_greater(len(refele.reference_dict), 0)
            for ref in refele.reference_dict.itervalues():
                assert_is_instance(ref, UsageKey)
Code example #38
File: test_svm.py Project: reshmamustafa/svm
def test_poly_svm():
    random_state = np.random.RandomState(0)
    n_samples = 100
    X = np.empty((n_samples, 2))
    X[:, 0] = np.linspace(0, 1, n_samples)
    X[:, 1] = random_state.randn(n_samples)
    y = np.sign(X[:, 1] - np.sin(2.0 * np.pi * np.sin(X[:, 0])))

    svm = SVM(kernel="poly",
              C=1.0,
              degree=3,
              coef0=1.0,
              random_state=random_state)
    svm.fit(X, y)
    y_pred = svm.predict(X)

    assert_greater(accuracy_score(y, y_pred), 0.9)
Code example #39
def test_constant_gaussian_diag_covariance():
    random_state = np.random.RandomState(0)

    n_samples = 10000
    n_weights = 5
    mean = np.ones(n_weights)
    ulp = ConstantGaussianPolicy(
        n_weights, covariance="diag", mean=mean,
        covariance_scale=1.0, random_state=random_state)
    Y = mean + random_state.randn(n_samples, n_weights)
    ulp.fit(None, Y, np.ones(n_samples))
    estimated_mean = ulp(explore=False)
    assert_array_almost_equal(mean, estimated_mean, decimal=2)

    p = ulp.probabilities([mean])
    p2 = ulp.probabilities([mean + 1.0])
    assert_greater(p, p2)
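The final assertion is just the unimodality of the Gaussian: the density at the mean exceeds the density one unit away in every coordinate. A self-contained illustration with scipy (an analogy to the policy's probabilities, not the project's API):

import numpy as np
from scipy.stats import multivariate_normal

mean = np.ones(5)
rv = multivariate_normal(mean=mean, cov=np.eye(5))
# The Gaussian peaks at its mean, so shifting every coordinate by 1 lowers the density.
assert rv.pdf(mean) > rv.pdf(mean + 1.0)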
Code example #40
def test_cards_length_difference():

    pivot = len(TRJ) // 4

    r1 = cards.cards([TRJ])
    r2 = cards.cards([TRJ[pivot:], TRJ[0:pivot]])

    # import matplotlib.pyplot as plt
    # plt.scatter(r1[1].flatten(), r2[2].flatten())
    # plt.show()

    assert_allclose(r1[0], r2[0], rtol=1e-12)
    assert_correlates(r1[0], r2[0])

    assert_greater(pearsonr(r1[3].flatten(), r2[3].flatten())[0], 0.8)

    assert_array_equal(r1[4], r2[4])
Code example #41
def test_procreation(self):
    """
    Test that the number of animals increases
    :return:
    """
    Herbivore.set_parameters({"gamma": 1})
    Carnivore.set_parameters({"gamma": 1})
    original = 60
    new = 0
    self.island.procreation()
    for row in self.island.island:
        for cell in row:
            new += \
                cell.number_of_individuals()["herbivores"] + \
                cell.number_of_individuals()["carnivores"]
    nt.assert_greater(new, original)
    nt.assert_less_equal(new, original * 2)
Code example #42
def test_recolor():
    wc = WordCloud(max_words=50)
    wc.generate(THIS)
    array_before = wc.to_array()
    wc.recolor()
    array_after = wc.to_array()
    # check that the same places are filled
    assert_array_equal(
        array_before.sum(axis=-1) != 0,
        array_after.sum(axis=-1) != 0)
    # check that they are not the same
    assert_greater(np.abs(array_before - array_after).sum(), 10000)

    # check that recoloring is deterministic
    wc.recolor(random_state=10)
    wc_again = wc.to_array()
    assert_array_equal(wc_again, wc.recolor(random_state=10))
Code example #43
def test_correlation(alleles=None,
                     num_peptides_per_length=1000,
                     lengths=[8, 9, 10],
                     debug=False):
    peptides = []
    for length in lengths:
        peptides.extend(random_peptides(num_peptides_per_length, length))

    # Cache encodings
    peptides = EncodableSequences.create(list(set(peptides)))

    if alleles is None:
        alleles = set.intersection(*[
            set(predictor.supported_alleles)
            for predictor in PREDICTORS.values()
        ])
    alleles = sorted(set(alleles))
    df = pandas.DataFrame(index=peptides.sequences)

    results_df = []
    for allele in alleles:
        for (name, predictor) in PREDICTORS.items():
            df[name] = predictor.predict(peptides, allele=allele)
        correlation = numpy.corrcoef(numpy.log10(df["allele-specific"]),
                                     numpy.log10(df["pan-allele"]))[0, 1]
        results_df.append((allele, correlation))
        print(len(results_df), len(alleles), *results_df[-1])

        if correlation < 0.6:
            print("Warning: low correlation", allele)
            df["tightest"] = df.min(1)
            print(df.sort_values("tightest").iloc[:, :-1])
            if debug:
                import ipdb
                ipdb.set_trace()
            del df["tightest"]

    results_df = pandas.DataFrame(results_df,
                                  columns=["allele", "correlation"])
    print(results_df)

    print("Mean correlation", results_df.correlation.mean())
    assert_greater(results_df.correlation.mean(), 0.65)

    return results_df
Code example #44
    def test_simulate(self):

        testfile, testout = self.setUp()

        space_i, outfile = initialize(testfile, testout)
        space_f = simulate(testfile, 100, 0.5, testout)

        xi, vi, mi = space_i.arrayvals()
        xf, vf, mf = space_f.arrayvals()

        # check for NaNs
        nt.assert_equal(np.any(np.isnan(xf)), False)
        nt.assert_equal(np.any(np.isnan(vf)), False)
        nt.assert_equal(np.any(np.isnan(mf)), False)

        # make sure none of the bodies are stacked on top of each other
        nt.assert_greater(space_f.bodies[0].scalardistance(space_f.bodies[1]),
                          1e-6)

        # check a file is created
        nt.assert_equal(True, os.path.isfile('testoutput.txt'))

        # make sure none of the masses have changed
        for i in range(2):
            nt.assert_equal(mi[i], mf[i])

        # make sure the CoM has not moved
        com_i = space_i.findCoM()
        com_f = space_f.findCoM()

        for i in range(2):
            nt.assert_equal(com_i.pos[i], com_f.pos[i])
            nt.assert_equal(com_i.vel[i], com_f.vel[i])

        # check the total energy before and after iterating, make sure it hasn't changed significantly
        # *** right now only works for a 2-body system
        nt.assert_almost_equal(self.totalenergy(space_i),
                               self.totalenergy(space_f))

        # calculate total angular momentum before and after iterating, make sure it hasn't changed significantly
        Li = self.angularmomentum(space_i)
        Lf = self.angularmomentum(space_f)
        for i in range(2):
            nt.assert_almost_equal(Li[i], Lf[i])
Code example #45
def test_external_versions_basic():
    ev = ExternalVersions()
    our_module = 'niceman'
    assert_equal(ev.versions, {})
    assert_equal(ev[our_module], __version__)
    # and it could be compared
    assert_greater_equal(ev[our_module], __version__)
    assert_greater(ev[our_module], '0.0.0a1')
    assert_equal(list(ev.keys()), [our_module])
    assert_true(our_module in ev)
    assert_false('unknown' in ev)

    # StrictVersion might remove trailing .0
    version_str = str(ev[our_module]) \
        if isinstance(ev[our_module], StrictVersion) \
        else __version__
    assert_equal(ev.dumps(), "Versions: %s=%s" % (our_module, version_str))

    # For non-existing one we get None
    assert_equal(ev['custom__nonexisting'], None)
    # and nothing gets added to _versions for nonexisting
    assert_equal(set(ev.versions.keys()), {our_module})

    # but if it is a module without version, we get it set to UNKNOWN
    assert_equal(ev['os'], ev.UNKNOWN)
    # And get a record on that inside
    assert_equal(ev.versions.get('os'), ev.UNKNOWN)
    # And that thing is "True", i.e. present
    assert (ev['os'])
    # but not comparable with anything besides itself (was above)
    assert_raises(TypeError, cmp, ev['os'], '0')
    assert_raises(TypeError, assert_greater, ev['os'], '0')

    return
    # Code below is from original duecredit, and we don't care about
    # testing this one
    # And we can get versions based on modules themselves
    from niceman.tests import mod
    assert_equal(ev[mod], mod.__version__)

    # Check that we can get a copy of the versions
    versions_dict = ev.versions
    versions_dict[our_module] = "0.0.1"
    assert_equal(versions_dict[our_module], "0.0.1")
    assert_equal(ev[our_module], __version__)
Code example #46
File: test_conversions.py Project: kashefy/elm
def Mat_ndarray_2d_values_floats():

    x = np.random.rand(3, 2) * 100.
    test_dummy = elm.Dummy()
    test_dummy.setMat(x)
    y = test_dummy.getMat()

    assert_true(x.dtype.name.startswith('float'))
    assert_equal(x.dtype, y.dtype)

    assert_is_instance(y, np.ndarray)

    assert_greater(y.size, 0)
    assert_equal(y.shape, x.shape)
    assert_true(np.all(x == y))
    x += 10
    assert_true(np.all(x == y))
    assert_true(x is y)
Code example #47
def test_bar_item_base(fake_writer):
    x_axis = ["A", "B", "C"]
    bar_item_1 = [
        opts.BarItem(name=d[0], value=d[1])
        for d in list(zip(x_axis, [1, 2, 3]))
    ]
    bar_item_2 = [
        opts.BarItem(name=d[0], value=d[1])
        for d in list(zip(x_axis, [4, 5, 6]))
    ]

    c = (Bar().add_xaxis(x_axis).add_yaxis("series0", bar_item_1).add_yaxis(
        "series1", bar_item_2))
    c.render()
    _, content = fake_writer.call_args[0]
    assert_greater(len(content), 2000)
    assert_equal(c.theme, "white")
    assert_equal(c.renderer, "canvas")
Code example #48
def test_rank_archimedean_spiral():
    def archimedean_spiral(n_steps=100, max_radius=1.0, turns=4.0):
        r = np.linspace(0.0, max_radius, n_steps)
        angle = r * 2.0 * np.pi * turns / max_radius
        x = r * np.cos(angle)
        y = r * np.sin(angle)
        return np.hstack((x[:, np.newaxis], y[:, np.newaxis])), r

    X_train, r_train = archimedean_spiral(n_steps=100)
    X_test, r_test = archimedean_spiral(n_steps=1000, max_radius=1.1)

    rsvm = RankingSVM(random_state=0)
    rsvm.fit(X_train)

    y_train = rsvm.predict(X_train)
    y_test = rsvm.predict(X_test)
    assert_true(np.all(y_train[1:] < y_train[:-1]))
    assert_greater(np.count_nonzero(y_test[1:] < y_test[:-1]), 970)
Code example #49
def test_linear_contextual_sphere():
    random_state = np.random.RandomState(0)
    n_params = 3
    n_context_dims = 2
    obj = LinearContextualSphere(random_state, n_params, n_context_dims)

    opt = CCMAESOptimizer(context_features="affine", random_state=random_state,
                          log_to_stdout=False)
    opt.init(n_params, n_context_dims)
    params = np.empty(n_params)
    for i in range(1000):
        context = random_state.rand(n_context_dims) * 2.0 - 1.0
        opt.set_context(context)
        opt.get_next_parameters(params)
        opt.set_evaluation_feedback([obj.feedback(params, context)])
    policy = opt.best_policy()
    mean_reward = evaluate(policy, obj)
    assert_greater(mean_reward, -1e-4)
Code example #50
    def test_clustering(self):
        model = ParallelKMeans(2, 7)
        model.fit(self.data)
        cluster1_predictions = model.predict(self.cluster1)
        cluster2_predictions = model.predict(self.cluster2)
        assert_array_equal(
            np.repeat(cluster1_predictions[0], len(self.cluster1)),
            cluster1_predictions)
        assert_array_equal(
            np.repeat(cluster2_predictions[0], len(self.cluster2)),
            cluster2_predictions)

        score1 = model.score(self.cluster1)
        assert_less(score1, 0)
        assert_greater(score1, -self.expected_error * 1.5)
        score2 = model.score(self.cluster2)
        assert_less(score2, 0)
        assert_greater(score2, -self.expected_error * 1.5)
Code example #51
def test_positivity():
    """
    Tests that the divergence functions return positive values for non-equal arguments.
    """
    alphas = [0.1, 0.5, 1, 1.5]
    divergences = [
        alpha_divergence, renyi_divergence, tsallis_divergence,
        hellinger_divergence
    ]
    test_dists = [get_dists_2(), get_dists_3()]
    for alpha in alphas:
        for dists in test_dists:
            for dist1 in dists:
                for dist2 in dists:
                    if dist1 == dist2:
                        continue
                    for divergence in divergences:
                        assert_greater(divergence(dist1, dist2, alpha), 0)
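As a concrete instance of the property under test, here is a minimal Rényi divergence for discrete distributions, written from the standard definition (a sketch; the project's own divergence functions may differ in signature and edge-case handling):

import numpy as np

def renyi_divergence(p, q, alpha):
    # Reduces to the KL divergence in the limit alpha -> 1.
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    if alpha == 1:
        return float(np.sum(p * np.log(p / q)))
    return float(np.log(np.sum(p ** alpha * q ** (1 - alpha))) / (alpha - 1))

# Strictly positive for distinct distributions, zero when p == q.
assert renyi_divergence([0.9, 0.1], [0.5, 0.5], 0.5) > 0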
Code example #52
    def test_figure_size_with_legend(self):

        g1 = ag.FacetGrid(self.df, col="a", hue="c", size=4, aspect=.5)
        npt.assert_array_equal(g1.fig.get_size_inches(), (6, 4))
        g1.add_legend()
        nt.assert_greater(g1.fig.get_size_inches()[0], 6)

        g2 = ag.FacetGrid(self.df,
                          col="a",
                          hue="c",
                          size=4,
                          aspect=.5,
                          legend_out=False)
        npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))
        g2.add_legend()
        npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))

        plt.close("all")
Code example #53
    def test_train_model_multi_embed20_hidden40(self):
        multi_text = pd.read_csv("../../data/sentences_multilingual.csv")
        multi_text_train, multi_text_test = train_test_split(multi_text,
                                                             test_size=0.2)

        multi_text.groupby('lang').count()
        multi_text_train.groupby('lang').count()

        _c2i, _i2c = vocab.build_vocab(multi_text.sentence.values)
        _l2i, _i2l = vocab.build_label_vocab(multi_text.lang.values)

        multi_class = lang_id.LangID(input_vocab_n=len(_c2i),
                                     embedding_dims=20,
                                     hidden_dims=40,
                                     lstm_layers=1,
                                     output_class_n=5)

        lang_id.train_model(model=multi_class,
                            n_epochs=1,
                            training_data=multi_text_train,
                            c2i=_c2i,
                            i2c=_i2c,
                            l2i=_l2i,
                            i2l=_i2l)
        print("done")

        acc_multi, y_hat_multi = lang_id.eval_acc(multi_class, multi_text_test,
                                                  _c2i, _i2c, _l2i, _i2l)

        # Jupyter reported Accuracy: 0.6954
        # Run 1: Accuracy: 0.6954
        print(f"Accuracy: {acc_multi}")

        from sklearn.metrics import classification_report, confusion_matrix
        y_multi = multi_text_test.lang.values
        print(classification_report(y_multi, y_hat_multi))

        cm = confusion_matrix(y_multi, y_hat_multi)

        lang_id.pretty_conf_matrix(cm, ['deu', 'eng', 'fra', 'ita', 'spa'])

        assert_greater(acc_multi, 0.60)
Code example #54
def test_lda_inference_vi():
    rng = np.random.RandomState(4)
    n_topics = rng.randint(15, 20)
    n_words = rng.randint(400, 500)
    mean_words = 10
    min_words = 3
    doc_topic_prior = 1.
    topic_word_prior = 1.
    smooth_param = 0.01
    n_doc = rng.randint(100, 200)

    gen = LdaSampleGenerator(n_topics=n_topics,
                             n_words=n_words,
                             min_doc_size=min_words,
                             mean_doc_size=mean_words,
                             doc_topic_prior=doc_topic_prior,
                             topic_word_prior=topic_word_prior,
                             random_state=2)

    _, doc_word_mtx = gen.generate_documents(n_doc)
    alpha = gen.doc_topic_prior_
    beta = gen.topic_word_distr_

    smooth_beta = (1. - smooth_param) * beta + (smooth_param / n_words)

    docs_distr_uniform = np.ones((n_doc, n_topics)) / n_topics
    ll_uniform = doc_likelihood(doc_word_mtx, docs_distr_uniform, alpha, beta)

    inference_1 = lda_inference_vi(doc_word_mtx,
                                   alpha,
                                   smooth_beta,
                                   max_iter=5)
    inference_1 /= inference_1.sum(axis=1)[:, np.newaxis]

    ll_inference_1 = doc_likelihood(doc_word_mtx, inference_1, alpha, beta)

    inference_2 = lda_inference_vi(doc_word_mtx,
                                   alpha,
                                   smooth_beta,
                                   max_iter=1000)
    inference_2 /= inference_2.sum(axis=1)[:, np.newaxis]
    ll_inference_2 = doc_likelihood(doc_word_mtx, inference_2, alpha, beta)
    assert_greater(ll_inference_2, ll_inference_1)
    assert_greater(ll_inference_1, ll_uniform)
Code example #55
File: test_utils.py Project: davidmoremad/toolium
    def test_wait_until_first_element_is_found_timeout(self):
        # Configure driver mock
        self.driver_wrapper.driver.find_element.side_effect = NoSuchElementException(
            'Unknown')
        element_locator = (By.ID, 'element_id')

        start_time = time.time()
        with assert_raises(TimeoutException) as cm:
            self.utils.wait_until_first_element_is_found([element_locator],
                                                         timeout=10)
        end_time = time.time()

        assert_in("None of the page elements has been found after 10 seconds",
                  str(cm.exception))
        # find_element has been called more than one time
        self.driver_wrapper.driver.find_element.assert_called_with(
            *element_locator)
        assert_greater(end_time - start_time, 10,
                       'Execution time must be greater than timeout')
Code example #56
File: test_gmm.py Project: Jimmy-INL/pomegranate-1
def test_gmm_multivariate_mixed_fit_iterations():
	numpy.random.seed(0)
	X = numpy.concatenate([numpy.random.normal(i, 1, size=(100, 5)) for i in range(2)])
	X = numpy.abs(X)

	gmm2 = gmm.copy()
	gmm3 = gmm.copy()

	gmm.fit(X)
	gmm2.fit(X, max_iterations=1)
	gmm3.fit(X, max_iterations=1)

	logp1 = gmm.log_probability(X).sum()
	logp2 = gmm2.log_probability(X).sum()
	logp3 = gmm3.log_probability(X).sum()

	assert_raises(AssertionError, assert_equal, logp1, logp2)
	assert_equal(logp2, logp3)
	assert_greater(logp1, logp2)
Code example #57
File: test_colors.py Project: alexjiao125/Eelbrain
def test_plot_colors():
    "Test plotting color schemes"
    cells_1 = ('A', 'B')
    cells_2 = ('a', 'b', 'c')

    colors = plot.colors_for_oneway(cells_1)
    p = plot.ColorList(colors, show=False)
    w0, h0 = p.figure.get_size_inches()
    p.close()

    p = plot.ColorList(colors, labels={'A': 'A' * 50, 'B': 'Bbb'}, show=False)
    w, h = p.figure.get_size_inches()
    eq_(h, h0)
    assert_greater(w, w0)
    p.close()

    colors = plot.colors_for_twoway(cells_1, cells_2)
    p = plot.ColorList(colors, show=False)
    p.close()
Code example #58
File: test_interface.py Project: tarpn/aioax25
def test_reception_resets_cts():
    """
    Check the clear-to-send expiry is updated with received traffic.
    """
    my_port = DummyKISS()
    my_frame = AX25UnnumberedInformationFrame(destination='VK4BWI',
                                              source='VK4MSL',
                                              pid=0xf0,
                                              payload=b'testing')

    my_interface = AX25Interface(my_port)
    cts_before = my_interface._cts_expiry

    # Pass in a message
    my_port.received.emit(frame=bytes(my_frame))
    cts_after = my_interface._cts_expiry

    assert_less(cts_before, cts_after)
    assert_greater(cts_after, time.monotonic())
Code example #59
def test_switch_to_ad3():
    # test if switching between qpbo and ad3 works

    if not get_installed(['qpbo']) or not get_installed(['ad3']):
        return
    X, Y = generate_blocks_multinomial(n_samples=5, noise=1.5, seed=0)
    crf = GridCRF(n_states=3, inference_method='qpbo')

    ssvm = OneSlackSSVM(crf, inference_cache=50, max_iter=10000)

    ssvm_with_switch = OneSlackSSVM(crf, inference_cache=50, max_iter=10000,
                                    switch_to=('ad3'))
    ssvm.fit(X, Y)
    ssvm_with_switch.fit(X, Y)
    assert_equal(ssvm_with_switch.model.inference_method, 'ad3')
    # we check that the dual is higher with ad3 inference,
    # as it might use the relaxation; that is pretty much guaranteed
    assert_greater(ssvm_with_switch.objective_curve_[-1],
                   ssvm.objective_curve_[-1])
Code example #60
def test_get_need_sync_queries():
    with g.connection.begin_nested() as trans:
        user_id = fake_add_users(g.connection)[0]
        query_ids = fake_add_queries(g.connection)
        assert_greater(len(query_ids), 1)
        assert_equal(len(get_need_sync_queries(g.connection)), 0)
        assert not is_query_active_bi_id(g.connection, query_ids[0])
        watch(g.connection, user_id=user_id, query_id=query_ids[0])
        assert is_query_active_bi_id(g.connection, query_ids[0])
        assert_equal(len(get_need_sync_queries(g.connection)), 1)
        set_next_sync_time(g.connection,
                           id=query_ids[1],
                           time=datetime.utcnow() + timedelta(days=1))
        assert_equal(len(get_need_sync_queries(g.connection)), 1)
        set_next_sync_time(g.connection,
                           id=query_ids[1],
                           time=datetime.utcnow() - timedelta(days=1))
        assert_equal(len(get_need_sync_queries(g.connection)), 2)
        trans.rollback()