Example #1
def check_notebooks_are_looked_up_in_cache():
    api = make_mock_api()

    api.list_notebooks()
    api.list_notebooks()

    assert_less_equal(api.note_store.listNotebooks.call_count, 1)
Example #2
    def test_sample(self):
        # Test sampling algorithm by checking the slope and intercept of the regression line
        # between expected and observed numbers of observations for each state path
        num_samples = 10000

        for obs, logprob, joint_probs in zip(
                self.test_obs_seqs, self.expected_forward_logprobs,
                self.expected_joint_logprobs):
            cond_probs = {K: V - logprob for K, V in joint_probs.items()}
            paths = self.generating_hmm.sample(obs, num_samples=num_samples)
            samples = Counter([tuple(X.astype(int)) for X in paths])
            expected = numpy.zeros(len(cond_probs))
            found = numpy.zeros(len(cond_probs))

            for n, (k, v) in enumerate(sorted(list(cond_probs.items()))):
                expected[n] = num_samples * numpy.exp(v)
                found[n] = samples.get(k, 0)

            m, b, r, p, std = scipy.stats.linregress(expected, found)
            assert_less_equal(
                abs(m - 1), 0.02,
                "Slope '%s' further from 1.0 than expected." % (m))
            assert_less_equal(
                abs(b), 1.0,
                "Intercept '%s' further from 0.0 than expected." % (b))
            assert_greater_equal(r, 0.95,
                                 "r '%s' is less than the expected minimum of 0.95." % r)
Example #3
def test_top_words():
    top_words = _get_top_words(test_model_output, TOP_WORDS)
    nt.assert_equal(len(top_words), 2)  # each entry is a topic
    nt.assert_equal(len(top_words[1]), TOP_WORDS)
    # ensure that word list is sorted properly, with descending weights
    nt.assert_less_equal(top_words[0][1][0], top_words[0][0][0])
    nt.assert_less_equal(top_words[0][-1][0], top_words[0][-2][0])
Example #4
File: fir_tests.py Project: emd/filters
def test_Kaiser_getResponse():
    # Create a high-pass filter
    ripple = -60
    width = 5e3
    f_6dB = 10e3
    Fs = 4e6
    hpf = Kaiser(ripple, width, f_6dB, pass_zero=False, Fs=Fs)

    # Power response @ `f_6dB` should be -6 dB
    f, H = hpf.getResponse(f=f_6dB)
    np.testing.assert_allclose(20 * np.log10(np.abs(H)), -6, rtol=0.1)

    # Power response (dB) in stopband should be <= `ripple`
    f = np.arange(0, f_6dB - (0.5 * width), 10)
    f, H = hpf.getResponse(f=f)
    tools.assert_less_equal(np.max(20 * np.log10(np.abs(H))), ripple)

    # Power response in passband should be:
    #
    #   >= (1 - `delta`), and
    #   <= (1 + `delta`),
    #
    # where `delta` is the ripple expressed in amplitude
    # (as opposed to dB)
    delta = 10**(ripple / 20.)

    f = np.arange(f_6dB + (0.5 * width), 0.5 * Fs, 1000)
    f, H = hpf.getResponse(f=f)

    tools.assert_greater_equal(np.min(np.abs(H)), 1 - delta)

    tools.assert_less_equal(np.max(np.abs(H)), 1 + delta)

    return
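The `delta` line above uses the standard dB-to-amplitude relation, delta = 10**(dB / 20). A standalone round-trip check of that conversion (values chosen to match the test's ripple spec):

import numpy as np

ripple_dB = -60
delta = 10 ** (ripple_dB / 20.)                   # amplitude ratio: 1e-3
np.testing.assert_allclose(20 * np.log10(delta), ripple_dB)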
Example #5
File: test_h1.py Project: bschwb/h1amg
def test_h1_real():
    """Test h1 amg for real example."""
    with ngs.TaskManager():
        mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.2))

        fes = ngs.H1(mesh, dirichlet=[1, 2, 3], order=1)

        u = fes.TrialFunction()
        v = fes.TestFunction()

        # rhs
        f = ngs.LinearForm(fes)
        f += ngs.SymbolicLFI(v)
        f.Assemble()

        # lhs
        a = ngs.BilinearForm(fes, symmetric=True)
        a += ngs.SymbolicBFI(grad(u) * grad(v))

        c = ngs.Preconditioner(a, 'h1amg2')
        a.Assemble()

        solver = ngs.CGSolver(mat=a.mat, pre=c.mat)

        gfu = ngs.GridFunction(fes)
        gfu.vec.data = solver * f.vec

    assert_greater(solver.GetSteps(), 0)
    assert_less_equal(solver.GetSteps(), 4)
Example #6
    def on_epoch(self):
        '''
        Loops through an epoch of the validation dataset.
        '''

        # Calls epoch_callbacks' on_start_training()
        for epoch_callback in self._epoch_callbacks:
            epoch_callback.on_start_training()

        # Repeatedly calls epoch_callbacks' on_batch()
        keep_going = True

        while keep_going:
            input_batches = self._input_iterator.next()
            keep_going = not self._input_iterator.next_is_new_epoch()

            # pylint: disable=star-args
            computed_values = self._update_function(*input_batches)

            value_index = 0
            for epoch_callback in self._epoch_callbacks:
                if isinstance(epoch_callback, IterationCallback):
                    new_value_index = (value_index +
                                       len(epoch_callback.nodes_to_compute))
                    assert_less_equal(new_value_index, len(computed_values))

                    values = computed_values[value_index:new_value_index]
                    epoch_callback.on_iteration(values)

                    value_index = new_value_index

        # Calls epoch_callbacks' on_epoch() methods.
        for epoch_callback in self._epoch_callbacks:
            epoch_callback.on_epoch()
Example #7
def test_incentive_process(lim=1e-14):
    """
    Compare stationary distribution computations to known analytic form for
    neutral landscape for the Moran process.
    """

    for n, N in [(2, 10), (2, 40), (3, 10), (3, 20), (4, 10)]:
        mu = (n - 1.) / n * 1./ (N + 1)
        alpha = N * mu / (n - 1. - n * mu)

        # Neutral landscape is the default
        edges = incentive_process.compute_edges(N, num_types=n,
                                                incentive_func=replicator, mu=mu)
        for logspace in [False, True]:
            stationary_1 = incentive_process.neutral_stationary(
                N, alpha, n, logspace=logspace)
            for exact in [False, True]:
                stationary_2 = stationary_distribution(
                    edges, lim=lim, logspace=logspace, exact=exact)
                for key in stationary_1.keys():
                    assert_almost_equal(
                        stationary_1[key], stationary_2[key], places=4)

        # Check that the stationary distribution satisfies balance conditions
        check_detailed_balance(edges, stationary_1)
        check_global_balance(edges, stationary_1)
        check_eigenvalue(edges, stationary_1)

        # Test Entropy Rate bounds
        er = entropy_rate(edges, stationary_1)
        h = (2. * n - 1) / n * numpy.log(n)
        assert_less_equal(er, h)
        assert_greater_equal(er, 0)
Example #8
def test_roc_auc():
    score = roc_auc(classes, poor_classes)
    assert_less_equal(score, 0.55)
    assert_greater_equal(score, 0.45)

    assert_equal(roc_auc(classes, good_classes, ascending_score=True), 0.0)
    assert_equal(roc_auc(classes, good_classes, ascending_score=False), 1.0)
Example #9
def test_create_run():
    t = Run()
    assert_equals(t.overall_time, 0)
    assert_equals(t.event_times, [])
    assert_equals(t.current_event_id, 0)
    assert_less_equal(t.run_start_time, time.time())
    assert_equals(t.event_start_time, t.run_start_time)
Example #10
def test_mem_parse_giant_table():

    # Note: this test really wants to be run by itself in a process since it
    #       measures the *max* rss of the whole program. If python allocates
    #       a large object which goes away, the test will lie to us. Hence,
    #       kick_maxrss().
    alive = kick_maxrss()

    # Note: this has been tested with 1M row, and it works but it's slow.
    # 100krow makes the point.
    N_ROWS = 100000

    table = make_table(N_ROWS, 4)

    mem_before = getmaxrss_mb()

    n = 0
    for row in find_trs(BytesIO(table)):
        n += 1

    used = getmaxrss_mb() - mem_before

    assert_equal(N_ROWS, n)

    # Check that we didn't use more than 1MB to parse the table.
    assert_less_equal(used, 1)
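Note: `kick_maxrss`, `getmaxrss_mb`, `make_table`, and `find_trs` are project helpers not shown here. A plausible sketch of `getmaxrss_mb` (assuming Linux, where `ru_maxrss` is reported in kilobytes; macOS reports bytes instead):

import resource

def getmaxrss_mb():
    # Peak resident set size of this process, converted to megabytes.
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0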
Example #11
def test_next_order_a_pp():
    # A ValueError should be raised if `len(a) >= len(x)`
    x = np.zeros(5)
    a = np.ones(len(x))
    tools.assert_raises(
        ValueError,
        next_order_a_pp,
        *[x, a])

    # Zero-order AR model produces forward and backwards errors
    # that are equal to the input signal. If the input signal's
    # entries are all equal, then we expect a_pp = -1.
    x = (1 + 1j) * np.ones(10)
    a = np.array([1.0])
    np.testing.assert_equal(
        next_order_a_pp(x, a),
        -1)

    # Analytic considerations constrain |a_{pp}| <= 1
    x = np.random.rand(100)
    a = np.random.rand(10)
    a[0] = 1
    tools.assert_less_equal(
        np.abs(next_order_a_pp(x, a)),
        1)

    return
Example #12
def test_filter_low_degree_nodes():
    G1 = nx.erdos_renyi_graph(50, 0.1)
    G2 = nx.erdos_renyi_graph(50, 0.1)
    linkpred.filter_low_degree_nodes([G1, G2])
    assert_less_equal(len(G1), 50)
    assert_equal(len(G2), len(G1))

    G = nx.star_graph(4)
    G.add_edge(1, 2)
    linkpred.filter_low_degree_nodes([G], minimum=2)
    assert_equal(sorted(G), [0, 1, 2])

    edge_sets = [[(0, 1), (0, 5), (2, 3), (2, 5), (4, 3)],
                 [(0, 1), (0, 5), (2, 3), (2, 5), (4, 3), (4, 1)]]
    expected = [0, 2]
    graphs = []
    for edges in edge_sets:
        G = nx.Graph()
        G.add_edges_from(edges)
        for n in G:
            G.node[n]['eligible'] = n % 2 == 0
        graphs.append(G)
    linkpred.filter_low_degree_nodes(graphs, minimum=2)
    for G in graphs:
        assert_equal(sorted(n for n in G if G.node[n]['eligible']), expected)
Example #13
    def test_record_splitting(self):
        """Publishers - Slack - AttachFullRecord - Split Record"""
        alert = get_alert()
        alert.created = datetime(2019, 1, 1)

        alert.record = {'massive_record': []}
        for index in range(0, 999):
            alert.record['massive_record'].append({
                'index': index,
                'value': 'foo'
            })

        publication = self._publisher.publish(alert, {})

        attachments = publication['@slack.attachments']

        assert_equal(len(attachments), 14)
        for attachment in attachments:
            assert_less_equal(len(attachment['text']), 4000)

        assert_equal(attachments[0]['title'], 'Record')
        assert_equal(len(attachments[0]['fields']), 0)
        assert_equal(attachments[0]['footer'], '')

        assert_equal(attachments[1]['title'], '')
        assert_equal(len(attachments[1]['fields']), 0)
        assert_equal(attachments[1]['footer'], '')

        assert_equal(attachments[13]['title'], '')
        assert_equal(len(attachments[13]['fields']), 1)
        assert_equal(attachments[13]['footer'],
                     'via <https://console.aws.amazon.com/s3/home|s3>')
Example #14
    def test_response_times_for_3_month_period(self):
        raise SkipTest("API isn't fast enough for this yet...")
        url = (self.BASE_URL + self.BASE_PATH +
               '&start=2014-08-01T00:00:00Z'
               '&end=2014-11-01T00:00:00Z')
        min_, max_, median = _get_url_response_times(url)
        assert_less_equal(median, 1000)
Example #15
def v_init(sim):
    np_rng = sim.random.NumpyRNG()
    rng = sim.random.NativeRNG(np_rng, seed=1)

    timestep = 1.
    sim.setup(timestep)

    n_neurons = 10000
    params = copy.copy(sim.IF_curr_exp.default_parameters)
    dist_params = {'mu': -70.0, 'sigma': 1.0}
    dist = 'normal'
    rand_dist = sim.random.RandomDistribution(dist, rng=rng, **dist_params)
    var = 'v'

    post = sim.Population(n_neurons, sim.IF_curr_exp, params, label='rand pop')
    post.initialize(**{var: rand_dist})
    post.record(var)

    sim.run(10)

    comp_var = post.get_data(var)
    comp_var = comp_var.segments[0].analogsignals[0]
    comp_var = np.asarray([float(x) for x in comp_var[0, :]])
    sim.end()

    v_std = comp_var.std()
    v_mean = comp_var.mean()

    epsilon = 0.1
    assert_less_equal(np.abs(v_mean - dist_params['mu']), epsilon)
    assert_less_equal(np.abs(v_std - dist_params['sigma']), epsilon)
Example #16
    def test_get_next_candidate(self):
        """
        Tests the get next candidate function.
        Tests:
            - The candidate's parameters are acceptable
        """

        cand = None
        counter = 0
        while cand is None and counter < 20:
            cand = self.EAss.get_next_candidate()
            time.sleep(0.1)
            counter += 1
        if counter == 20:
            raise Exception("Received no result in the first 2 seconds.")
        assert_is_none(cand.result)
        params = cand.params
        assert_less_equal(params["x"], 1)
        assert_greater_equal(params["x"], 0)
        assert_in(params["name"], self.param_defs["name"].values)
        self.EAss.update(cand, "pausing")
        time.sleep(1)
        new_cand = None
        while new_cand is None and counter < 20:
            new_cand = self.EAss.get_next_candidate()
            time.sleep(0.1)
            counter += 1
        if counter == 20:
            raise Exception("Received no result in the first 2 seconds.")
        assert_equal(new_cand, cand)
Example #17
    def elev_label_to_elev(elev_label):
        assert_greater_equal(elev_label, -1)
        elev_degrees = 30 if elev_label == -1 else (elev_label * 5 + 30)

        assert_greater_equal(elev_degrees, 30)
        assert_less_equal(elev_degrees, 90)
        return deg_to_rad(elev_degrees)
Example #19
def test_scores_to_probs():
    scores = [-10000, 10000, 10001, 9999, 0, 5, 6, 6, 7]
    probs = scores_to_probs(scores)
    assert_less(abs(sum(probs) - 1), 1e-6)
    for prob in probs:
        assert_less_equal(0, prob)
        assert_less_equal(prob, 1)
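`scores_to_probs` itself is not shown; the assertions (non-negative probabilities summing to 1, stable even for scores like ±10000) match a numerically stable softmax. A sketch under that assumption:

import numpy

def scores_to_probs(scores):
    # Subtract the max before exponentiating so huge scores don't overflow.
    scores = numpy.asarray(scores, dtype=float)
    exp = numpy.exp(scores - scores.max())
    return exp / exp.sum()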
Example #20
File: test_io.py Project: reubano/meza
    def test_geojson_with_key(self):
        """Test for reading GeoJSON files with a key"""
        for filepath in self.filepaths:
            records = io.read_geojson(filepath)
            f = cv.records2geojson(records, key='id')
            geojson = loads(f.read())

            nt.assert_equal('FeatureCollection', geojson['type'])
            nt.assert_true('crs' in geojson)
            nt.assert_equal(self.bbox, geojson['bbox'])
            nt.assert_true(geojson['features'])

            for feature in geojson['features']:
                nt.assert_equal('Feature', feature['type'])
                nt.assert_true('id' in feature)
                nt.assert_less_equal(2, len(feature['properties']))

                geometry = feature['geometry']

                if geometry['type'] == 'Point':
                    nt.assert_equal(2, len(geometry['coordinates']))
                elif geometry['type'] == 'LineString':
                    nt.assert_equal(2, len(geometry['coordinates'][0]))
                elif geometry['type'] == 'Polygon':
                    nt.assert_equal(2, len(geometry['coordinates'][0][0]))
Example #21
    def init_sparse_linear(shared_variable, num_nonzeros, rng):
        params = shared_variable.get_value()
        params[...] = 0.0

        assert_greater_equal(num_nonzeros, 0)
        assert_less_equal(num_nonzeros, params.shape[0])

        for c in xrange(params.shape[1]):
            indices = rng.choice(params.shape[0], size=num_nonzeros, replace=False)

            # normal dist with stddev=1.0, divided by 255.0
            #
            # We need to divide by 255 for convergence. This is because
            # we're using unnormalized (i.e. 0 to 255) pixel values, unlike the
            # 0.0-to-1.0 pixels in
            # pylearn2.scripts.tutorials.multilayer_perceptron/
            #
            # We could just do as the above tutorial does and normalize the
            # pixels to [0.0, 1.0], and not rescale the weights. However,
            # experiments show that this converges to a higher error, and also
            # makes mnist_visualizer.py's results look very "staticky", without
            # any recognizable digit hallucinations.
            params[indices, c] = rng.randn(num_nonzeros) / 255.0

        shared_variable.set_value(params)
Example #22
def check_number_of_calls(object_with_method,
                          method,
                          maximum_calls,
                          minimum_calls=1,
                          method_name=None):
    """
    Instruments the given method on the given object to verify the number of calls to the method is
    less than or equal to the expected maximum_calls and greater than or equal to the expected minimum_calls.
    """
    method_wrap = Mock(wraps=method)
    wrap_patch = patch.object(object_with_method, method_name
                              or method.__name__, method_wrap)

    try:
        wrap_patch.start()
        yield

    finally:
        wrap_patch.stop()

        # verify the counter actually worked by ensuring we have counted greater than (or equal to) the minimum calls
        assert_greater_equal(method_wrap.call_count, minimum_calls)

        # now verify the number of actual calls is less than (or equal to) the expected maximum
        assert_less_equal(method_wrap.call_count, maximum_calls)
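The helper yields mid-body, so in the source it is presumably wrapped with `contextlib.contextmanager` and used as a context manager. A usage sketch (`cache` and its `lookup` method are hypothetical):

# hypothetical: assert lookup is hit at least once but at most twice
with check_number_of_calls(cache, cache.lookup, maximum_calls=2):
    cache.lookup('alpha')
    cache.lookup('alpha')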
Example #23
def check_mongo_calls(mongo_store, max_finds=0, max_sends=None):
    """
    Instruments the given store to count the number of calls to find (incl find_one) and the number
    of calls to send_message which is for insert, update, and remove (if you provide max_sends). At the
    end of the with statement, it compares the counts to the max_finds and max_sends using a simple
    assertLessEqual.

    :param mongo_store: the MongoModulestore or subclass to watch
    :param max_finds: the maximum number of find calls to allow
    :param max_sends: If none, don't instrument the send calls. If non-none, count and compare to
        the given int value.
    """
    try:
        find_wrap = Mock(wraps=mongo_store.collection.find)
        wrap_patch = patch.object(mongo_store.collection, 'find', find_wrap)
        wrap_patch.start()
        if max_sends:
            sends_wrap = Mock(wraps=mongo_store.database.connection._send_message)
            sends_patch = patch.object(mongo_store.database.connection, '_send_message', sends_wrap)
            sends_patch.start()
        yield
    finally:
        wrap_patch.stop()
        if max_sends:
            sends_patch.stop()
            assert_less_equal(sends_wrap.call_count, max_sends)
        assert_less_equal(find_wrap.call_count, max_finds)
Example #24
    def init_sparse_linear(shared_variable, num_nonzeros, rng):
        params = shared_variable.get_value()
        params[...] = 0.0

        assert_greater_equal(num_nonzeros, 0)
        assert_less_equal(num_nonzeros, params.shape[0])

        for c in xrange(params.shape[1]):
            indices = rng.choice(params.shape[0],
                                 size=num_nonzeros,
                                 replace=False)

            # normal dist with stddev=1.0
            params[indices, c] = rng.randn(num_nonzeros)

        # TODO: it's somewhat worrisome that the tutorial in
        # pylearn2.scripts.tutorials.multilayer_perceptron/
        #   multilayer_perceptron.ipynb
        # seems to do fine without scaling the weights like this
        if num_nonzeros > 0:
            params /= float(num_nonzeros)
            # Interestingly, while this seems more correct (normalize
            # columns to norm=1), it prevents the NN from converging.
            # params /= numpy.sqrt(float(num_nonzeros))

        shared_variable.set_value(params)
Example #25
    def test_geojson_with_key(self):
        """Test for reading GeoJSON files with a key"""
        for filepath in self.filepaths:
            records = io.read_geojson(filepath)
            f = cv.records2geojson(records, key="id")
            geojson = loads(f.read())

            nt.assert_equal("FeatureCollection", geojson["type"])
            nt.assert_true("crs" in geojson)
            nt.assert_equal(self.bbox, geojson["bbox"])
            nt.assert_true(geojson["features"])

            for feature in geojson["features"]:
                nt.assert_equal("Feature", feature["type"])
                nt.assert_true("id" in feature)
                nt.assert_less_equal(2, len(feature["properties"]))

                geometry = feature["geometry"]

                if geometry["type"] == "Point":
                    nt.assert_equal(2, len(geometry["coordinates"]))
                elif geometry["type"] == "LineString":
                    nt.assert_equal(2, len(geometry["coordinates"][0]))
                elif geometry["type"] == "Polygon":
                    nt.assert_equal(2, len(geometry["coordinates"][0][0]))
Example #26
    def init_sparse_bias(shared_variable, num_nonzeros, rng):
        """
        Mimics the sparse initialization in
        pylearn2.models.mlp.Linear.set_input_space()
        """

        params = shared_variable.get_value()
        assert_equal(params.shape[0], 1)

        assert_greater_equal(num_nonzeros, 0)
        assert_less_equal(num_nonzeros, params.shape[1])

        params[...] = 0.0

        indices = rng.choice(params.size, size=num_nonzeros, replace=False)

        # normal dist with stddev=1.0
        params[0, indices] = rng.randn(num_nonzeros)

        # Found that for biases, this didn't help (it increased the
        # final misclassification rate by .001)
        # if num_nonzeros > 0:
        #     params /= float(num_nonzeros)

        shared_variable.set_value(params)
Example #27
def check_sum_of_calls(object_, methods, maximum_calls, minimum_calls=1):
    """
    Instruments the given methods on the given object to verify that the total sum of calls made to the
    methods falls between minimum_calls and maximum_calls.
    """
    mocks = {
        method: Mock(wraps=getattr(object_, method))
        for method in methods
    }

    with patch.multiple(object_, **mocks):
        yield

    call_count = sum(mock.call_count for mock in mocks.values())
    calls = pprint.pformat({
        method_name: mock.call_args_list
        for method_name, mock in mocks.items()
    })

    # Assertion errors don't handle multi-line values, so pretty-print to std-out instead
    if not minimum_calls <= call_count <= maximum_calls:
        print "Expected between {} and {} calls, {} were made. Calls: {}".format(
            minimum_calls,
            maximum_calls,
            call_count,
            calls,
        )

    # verify the counter actually worked by ensuring we have counted greater than (or equal to) the minimum calls
    assert_greater_equal(call_count, minimum_calls)

    # now verify the number of actual calls is less than (or equal to) the expected maximum
    assert_less_equal(call_count, maximum_calls)
Example #28
def test_no_jump():
    urdf_path = os.path.join(DATA_PATH, "kuka_lbr.urdf")
    base_link = "kuka_lbr_l_link_0"
    ee_link = "kuka_lbr_l_link_7"

    aik = ApproxLocalInvKin(urdf_path,
                            base_link,
                            ee_link,
                            max_jump=0.1,
                            verbose=0)
    aik.reset()
    n_joints = aik.get_n_joints()

    q = np.zeros(n_joints)
    p = np.empty(7)
    aik.jnt_to_cart(q, p)
    p_new = np.copy(p)
    p_new[:3] += np.array([0.1, 0.1, -0.1])
    q_new = np.copy(q)
    aik.cart_to_jnt(p, q_new)
    for _ in range(10):
        q_new = np.copy(q_new)
        aik.cart_to_jnt(p_new, q_new)
        for i in range(n_joints):
            assert_less_equal(abs(q[i] - q_new[i]), 0.10001)
        q = q_new
Example #30
def test_acc_bytes_small():
    lines = 'This is a set of lines of varying length'.split()
    lines = [line + '\n' for line in lines]
    it = iter(lines)
    prefix = reader_impl.accumulate_bytes(it, 20)
    assert_less_equal(20, len(prefix))
    assert_equal(prefix + ''.join(list(it)), ''.join(lines))
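The assertions above pin down the contract of `accumulate_bytes` (not shown): consume whole items from the iterator until at least the requested number of characters has accumulated, return them joined, and leave the remainder of the iterator intact. A minimal sketch consistent with those assertions:

def accumulate_bytes(it, min_size):
    # Hypothetical reimplementation for illustration only.
    parts = []
    total = 0
    for chunk in it:
        parts.append(chunk)
        total += len(chunk)
        if total >= min_size:
            break
    return ''.join(parts)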
Example #31
def check_sum_of_calls(object_, methods, maximum_calls, minimum_calls=1):
    """
    Instruments the given methods on the given object to verify that the total sum of calls made to the
    methods falls between minimum_calls and maximum_calls.
    """
    mocks = {
        method: Mock(wraps=getattr(object_, method))
        for method in methods
    }

    with patch.multiple(object_, **mocks):
        yield

    call_count = sum(mock.call_count for mock in mocks.values())
    calls = pprint.pformat({
        method_name: mock.call_args_list
        for method_name, mock in mocks.items()
    })

    # Assertion errors don't handle multi-line values, so pretty-print to std-out instead
    if not minimum_calls <= call_count <= maximum_calls:
        print "Expected between {} and {} calls, {} were made. Calls: {}".format(
            minimum_calls,
            maximum_calls,
            call_count,
            calls,
        )

    # verify the counter actually worked by ensuring we have counted greater than (or equal to) the minimum calls
    assert_greater_equal(call_count, minimum_calls)

    # now verify the number of actual calls is less than (or equal to) the expected maximum
    assert_less_equal(call_count, maximum_calls)
Example #32
def test_bbox():
    geohash = _uut.Geohash(area_in_km_square=100 * 1000)

    index = 2595
    triangle = geohash.triangle(index=index, astype=_uut.AS_TYPE_TRIANGLE)
    bbox = geohash.bbox(index=index)

    for point in triangle.points:
        _tools.assert_greater_equal(
            point.lat,
            bbox['s']
        )

        _tools.assert_less_equal(
            point.lat,
            bbox['n']
        )

        _tools.assert_greater_equal(
            point.lng,
            bbox['w']
        )

        _tools.assert_less_equal(
            point.lng,
            bbox['e']
        )
Example #33
    def check_descriptor_between(self, catchment, descr, lower, upper):
        nt.assert_greater_equal(getattr(catchment.descriptors, descr), lower,
                                msg="Catchment {} does not have `descriptors.{}` >= {}"
                                .format(catchment.id, descr, lower))
        nt.assert_less_equal(getattr(catchment.descriptors, descr), upper,
                             msg="Catchment {} does not have `descriptors.{}` <= {}"
                             .format(catchment.id, descr, upper))
Example #35
def check_window_landmark(ivc, landmark, flank_up=50, flank_down=100):
    test_roi, test_offset, (test_chrom, test_pos,
                            test_strand) = window_landmark(
                                ivc, flank_up, flank_down, landmark)

    # make sure position of interest matches what we gave window_landmark
    ref_chrom, ref_pos, ref_strand = ivc.get_genomic_coordinate(landmark)
    assert_equal(ref_chrom, test_chrom)
    assert_equal(ref_pos, test_pos)
    assert_equal(ref_strand, test_strand)

    # make sure position is in roi
    assert_true(test_pos in test_roi.get_position_set())

    # assure test roi length + offset == flank_up + flank_down
    # which it always should, unless the landmark is very close
    # to the edge of the transcript, in which case it should be smaller
    if landmark + flank_down <= test_roi.length:
        assert_equal(test_offset + test_roi.length, flank_up + flank_down)
    else:
        assert_less_equal(test_offset + test_roi.length, flank_up + flank_down)

    # test offset in roi is correct relative to offset and flank
    # only relevant for plus-strand
    roi_pos = test_roi.get_segmentchain_coordinate(test_chrom, test_pos,
                                                   test_strand)
    assert_equal(roi_pos + test_offset, flank_up)
Example #36
def test_max_marginals():
    """
    Test that max-marginals are correct.
    """
    for h in hypergraphs():
        w = utils.random_viterbi_potentials(h)
        print w.show(h)

        path = ph.best_path(h, w)
        best = w.dot(path)
        print "BEST"

        print "\n".join(["%20s : %s"%(edge.label, w[edge]) for edge in path.edges])
        print best
        nt.assert_not_equal(best, 0.0)
        max_marginals = ph.compute_marginals(h, w)
        for node in h.nodes:
            other = max_marginals[node]
            nt.assert_less_equal(other, best + 1e-4)

        for edge in h.edges:
            other = max_marginals[edge]
            nt.assert_less_equal(other, best + 1e-4)
            if edge in path:
                nt.assert_almost_equal(other, best)
Example #37
    def test_trip_length(self):
        ret = len(self.returned["trip"])
        # assert_equal(ret, 3)  # General case
        assert_greater_equal(
            ret,
            3)  # Vårsta -> Tumba -> T-Centralen -> Tekniska högskolan [-> ...]
        assert_less_equal(ret, 7)  # Not even SL could break this assertion
Example #38
def test_def_rxtr_req_sample():
    s = RandomRequestPoint()
    assert_equal(1, s.n_commods.sample())
    assert_equal(1, s.n_request.sample())
    assert_equal(1, s.assem_per_req.sample())
    assert_false(s.assem_multi_commod.sample())
    assert_equal(0, s.req_multi_commods.sample())
    assert_false(s.exclusive.sample())
    assert_equal(0, s.n_req_constr.sample())
    assert_equal(1, s.n_supply.sample())
    assert_equal(0, s.sup_multi.sample())
    assert_equal(0, s.sup_multi_commods.sample())
    assert_equal(1, s.n_sup_constr.sample())
    assert_equal(1, s.sup_constr_val.sample())
    assert_true(s.connection.sample())
    s1 = RandomRequestPoint()
    assert_equal(s1, s)
    constr_avg = 0
    pref_avg = 0
    n = 5000
    for i in range(n):
        constr = s.constr_coeff.sample()
        constr_avg += constr
        assert_greater(constr,  0)
        assert_less_equal(constr,  2)
        pref = s.pref_coeff.sample()
        pref_avg += pref
        assert_greater(pref,  0)
        assert_less_equal(pref,  1)
    assert_almost_equal(1.0, constr_avg / n, places=1)
    assert_almost_equal(0.5, pref_avg / n, places=1)
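As a sanity check on the statistical tolerances above: `places=1` in `assert_almost_equal` allows roughly 0.05 of absolute error, while the standard error of a mean over n = 5000 bounded samples is an order of magnitude smaller, so the test is comfortably loose. An illustrative sketch (assuming, purely for illustration, a Uniform(0, 2) stand-in for `constr_coeff.sample()`):

import numpy

rng = numpy.random.RandomState(0)
draws = rng.uniform(0.0, 2.0, size=5000)   # hypothetical stand-in sampler
# standard error of the mean: (2 / sqrt(12)) / sqrt(5000) ~= 0.008
assert abs(draws.mean() - 1.0) < 0.05      # the places=1 tolerance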
Example #39
def advance_to_next_cl_segment(session, commitlog_dir,
                               keyspace_name='ks', table_name='junk_table',
                               timeout=60, debug=True):
    """
    This is a hack to work around problems like CASSANDRA-11811.

    The problem happens in commitlog-replaying tests, like the snapshot and CDC
    tests. If we replay the first commitlog that's created, we wind up
    replaying some mutations that initialize system tables, so this function
    advances the node to the next CL by filling up the first one.
    """
    if debug:
        _debug = dtest.debug
    else:
        def _debug(*args, **kwargs):
            """
            noop debug method
            """
            pass

    session.execute(
        'CREATE TABLE {ks}.{tab} ('
        'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, '
        'e uuid, f uuid, g uuid, h uuid'
        ')'.format(ks=keyspace_name, tab=table_name)
    )
    prepared_insert = session.prepare(
        'INSERT INTO {ks}.{tab} '
        '(a, b, c, d, e, f, g, h) '
        'VALUES ('
        'uuid(), uuid(), uuid(), uuid(), '
        'uuid(), uuid(), uuid(), uuid()'
        ')'.format(ks=keyspace_name, tab=table_name)
    )

    # record segments that we want to advance past
    initial_cl_files = _files_in(commitlog_dir)

    start = time.time()
    stop_time = start + timeout
    rate_limited_debug = get_rate_limited_function(_debug, 5)
    _debug('attempting to write until we start writing to new CL segments: {}'.format(initial_cl_files))

    while _files_in(commitlog_dir) <= initial_cl_files:
        elapsed = time.time() - start
        rate_limited_debug('  commitlog-advancing load step has lasted {s:.2f}s'.format(s=elapsed))
        assert_less_equal(
            time.time(), stop_time,
            "It's been over {s}s and we haven't written a new "
            "commitlog segment. Something is wrong.".format(s=timeout)
        )
        execute_concurrent(
            session,
            ((prepared_insert, ()) for _ in range(1000)),
            concurrency=500,
            raise_on_first_error=True,
        )

    _debug('present commitlog segments: {}'.format(_files_in(commitlog_dir)))
Example #40
    def __call__(self, data, framerate, **kargs):
        if len(data) < self.window:
            return data

        begin = np.argmax(_get_ninapro_amp(data, framerate)
                          [self.window // 2:-(self.window - self.window // 2 - 1)])
        assert_less_equal(begin + self.window, len(data))
        return data[begin:begin + self.window]
Example #41
    def test_record_ctor(self):
        test = Partner(**self.record)
        nt.assert_equal(test.port, 4040)
        nt.assert_less_equal(test.created_on, datetime.now())
        nt.assert_less_equal(test.updated_on, datetime.now())
        nt.assert_equal(test.deleted, True)
        nt.assert_equal(test.id, 12)
        nt.assert_equal(test.name, 'PARTNERS')
Example #42
    def test_local_inputs_contents(self):
        xs = self.mws._local_search_xs(0, 20, 20)
        random.seed(1)
        # this is stochastic, so run it 100 times & hope any errors are caught
        for _ in xrange(100):
            for i, x in enumerate(xs):
                assert_greater_equal(i + 1, x)
                assert_less_equal(i, x)
Example #43
    def azim_label_to_azim(azim_label):
        azim_degrees = 0 if azim_label == -1 else azim_label * 10

        assert_greater_equal(azim_degrees, 0)
        assert_less_equal(azim_degrees, 340)
        assert_equal(azim_degrees % 20, 0)

        return deg_to_rad(azim_degrees)
Example #44
def test_ChaosGame_starting_point():
    """Test if starting points are within radius 1 from origin (unit circle)"""

    for i in range(3, 10):
        testgon = ChaosGame(i, 1 / 2)
        testgon._starting_point()
        nt.assert_less_equal(
            np.sqrt(testgon.start[0]**2 + testgon.start[1]**2), 1)
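`_starting_point` is not shown; a standard way to draw a point uniformly inside the unit circle, which would satisfy this test (a sketch, not necessarily what ChaosGame does):

import numpy as np

def random_point_in_unit_disc(rng=np.random):
    # sqrt keeps the density uniform over area rather than over radius
    r = np.sqrt(rng.uniform(0, 1))
    theta = rng.uniform(0, 2 * np.pi)
    return np.array([r * np.cos(theta), r * np.sin(theta)])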
Example #45
    def test_generate_one_conf(self):
        N = 10
        dim = 2
        L = 100.0
        x = generate_one_conf(L, N, dim)
        assert_equal(x.shape, (N, dim))
        assert_greater_equal(x.min(), 0.0)
        assert_less_equal(x.max(), L)
Example #46
    def _test_grid_rank(self, name):
        """Test var rank."""
        # Note: this function could probably be reworked to not
        #       be a separate function call
        rank = self.bmi.get_grid_rank(name)
        assert_is_instance(rank, int)
        assert_less_equal(rank, 3)
        return str(rank)
Example #47
def check_bloch_normalization(N, epsilon):
    Theta, Phi = inv.construct_grid(N)
    Angles = np.array([np.cos(Theta), Phi])
    Densities = inv.G_angles_q12(Angles, epsilon)
    Scaled_densities = Densities*np.sin(Theta)
    Total_prob = np.trapz(np.trapz(Scaled_densities, Phi), Theta[:,0])
    assert_greater(Total_prob, 0.95)
    assert_less_equal(Total_prob, 1)
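The nested `np.trapz` call above integrates a density over a 2-D (Theta, Phi) grid. A self-contained check of the same pattern on a known case, the uniform density on the sphere, whose integral should be 1:

import numpy as np

theta = np.linspace(0, np.pi, 101)
phi = np.linspace(0, 2 * np.pi, 201)
Theta, Phi = np.meshgrid(theta, phi, indexing='ij')

density = np.sin(Theta) / (4 * np.pi)    # uniform density on the sphere
inner = np.trapz(density, phi, axis=1)   # integrate over phi first
total = np.trapz(inner, theta)           # then over theta
assert abs(total - 1.0) < 1e-3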
Example #48
def check_sum_of_calls(object_,
                       methods,
                       maximum_calls,
                       minimum_calls=1,
                       include_arguments=True):
    """
    Instruments the given methods on the given object to verify that the total sum of calls made to the
    methods falls between minimum_calls and maximum_calls.
    """

    mocks = {
        method:
        StackTraceCounter.capture_call(getattr(object_, method),
                                       stack_depth=7,
                                       include_arguments=include_arguments)
        for method in methods
    }

    with patch.multiple(object_, **mocks):
        yield

    call_count = sum(capture_fn.stack_counter.total_calls
                     for capture_fn in mocks.values())

    # Assertion errors don't handle multi-line values, so pretty-print to std-out instead
    if not minimum_calls <= call_count <= maximum_calls:
        messages = [
            "Expected between {} and {} calls, {} were made.\n\n".format(
                minimum_calls,
                maximum_calls,
                call_count,
            )
        ]
        for method_name, capture_fn in mocks.items():
            stack_counter = capture_fn.stack_counter
            messages.append("{!r} was called {} times:\n".format(
                method_name, stack_counter.total_calls))
            for stack in stack_counter:
                messages.append("  called {} times:\n\n".format(
                    stack_counter.stack_calls(stack)))
                messages.append("    " +
                                "    ".join(traceback.format_list(stack)))
                messages.append("\n\n")
                if include_arguments:
                    for (args, kwargs), count in stack_counter[stack].items():
                        messages.append(
                            "      called {} times with:\n".format(count))
                        messages.append("      args: {}\n".format(args))
                        messages.append("      kwargs: {}\n\n".format(
                            dict(kwargs)))

        print "".join(messages)

    # verify the counter actually worked by ensuring we have counted greater than (or equal to) the minimum calls
    assert_greater_equal(call_count, minimum_calls)

    # now verify the number of actual calls is less than (or equal to) the expected maximum
    assert_less_equal(call_count, maximum_calls)
Example #49
def test_uniform():
	d = UniformDistribution( 0, 10 )

	assert_equal( d.log_probability( 2.34 ), -2.3025850929940455 )
	assert_equal( d.log_probability( 2 ), d.log_probability( 8 ) )
	assert_equal( d.log_probability( 10 ), d.log_probability( 3.4 ) )
	assert_equal( d.log_probability( 1.7 ), d.log_probability( 9.7 ) )
	assert_equal( d.log_probability( 10.0001 ), float( "-inf" ) )
	assert_equal( d.log_probability( -0.0001 ), float( "-inf" ) )

	for i in xrange( 10 ):
		data = np.random.randn( 100 ) * 100
		d.from_sample( data )
		assert_equal( d.parameters[0], data.min() ) 
		assert_equal( d.parameters[1], data.max() )

	minimum, maximum = data.min(), data.max()
	for i in xrange( 100 ):
		sample = d.sample()
		assert_less_equal( minimum, sample )
		assert_less_equal( sample,  maximum )

	d = UniformDistribution( 0, 10 )
	d.from_sample( [ -5, 20 ], inertia=0.5 )

	assert_equal( d.parameters[0], -2.5 )
	assert_equal( d.parameters[1], 15 )

	d.from_sample( [ -100, 100 ], inertia=1.0 )

	assert_equal( d.parameters[0], -2.5 )
	assert_equal( d.parameters[1], 15 )

	d.summarize( [ 0, 50, 2, 24, 28 ] )
	d.summarize( [ -20, 7, 8, 4 ] )
	d.from_summaries( inertia=0.75 )

	assert_equal( d.parameters[0], -6.875 )
	assert_equal( d.parameters[1], 23.75 )

	d.summarize( [ 0, 100 ] )
	d.summarize( [ 100, 200 ] )
	d.from_summaries()

	assert_equal( d.parameters[0], 0 )
	assert_equal( d.parameters[1], 200 )

	d.freeze()
	d.from_sample( [ 0, 1, 6, 7, 8, 3, 4, 5, 2 ] )
	assert_equal( d.parameters, [ 0, 200 ] )

	d.thaw()
	d.from_sample( [ 0, 1, 6, 7, 8, 3, 4, 5, 2 ] )
	assert_equal( d.parameters, [ 0, 8 ] )

	e = Distribution.from_json( d.to_json() )
	assert_equal( e.name, "UniformDistribution" )
	assert_equal( e.parameters, [ 0, 8 ] )
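The parameter updates asserted above are consistent with a convex blend of the old parameters and the newly fitted min/max, new = inertia * old + (1 - inertia) * mle. That rule is inferred from the asserted numbers, not quoted from the library; checking it:

inertia = 0.5
old = [0, 10]
mle = [-5, 20]                      # min/max of the sample [-5, 20]
new = [inertia * o + (1 - inertia) * m for o, m in zip(old, mle)]
assert new == [-2.5, 15]

# Same rule for the summarized case: min/max over both batches is (-20, 50).
inertia = 0.75
old, mle = [-2.5, 15], [-20, 50]
new = [inertia * o + (1 - inertia) * m for o, m in zip(old, mle)]
assert new == [-6.875, 23.75]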
Example #50
def check_fit(degree):
    y = _lifted_predict(U[:degree], X)

    est = PolynomialNetworkRegressor(degree=degree, n_components=n_components,
                                     max_iter=50000, beta=0.001, tol=1e-2,
                                     random_state=0)
    y_pred = est.fit(X, y).predict(X)
    assert_less_equal(mean_squared_error(y, y_pred), 1e-4,
                      msg="Cannot learn degree {} function.".format(degree))
Example #51
def test_rand_instability_score():
    s = 20
    ks = np.arange(2, 21)
    n = 30
    rand_scores = rand_instability_score(ks, n, s)
    assert_array_equal(rand_scores.keys(), ks)
    for rand_score in rand_scores.values():
        assert_less_equal(rand_score, 1.)
        assert_greater_equal(rand_score, 0.)
Example #53
    def test_compute_gr_2d(self):
        N = 10
        dim = 2
        L = 100.0
        x = generate_one_conf(L, N, dim)
        dist = compute_distances(x, L, N, dim)
        r, gr = compute_gr_2d(dist, N, nbins=100)
        assert_greater_equal(gr.min(), 0.0)
        assert_less_equal(gr.max(), L * numpy.sqrt(dim))
Example #54
    def check_vals_against_wig(self, expected, found):
        diff = abs(expected - found)
        maxdiff = diff.max()
        maxloc = diff.argmax()
        msg = "Maximum difference found between BigWig and Wiggle (%s) is at position %s and exceeded tolerance (%s).\n" % (
            maxdiff, maxloc, TOL)
        msg += "At that position, expected %s, got %s." % (expected[maxloc],
                                                           found[maxloc])
        assert_less_equal(maxdiff, TOL, msg)
Example #55
    def _assert(self, root):
        if not root:
            return
        if root.left:
            assert_less_equal(root.left.val, root.val)
            self._assert(root.left)
        if root.right:
            assert_less_equal(root.val, root.right.val)
            self._assert(root.right)
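A minimal driver for the recursive check above; `Node` is a hypothetical node type standing in for whatever tree class the test suite uses:

class Node(object):
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# Satisfies the ordering _assert walks: left <= node <= right at every level.
bst = Node(5, Node(3, Node(2), Node(4)), Node(8, Node(7), Node(9)))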