Example #1
def test_yule_data():
    ar, v, c = aryule([1, -1, 1, 1, 1], 2, norm='biased')
    assert_almost_equal(ar[0], 0.0+0.j)
    assert_almost_equal(ar[1], -0.2+0.j)
    assert_almost_equal(v, 0.95999999999999996)
    assert_almost_equal(c[0], 0.0+0.j)
    assert_almost_equal(c[1], -0.2+0.j)
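A hand check of the expected values above (a sketch, assuming the 'biased' autocorrelation convention r(k) = (1/N) * sum_n x[n]*x[n+k]):

import numpy as np

x = np.array([1., -1., 1., 1., 1.])
N = len(x)
r = np.array([np.dot(x[:N - k], x[k:]) / N for k in range(3)])  # r(0), r(1), r(2)
# Yule-Walker for order 2: [[r0, r1], [r1, r0]] @ a = -[r1, r2]
a = np.linalg.solve([[r[0], r[1]], [r[1], r[0]]], -r[1:])
v = r[0] + a @ r[1:]   # driving-noise variance
print(a, v)            # ~[0. -0.2] and 0.96, matching ar and v above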
Example #2
def test_decimate():
    """Test decimation of digitizer headshapes with too many points."""
    # load headshape and convert to meters
    hsp_mm = _get_ico_surface(5)['rr'] * 100
    hsp_m = hsp_mm / 1000.

    # save headshape to a file in mm in temporary directory
    tempdir = _TempDir()
    sphere_hsp_path = op.join(tempdir, 'test_sphere.txt')
    np.savetxt(sphere_hsp_path, hsp_mm)

    # read in raw data using spherical hsp, and extract new hsp
    with warnings.catch_warnings(record=True) as w:
        raw = read_raw_kit(sqd_path, mrk_path, elp_txt_path, sphere_hsp_path)
    assert_true(any('more than' in str(ww.message) for ww in w))
    # collect headshape from raw (should now be in m)
    hsp_dec = np.array([dig['r'] for dig in raw.info['dig']])[8:]

    # with 10242 points and _decimate_points set to resolution of 5 mm, hsp_dec
    # should be a bit over 5000 points. If not, something is wrong or
    # decimation resolution has been purposefully changed
    assert_true(len(hsp_dec) > 5000)

    # should have similar size, distance from center
    dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0))**2, axis=1))
    dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0))**2, axis=1))
    hsp_rad = np.mean(dist)
    hsp_dec_rad = np.mean(dist_dec)
    assert_almost_equal(hsp_rad, hsp_dec_rad, places=3)
Example #3
    def test_applyaction(self):
        action = Action(1.0, 0.0)
        world = WorldSim(10, 10, default_x=5.0, default_y=5.0)
        world.applyaction(action)
        # Test just the linear change
        assert_equal(world.x, 5.0 + WorldSim.TICK_DURATION)

        action = Action(0.0, 1.0)
        world.reset()
        world.applyaction(action)
        # Test just the angular change
        assert_equal(world.theta, WorldSim.TICK_DURATION)

        action = Action(1.0, 1 / WorldSim.TICK_DURATION * math.pi / 2)
        world.reset()
        world.applyaction(action)
        # Test the linear change along y after a quarter-turn
        assert_equal(world.y, 5.0 + WorldSim.TICK_DURATION)

        world = WorldSim(10.0, 10.0, default_x=0.0, default_y=0.0)
        action = Action(1 / WorldSim.TICK_DURATION * 9 * math.sqrt(2),
                        1 / WorldSim.TICK_DURATION * math.pi / 4)
        world.applyaction(action)
        assert_almost_equal(world.x, 9)
        assert_almost_equal(world.y, 9)
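The asserts above are consistent with first-order unicycle kinematics in which the heading updates before the position advances; a minimal sketch of that assumed model (apply_action is a hypothetical stand-in, since WorldSim itself is not shown here):

import math

def apply_action(x, y, theta, v, omega, dt):
    # hypothetical stand-in for WorldSim.applyaction: rotate, then translate
    theta += omega * dt
    x += v * math.cos(theta) * dt
    y += v * math.sin(theta) * dt
    return x, y, theta

# With v = 9*sqrt(2)/dt and omega = (pi/4)/dt, one tick from (0, 0) lands at
# (9, 9), matching the final pair of asserts.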
Example #4
def test_energy():
    # make sure that energy as computed by ssvm is the same as by lp
    np.random.seed(0)
    for inference_method in ["lp", "ad3"]:
        found_fractional = False
        crf = EdgeFeatureGraphCRF(n_states=3,
                                  inference_method=inference_method,
                                  n_edge_features=2)
        while not found_fractional:
            x = np.random.normal(size=(7, 8, 3))
            edge_list = make_grid_edges(x, 4, return_lists=True)
            edges = np.vstack(edge_list)
            edge_features = edge_list_to_features(edge_list)
            x = (x.reshape(-1, 3), edges, edge_features)

            unary_params = np.random.normal(size=(3, 3))
            pw1 = np.random.normal(size=(3, 3))
            pw2 = np.random.normal(size=(3, 3))
            w = np.hstack([unary_params.ravel(), pw1.ravel(), pw2.ravel()])
            res, energy = crf.inference(x, w, relaxed=True, return_energy=True)
            found_fractional = np.any(np.max(res[0], axis=-1) != 1)

            psi = crf.psi(x, res)
            energy_svm = np.dot(psi, w)

            assert_almost_equal(energy, -energy_svm)
            if not found_fractional:
                # exact discrete labels, test non-relaxed version
                res, energy = crf.inference(x, w, relaxed=False,
                                            return_energy=True)
                psi = crf.psi(x, res)
                energy_svm = np.dot(psi, w)

                assert_almost_equal(energy, -energy_svm)
Example #5
def test_keep_top_k_epitopes():
    arg_parser = make_variant_sequences_arg_parser()
    args = arg_parser.parse_args([
        "--vcf", data_path("b16.f10/b16.f10.Phip.vcf"),
        "--bam", data_path("b16.f10/b16.combined.sorted.bam"),
    ])
    reads_generator = allele_reads_generator_from_args(args)
    variants = variant_collection_from_args(args)
    keep_k_epitopes = 3
    core_logic = VaxrankCoreLogic(
        reads_generator=reads_generator,
        mhc_predictor=random_binding_predictor,
        variants=variants,
        vaccine_peptide_length=15,
        padding_around_mutation=5,
        min_alt_rna_reads=1,
        min_variant_sequence_coverage=1,
        variant_sequence_assembly=True,
        max_vaccine_peptides_per_variant=1,
        num_mutant_epitopes_to_keep=keep_k_epitopes)
    ranked_list = core_logic.ranked_vaccine_peptides()

    for variant, vaccine_peptides in ranked_list:
        vaccine_peptide = vaccine_peptides[0]
        eq_(keep_k_epitopes, len(vaccine_peptide.mutant_epitope_predictions))
        # recompute the expected score, make sure the top-k argument from ranked_vaccine_peptides()
        # propagated as expected
        mutant_epitope_score = sum(
            p.logistic_epitope_score() for p in vaccine_peptide.mutant_epitope_predictions)
        assert_almost_equal(mutant_epitope_score, vaccine_peptide.mutant_epitope_score)
Example #6
def test_covar_15_ip():
    af, pf, ab, pb, pbv = arcovar_marple(marple_data, 15)
    assert_almost_equal(pf, 0.0031358526195905032)
    assert_almost_equal(pb, 0.0026095580050847235)
    assert_array_almost_equal(af[0:15], array([  3.14064291e+00 -0.53085796j,   6.71499124e+00 -2.02047795j,
         1.06218919e+01 -4.91215366j,   1.40604378e+01 -8.88144555j,
         1.56600743e+01-13.2925649j ,   1.52808636e+01-17.26357445j,
         1.29553371e+01-20.19441487j,   9.56479043e+00-21.35967801j,
         5.76086019e+00-20.39407074j,   2.35478080e+00-17.25236853j,
        -1.39883911e-02-12.63099132j,  -1.01307484e+00 -7.71542788j,
        -1.00735874e+00 -3.71449987j,  -5.47782956e-01 -1.24481265j,
        -1.63739470e-01 -0.22820697j]))
    assert_array_almost_equal(ab[0:15], array([  3.06854326 +0.4396126j ,   6.52836187 +1.85223579j,
        10.14250939 +4.53484335j,  13.27104933 +8.16295648j,
        14.65282324+12.10370542j,  14.30283278+15.67072521j,
        12.13984749+18.32533332j,   9.02885933+19.34952244j,
         5.49933445+18.38815454j,   2.39313549+15.41172794j,
         0.23240843+11.16952573j,  -0.69430878 +6.74812076j,
        -0.75349882 +3.21552564j,  -0.42710881 +1.07407686j,
        -0.13625884 +0.18990667j]))
    assert_array_almost_equal(pbv, array([23.002882564886164,
         14.963158025030376,     11.46060060362683,
         8.8047876198403294,     8.464718707735825,
         6.7595928955003961,     3.9194229830412644,
         3.4283223276191257,     2.2528330561384045,
         1.174361182536527,     0.53260425403862111,
         0.30138304540853789,     0.1893577453852136,
         0.13685257356088598]))
Example #7
def test_query_resolution():
    bbox = (-180, -90, 180, 90)
    init_res = (4.5, 6.7)
    query = mapnik.Query(mapnik.Box2d(*bbox), init_res)
    r = query.resolution
    assert_almost_equal(r[0], init_res[0], places=7)
    assert_almost_equal(r[1], init_res[1], places=7)
Example #8
def test_match_velocities():
    with open(os.path.join(os.path.dirname(__file__), 'match_velocities.yml')) as f:
        regression_data = yaml.safe_load(f)
    boid_data = regression_data["before"]
    match_velocities(boid_data)
    for after, before in zip(regression_data["after"], boid_data):
        for after_value, before_value in zip(after, before):
            assert_almost_equal(after_value, before_value, delta=0.01)
Example #9
def test_bad_boids_regression():
    with open(os.path.join(os.path.dirname(__file__), 'fixture.yml')) as f:
        regression_data = yaml.safe_load(f)
    boid_data = regression_data["before"]
    update_boids(boid_data)
    for after, before in zip(regression_data["after"], boid_data):
        for after_value, before_value in zip(after, before):
            assert_almost_equal(after_value, before_value, delta=0.01)
Example #10
def test_fly_towards_middle():
    with open(os.path.join(os.path.dirname(__file__), 'fly_towards_middle.yml')) as f:
        regression_data = yaml.safe_load(f)
    boid_data = regression_data["before"]
    fly_towards_middle(boid_data)
    for after, before in zip(regression_data["after"], boid_data):
        for after_value, before_value in zip(after, before):
            assert_almost_equal(after_value, before_value, delta=0.01)
Example #11
def test_avoid_nearby_birds():
    with open(os.path.join(os.path.dirname(__file__), 'avoid_nearby_boids.yml')) as f:
        regression_data = yaml.safe_load(f)
    boid_data = regression_data["before"]
    avoid_nearby_boids(boid_data)
    for after, before in zip(regression_data["after"], boid_data):
        for after_value, before_value in zip(after, before):
            assert_almost_equal(after_value, before_value, delta=0.01)
Example #12
    def test_length(self):
        l = session.query(Lake).get(1)
        r = session.query(Road).get(1)
        s = session.query(Spot).get(1)
        assert_almost_equal(session.scalar(l.lake_geom.length), 0.30157858985653774)
        assert_almost_equal(session.scalar(r.road_geom.length), 0.8551694164147895)
        ok_(not session.scalar(s.spot_location.length))
Example #13
def _check_marginal_samples_match_scores(server, row, fi):
    row = loom.query.protobuf_to_data_row(row.diff)
    row[fi] = None
    to_sample = [i == fi for i in range(len(row))]
    samples = server.sample(to_sample, row, SAMPLE_COUNT)
    val = samples[0][fi]
    base_score = server.score(row)
    if isinstance(val, bool) or isinstance(val, int):
        probs_dict = {}
        samples = [sample[fi] for sample in samples]
        for sample in set(samples):
            row[fi] = sample
            probs_dict[sample] = numpy.exp(
                server.score(row) - base_score)
        if len(probs_dict) == 1:
            assert_almost_equal(probs_dict[sample], 1., places=SCORE_PLACES)
            return
        if min(probs_dict.values()) < MIN_CATEGORICAL_PROB:
            return
        gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
    elif isinstance(val, float):
        probs = numpy.exp([
            server.score(sample) - base_score
            for sample in samples
        ])
        samples = [sample[fi] for sample in samples]
        gof = density_goodness_of_fit(samples, probs, plot=True)
    assert_greater(gof, MIN_GOODNESS_OF_FIT)
Example #14
    def test_y(self):
        s = session.query(Spot).get(1)
        assert_almost_equal(float(session.scalar(s.spot_location.y)), 42.9480095987261)
        s = session.query(Spot).filter(and_(Spot.spot_location.x < 0, Spot.spot_location.y > 42)).all()
        ok_(s is not None)
        assert_almost_equal(float(session.scalar(functions.y(WKTSpatialElement('POINT(-88.3655256496815 43.1402866687898)', geometry_type=Point.name)))),
                            43.1402866687898)
Example #15
def test_logging():
    iris = load_iris()
    X, y = iris.data, iris.target

    X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=int)) for x in X]
    Y = y.reshape(-1, 1)

    X_train, X_test, y_train, y_test = train_test_split(X_, Y, random_state=1)
    _, file_name = mkstemp()

    pbl = GraphCRF(n_features=4, n_states=3, inference_method=inference_method)
    logger = SaveLogger(file_name)
    svm = NSlackSSVM(pbl, C=100, n_jobs=1, logger=logger)
    svm.fit(X_train, y_train)

    score_current = svm.score(X_test, y_test)
    score_auto_saved = logger.load().score(X_test, y_test)

    alt_file_name = file_name + "alt"
    logger.save(svm, alt_file_name)
    logger.file_name = alt_file_name
    logger.load()
    score_manual_saved = logger.load().score(X_test, y_test)

    assert_less(.97, score_current)
    assert_less(.97, score_auto_saved)
    assert_less(.97, score_manual_saved)
    assert_almost_equal(score_auto_saved, score_manual_saved)
Example #16
def test_multilabel_fully():
    # test inference and energy with fully connected model
    n_features = 5
    n_labels = 4
    edges = np.vstack([x for x in itertools.combinations(range(n_labels), 2)])
    model = MultiLabelClf(n_labels=n_labels, n_features=n_features,
                          edges=edges)
    rnd = np.random.RandomState(0)

    x = rnd.normal(size=n_features)
    w = rnd.normal(size=n_features * n_labels + 4 * len(edges))
    y = model.inference(x, w)

    # test joint_feature / energy
    joint_feature = model.joint_feature(x, y)
    energy = compute_energy(model._get_unary_potentials(x, w),
                            model._get_pairwise_potentials(x, w), edges, y)
    assert_almost_equal(energy, np.dot(joint_feature, w))

    # for continuous y
    #y_cont = model.inference(x, w, relaxed=True)
    y_continuous = np.zeros((n_labels, 2))
    pairwise_marginals = []
    for edge in edges:
        # indicator of one of four possible states of the edge
        pw = np.zeros((2, 2))
        pw[y[edge[0]], y[edge[1]]] = 1
        pairwise_marginals.append(pw)

    pairwise_marginals = np.vstack(pairwise_marginals)

    y_continuous[np.arange(n_labels), y] = 1
    assert_array_almost_equal(
        joint_feature, model.joint_feature(x, (y_continuous, pairwise_marginals)))
Example #17
    def test_x(self):
        s = session.query(Spot).get(1)
        assert_almost_equal(float(session.scalar(s.spot_location.x)), -88.594586159235689)
        s = session.query(Spot).filter(and_(Spot.spot_location.x < 0, Spot.spot_location.y > 42)).all()
        ok_(s is not None)
        assert_almost_equal(float(session.scalar(functions.x(WKTSpatialElement('POINT(-88.3655256496815 43.1402866687898)', geometry_type=Point.name)))),
                            -88.3655256496815)
Example #18
    def test_analyze_lifetime(self):
        resA = self.analyzer.analyze_lifetime(self.trajectory, self.stateA)
        resB = self.analyzer.analyze_lifetime(self.trajectory, self.stateB)
        assert_equal(resA.n_frames.tolist(), [3, 1, 2])  # A->B
        assert_equal(resB.n_frames.tolist(), [2, 1, 1, 11])  # B->A
        assert_almost_equal(resA.times.mean(), 6.0/3.0*0.1)
        assert_almost_equal(resB.times.mean(), 15.0/4.0*0.1)
Example #19
def test_multilabel_independent():
    # test inference and energy with independent model
    edges = np.zeros((0, 2), dtype=int)
    n_features = 5
    n_labels = 4
    model = MultiLabelClf(n_labels=n_labels, n_features=n_features,
                          edges=edges)
    rnd = np.random.RandomState(0)

    x = rnd.normal(size=5)
    w = rnd.normal(size=n_features * n_labels)
    # test inference
    y = model.inference(x, w)
    y_ = np.dot(w.reshape(n_labels, n_features), x) > 0
    assert_array_equal(y, y_)

    # test joint_feature / energy
    joint_feature = model.joint_feature(x, y)
    energy = compute_energy(model._get_unary_potentials(x, w),
                            model._get_pairwise_potentials(x, w), edges, y)
    assert_almost_equal(energy, np.dot(joint_feature, w))

    # for continuous y
    y_continuous = np.zeros((n_labels, 2))
    y_continuous[np.arange(n_labels), y] = 1
    assert_array_almost_equal(
        joint_feature, model.joint_feature(x, (y_continuous, np.zeros((0, n_labels, n_labels)))))
Example #20
def test_ap_amplitude_from_voltagebase1():
    """basic: Test AP_amplitude_from_voltagebase 1"""

    import efel
    import numpy

    stim_start = 500.0
    stim_end = 900.0

    data = numpy.loadtxt('testdata/basic/mean_frequency_1.txt')

    time = data[:, 0]
    voltage = data[:, 1]

    trace = {}

    trace['T'] = time
    trace['V'] = voltage
    trace['stim_start'] = [stim_start]
    trace['stim_end'] = [stim_end]

    features = ['AP_amplitude_from_voltagebase',
                'peak_voltage', 'voltage_base']

    feature_values = \
        efel.getFeatureValues(
            [trace],
            features)

    voltage_base = feature_values[0]['voltage_base'][0]
    for peak_voltage, ap_amplitude_from_voltagebase in zip(
            feature_values[0]['peak_voltage'],
            feature_values[0]['AP_amplitude_from_voltagebase']):
        nt.assert_almost_equal(peak_voltage - voltage_base,
                               ap_amplitude_from_voltagebase)
Example #21
def test_f_divergence(places=1):
    """
    Tests various known relations of f-divergences to other divergences.
    """
    def f_alpha(alpha):
        if alpha == 1:
            def f(x):
                return x * np.log2(x)
        elif alpha == -1:
            def f(x):
                return - np.log2(x)
        else:
            def f(x):
                return 4. / (1. - alpha*alpha) * (1. - np.power(x, (1. + alpha)/2))
        return f

    def f_tsallis(alpha):
        def f(x):
            return (np.power(x, 1. - alpha) - 1.) / (alpha - 1.)
        return f
    test_functions = []
    alphas = [0.1, 0.5, 1.1]
    for alpha in alphas:
        test_functions.append((f_alpha(alpha), partial(alpha_divergence, alpha=alpha)))
        test_functions.append((f_tsallis(alpha), partial(tsallis_divergence, alpha=alpha)))
    dists = get_dists_3()
    for dist1 in dists:
        for dist2 in dists:
            if dist1 == dist2:
                continue
            for f, div_func in test_functions:
                div1 = f_divergence(dist1, dist2, f)
                div2 = div_func(dist1, dist2)
                assert_almost_equal(div1, div2, places=places)
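The alpha = 1 case reduces to the Kullback-Leibler divergence in bits; a quick numeric check on plain probability vectors (a sketch -- the distribution objects and divergence helpers above come from the library under test, here we use raw arrays):

import numpy as np

def f_div(p, q, f):
    # D_f(p || q) = sum_x q(x) * f(p(x) / q(x))
    p, q = np.asarray(p, float), np.asarray(q, float)
    return np.sum(q * f(p / q))

p = np.array([0.5, 0.3, 0.2])
q = np.array([0.2, 0.5, 0.3])
print(f_div(p, q, lambda t: t * np.log2(t)))  # equals...
print(np.sum(p * np.log2(p / q)))             # ...KL(p || q) in bits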
Example #22
    def test_call_with_linear_momentum_fix(self):
        toy_modifier = SingleAtomVelocityDirectionModifier(
            delta_v=[1.0, 2.0],
            subset_mask=[1, 2],
            remove_linear_momentum=True
        )
        new_toy_snap = toy_modifier(self.toy_snapshot)
        velocities = new_toy_snap.velocities
        momenta = velocities * new_toy_snap.masses[:, np.newaxis]
        assert_array_almost_equal(sum(momenta), np.array([0.0]*2))
        double_ke = sum(sum(momenta * velocities))
        assert_almost_equal(double_ke, 86.0)

        u_vel = old_div(u.nanometer, u.picosecond)
        u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)

        openmm_modifier = SingleAtomVelocityDirectionModifier(
            delta_v=1.2*u_vel,
            remove_linear_momentum=False
        )
        new_openmm_snap = openmm_modifier(self.openmm_snap)
        velocities = new_openmm_snap.velocities
        momenta = velocities * new_openmm_snap.masses[:, np.newaxis]
        zero_momentum = 0 * u_vel * u_mass
        total_momenta = sum(momenta, zero_momentum)
        assert_array_almost_equal(total_momenta,
                                  np.array([0.0]*3) * u_vel * u_mass)
Example #23
    def test_remove_momentum_rescale_energy_openmm(self):
        # don't actually need to do everything with OpenMM, but do need to
        # add units
        u_vel = old_div(u.nanometer, u.picosecond)
        u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)
        u_energy = old_div(u.kilojoule_per_mole, u.AVOGADRO_CONSTANT_NA)

        velocities = \
                np.array([[1.5, -1.0], [-1.0, 2.0], [0.25, -1.0]]) * u_vel
        masses = np.array([1.0, 1.5, 4.0]) * u_mass
        new_vel = self.openmm_modifier._remove_linear_momentum(
            velocities=velocities,
            masses=masses
        )
        new_momenta = new_vel * masses[:, np.newaxis]
        total_momenta = sum(new_momenta, new_momenta[0])
        assert_array_almost_equal(total_momenta,
                                  np.array([0.0]*2) * u_vel * u_mass)

        new_vel = self.openmm_modifier._rescale_kinetic_energy(
            velocities=velocities,
            masses=masses,
            double_KE=20.0 * u_energy
        )
        new_momenta = new_vel * masses[:, np.newaxis]
        total_momenta = sum(new_momenta, new_momenta[0])
        zero_energy = 0.0 * u_energy
        new_ke = sum(sum(new_momenta * new_vel, zero_energy), zero_energy)
        # tests require that the linear momentum be 0, and KE be correct
        assert_array_almost_equal(total_momenta,
                                  np.array([0.0]*2) * u_vel * u_mass)
        assert_equal(new_ke.unit, (20.0 * u_energy).unit)
        assert_almost_equal(new_ke._value, (20.0 * u_energy)._value)
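The momentum-removal step being tested amounts to subtracting the center-of-mass velocity; the same numbers without openmm units (a sketch of the assumed implementation, not the method itself):

import numpy as np

velocities = np.array([[1.5, -1.0], [-1.0, 2.0], [0.25, -1.0]])
masses = np.array([1.0, 1.5, 4.0])
total_p = (velocities * masses[:, np.newaxis]).sum(axis=0)  # [1.0, -2.0]
new_vel = velocities - total_p / masses.sum()               # shift by v_cm
print((new_vel * masses[:, np.newaxis]).sum(axis=0))        # ~[0. 0.]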
Example #24
def test_initial_value():
    m = Minuit(func3, pedantic=False, x=1., y=2., error_x=3., print_level=0)
    assert_almost_equal(m.args[0], 1.)
    assert_almost_equal(m.args[1], 2.)
    assert_almost_equal(m.values['x'], 1.)
    assert_almost_equal(m.values['y'], 2.)
    assert_almost_equal(m.errors['x'], 3.)
Example #25
    def test_intercepts_from_means_with_true_normalization(self):
        expected_intercepts = self.true_intercepts_series
        true_normalization = ['m3', 2.0]
        calc_intercepts, calc_mean = wf.intercepts_from_means(
            self.data, true_normalization, self.true_loadings_series)
        assert_series_equal(calc_intercepts, expected_intercepts)
        assert_almost_equal(calc_mean, 0.0, places=2)
Example #26
    def test_fit_background_C(self):
        self.m["B_K"].active = False
        self.m.fit_background()
        nt.assert_almost_equal(self.m["Offset"].offset.value,
                               1.71212121212)
        nt.assert_false(self.m["B_K"].active)
        nt.assert_true(self.m["C_K"].active)
Example #27
def check_consistency_in_mesh1d(macro_grid, element_orders, var_list):
    my_mesh1d = Mesh1D(macro_grid, element_orders, var_list)

    # Testing mesh attributes
    numpy.testing.assert_allclose(my_mesh1d.macro_grid, macro_grid)
    numpy.testing.assert_array_equal(my_mesh1d.element_orders, element_orders)
    assert_equal(my_mesh1d.variables, var_list)
    assert_equal(my_mesh1d.dof, (sum(element_orders) + 1) * len(var_list))

    # Testing list of elements
    for idx_var, var in enumerate(var_list):
        for idx_el, element in enumerate(my_mesh1d.elem):
            if idx_var == 0:
                mesh_pos_1 = my_mesh1d.gm[idx_el][element.pos[var]][0]
                mesh_pos_2 = sum(my_mesh1d.elem[el].order for el in range(idx_el))
                numpy.testing.assert_array_equal(mesh_pos_1, mesh_pos_2)
            else:
                mesh_pos_1 = my_mesh1d.gm[idx_el][element.pos[previous_var]] + my_mesh1d.dof_1v
                mesh_pos_2 = my_mesh1d.gm[idx_el][element.pos[var]]
                numpy.testing.assert_array_equal(mesh_pos_1, mesh_pos_2)
        previous_var = var

    # Testing linear and quadratic integration
    domain_integral_a = sum(el.w_1v.dot(el.x_1v) for el in my_mesh1d.elem)
    domain_integral_b = sum(el.w_1v.dot(el.x_1v ** 2) for el in my_mesh1d.elem)
    assert_almost_equal(domain_integral_a, (my_mesh1d.macro_grid[-1]**2 - my_mesh1d.macro_grid[0]**2) / 2)
    assert_almost_equal(domain_integral_b, (my_mesh1d.macro_grid[-1]**3 - my_mesh1d.macro_grid[0]**3) / 3)
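The two integral identities asserted at the end can be reproduced with any quadrature that is exact for cubics; a sketch using numpy's Gauss-Legendre nodes on [0, 2], independent of Mesh1D:

import numpy as np

nodes, weights = np.polynomial.legendre.leggauss(5)
a, b = 0.0, 2.0
x = 0.5 * (b - a) * nodes + 0.5 * (b + a)   # map from [-1, 1] to [a, b]
w = 0.5 * (b - a) * weights
print(w @ x, (b**2 - a**2) / 2)     # both 2.0
print(w @ x**2, (b**3 - a**3) / 3)  # both ~2.6667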
Example #28
    def test_call(self):
        new_toy_snap = self.toy_modifier(self.toy_snapshot)
        assert_array_almost_equal(new_toy_snap.coordinates,
                                  self.toy_snapshot.coordinates)
        new_vel = new_toy_snap.velocities
        old_vel = self.toy_snapshot.velocities
        same_vel = [np.allclose(new_vel[i], old_vel[i]) 
                    for i in range(len(new_vel))]
        assert_equal(Counter(same_vel), Counter({True: 2, False: 1}))
        for new_v, old_v in zip(new_vel, old_vel):
            assert_almost_equal(sum([v**2 for v in new_v]),
                                sum([v**2 for v in old_v]))

        new_omm_snap = self.openmm_modifier(self.openmm_snap)
        n_atoms = len(self.openmm_snap.coordinates)
        assert_array_almost_equal(new_omm_snap.coordinates,
                                  self.openmm_snap.coordinates)
        new_vel = new_omm_snap.velocities
        old_vel = self.openmm_snap.velocities
        same_vel = [np.allclose(new_vel[i], old_vel[i]) 
                    for i in range(len(new_vel))]
        assert_equal(Counter(same_vel), Counter({True: n_atoms-1, False: 1}))
        u_vel_sq = (old_div(u.nanometers, u.picoseconds))**2
        for new_v, old_v in zip(new_vel, old_vel):
            assert_almost_equal(
                sum([(v**2).value_in_unit(u_vel_sq) for v in new_v]),
                sum([(v**2).value_in_unit(u_vel_sq) for v in old_v])
            )
Example #29
def test_generate_og_receptive_field():
    xpixels = 500 # simulated screen width
    ypixels = 500 # simulated screen height
    ppd = 1 # simulated visual angle
    scale_factor = 1.0 # simulated stimulus resampling rate
    distance = 5 # standard deviations to compute gauss out to
    xcenter = 0 # x coordinate of the pRF center
    ycenter = 0 # y coordinate of the pRF center
    sigma = 1 # width of the pRF
    
    test_value = 6 # this is the sum of a gaussian given 1 ppd 
                   # and a 1 sigma prf centered on (0,0)
                   
    # generate the visuotopic coordinates
    dx,dy = generate_coordinate_matrices(xpixels,
                                         ypixels,
                                         ppd,
                                         scale_factor)
    
    # generate a pRF at (0,0) and 1 sigma wide
    rf = generate_og_receptive_field(xcenter, ycenter, sigma, dx, dy)
    
    # divide by integral
    rf /= 2 * np.pi * sigma ** 2
    
    # compare the volume of the pRF to a known value
    nt.assert_almost_equal(np.sum(rf),1)
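The normalization works because the discrete sum over a fine grid approximates the analytic integral of a 2D Gaussian, which is 2*pi*sigma**2; a standalone check (a sketch, not using the generate_* helpers above):

import numpy as np

y, x = np.mgrid[-250:250, -250:250]   # 1 pixel per degree, as above
rf = np.exp(-(x**2 + y**2) / 2.0)     # sigma = 1, centered at (0, 0)
rf /= 2 * np.pi                       # 2*pi*sigma**2 with sigma = 1
print(rf.sum())                       # ~1.0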
Example #30
def test_mean_frequency1():
    """basic: Test mean_frequency 1"""

    import efel
    import numpy

    stim_start = 500.0
    stim_end = 900.0

    data = numpy.loadtxt('testdata/basic/mean_frequency_1.txt')

    time = data[:, 0]
    voltage = data[:, 1]

    trace = {}

    trace['T'] = time
    trace['V'] = voltage
    trace['stim_start'] = [stim_start]
    trace['stim_end'] = [stim_end]

    features = ['mean_frequency']

    feature_values = \
        efel.getFeatureValues(
            [trace],
            features)
    nt.assert_almost_equal(feature_values[0]['mean_frequency'], 15.2858453)
Example #31
def test_ttest():
    """Test univariate t-test functions"""
    ds = datasets.get_uv()

    print(test.ttest('fltvar', ds=ds))
    print(test.ttest('fltvar', 'A', ds=ds))
    print(test.ttest('fltvar', 'A%B', ds=ds))
    print(test.ttest('fltvar', 'A', match='rm', ds=ds))
    print(test.ttest('fltvar', 'A', 'a1', match='rm', ds=ds))
    print(test.ttest('fltvar', 'A%B', ('a1', 'b1'), match='rm', ds=ds))

    # Prepare data for scipy
    a1_index = ds.eval("A == 'a1'")
    a2_index = ds.eval("A == 'a2'")
    b1_index = ds.eval("B == 'b1'")
    a1_in_b1_index = np.logical_and(a1_index, b1_index)
    a2_in_b1_index = np.logical_and(a2_index, b1_index)

    # TTest1Samp
    res = test.TTest1Sample('fltvar', ds=ds)
    t, p = scipy.stats.ttest_1samp(ds['fltvar'], 0)
    assert_almost_equal(res.t, t, 10)
    assert_almost_equal(res.p, p, 10)
    res = test.TTest1Sample('fltvar', ds=ds, tail=1)
    assert_almost_equal(res.t, t, 10)
    assert_almost_equal(res.p, p / 2., 10)

    # TTestInd
    res = test.TTestInd('fltvar', 'A', 'a1', 'a2', ds=ds)
    t, p = scipy.stats.ttest_ind(ds[a1_index, 'fltvar'], ds[a2_index,
                                                            'fltvar'])
    assert_almost_equal(res.t, t, 10)
    assert_almost_equal(res.p, p, 10)

    # TTestRel
    res = test.TTestRel('fltvar', 'A', 'a1', 'a2', 'rm', "B=='b1'", ds)
    a1 = ds[a1_in_b1_index, 'fltvar'].x
    a2 = ds[a2_in_b1_index, 'fltvar'].x
    diff = a1 - a2
    t, p = scipy.stats.ttest_rel(a1, a2)
    assert_array_equal(res.diff.x, diff)
    eq_(res.df, len(a1) - 1)
    eq_(res.tail, 0)
    assert_almost_equal(res.t, t, 10)
    assert_almost_equal(res.p, p, 10)
    print(res)
    print(asfmtext(res))

    res = test.TTestRel('fltvar', 'A', 'a1', 'a2', 'rm', "B=='b1'", ds, 1)
    assert_array_equal(res.diff.x, diff)
    eq_(res.df, len(a1) - 1)
    eq_(res.tail, 1)
    assert_almost_equal(res.t, t, 10)
    assert_almost_equal(res.p, p / 2 if t > 0 else 1 - p / 2, 10)
    print(res)
    print(asfmtext(res))

    res = test.TTestRel('fltvar', 'A', 'a1', 'a2', 'rm', "B=='b1'", ds, -1)
    assert_array_equal(res.diff.x, diff)
    eq_(res.df, len(a1) - 1)
    eq_(res.tail, -1)
    assert_almost_equal(res.t, t, 10)
    assert_almost_equal(res.p, p / 2 if t < 0 else 1 - p / 2, 10)
    print(res)
    print(asfmtext(res))
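The one-tailed p-values asserted above follow from scipy's two-tailed p by halving on the predicted side; restated for clarity (a sketch with synthetic data):

import numpy as np
import scipy.stats

rng = np.random.RandomState(0)
sample = rng.normal(0.5, 1.0, size=30)
t, p = scipy.stats.ttest_1samp(sample, 0)
p_upper = p / 2 if t > 0 else 1 - p / 2   # tail=1 (mean > 0)
p_lower = p / 2 if t < 0 else 1 - p / 2   # tail=-1 (mean < 0)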
Example #32
    def test_that_can_convert_julian_tai_to_datetime_obj(self):
        import numpy as np
        sec = 1.0 / (24.0 * 60.0 * 60.0)
        days_since_standard_epoch = 143541.0  # Almost, but not quite 365.2425*393.0, not sure why...

        a = np.arange(6).reshape(2, 3)
        b = convert_sec_since_to_std_time(a, dt.datetime(1993, 1, 1))

        eq_(a.shape, b.shape)
        assert_almost_equal(b[0][0], days_since_standard_epoch)
        assert_almost_equal(b[0][1], days_since_standard_epoch + 1 * sec)
        assert_almost_equal(b[0][2], days_since_standard_epoch + 2 * sec)
        assert_almost_equal(b[1][0], days_since_standard_epoch + 3 * sec)
        assert_almost_equal(b[1][1], days_since_standard_epoch + 4 * sec)
        assert_almost_equal(b[1][2], days_since_standard_epoch + 5 * sec)
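The "not quite 365.2425*393.0" puzzle in the comment disappears if the standard epoch is taken to be 1600-01-01 (an assumption about the library's convention): the exact Gregorian day count to 1993-01-01 is 143541, while 393 average years undershoot it slightly.

import datetime as dt
print((dt.datetime(1993, 1, 1) - dt.datetime(1600, 1, 1)).days)  # 143541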
Example #33
    def test_that_can_convert_masked_tai_to_datetime_obj(self):
        import numpy.ma as ma
        sec = 1.0 / (24.0 * 60.0 * 60.0)
        days_since_standard_epoch = 143541.0  # Almost, but not quite 365.2425*393.0, not sure why...

        a = ma.array([0, 1, 2, 3, 4, 5],
                     mask=[False, False, True, False, False,
                           False]).reshape(2, 3)
        b = convert_sec_since_to_std_time(a, dt.datetime(1993, 1, 1))

        eq_(a.shape, b.shape)
        assert_almost_equal(b[0][0], days_since_standard_epoch)
        assert_almost_equal(b[0][1], days_since_standard_epoch + 1 * sec)
        assert_almost_equal(b.filled()[0][2], b.fill_value)
        assert_almost_equal(b[1][0], days_since_standard_epoch + 3 * sec)
        assert_almost_equal(b[1][1], days_since_standard_epoch + 4 * sec)
        assert_almost_equal(b[1][2], days_since_standard_epoch + 5 * sec)
Example #34
def test_number_density():
    ethanol = from_atom_frac({'C': 2, 'H': 6, 'O': 1}, density=0.78900)
    obs = ethanol.number_density()
    exp = 9.2825E22
    assert_almost_equal(obs / exp, 1.0, 4)
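The expected value can be checked by hand from the definition n = rho * N_A / M (standard constants, not taken from the library under test):

N_A = 6.02214076e23                   # 1/mol
M = 2 * 12.011 + 6 * 1.008 + 15.999   # g/mol for C2H6O, ~46.07
rho = 0.78900                         # g/cm^3
atoms_per_cc = rho * N_A / M * 9      # 9 atoms per C2H6O molecule
print(atoms_per_cc)                   # ~9.28e22, matching exp above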
Example #35
def test_mass_density():
    ethanol = from_atom_frac({'C': 2, 'H': 6, 'O': 1})
    atom_density_ethanol = 9.282542841E22  # atom density not molecule density
    mass_density = ethanol.mass_density(atom_density_ethanol)
    expected_mass_density = 0.78900
    assert_almost_equal(mass_density, expected_mass_density, 4)
Example #36
    def test_even_probabilities(self):
        assert_almost_equal(calculate_renyi(self.ok, 2), log(2))
        assert_almost_equal(calculate_renyi(self.ok, 3), log(2))
        assert_almost_equal(calculate_renyi(self.ok, 9), log(2))
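For a uniform distribution the Renyi entropy is log(n) at every order, which is what these asserts encode; a minimal reference implementation under that convention (a sketch; natural log and alpha != 1 assumed):

import numpy as np

def renyi(p, alpha):
    # H_alpha(p) = log(sum_x p(x)**alpha) / (1 - alpha)
    p = np.asarray(p, float)
    return np.log(np.sum(p ** alpha)) / (1.0 - alpha)

print(renyi([0.5, 0.5], 2))  # log(2), independent of the order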
Example #37
    def test_calculate_entropy(self):
        assert_almost_equal(calculate_entropy(self.zeros), 0.0)
        assert_almost_equal(calculate_entropy(self.ones), 0.0)
        assert_almost_equal(calculate_entropy(self.negatives), 0.0)
        assert_almost_equal(calculate_entropy(self.ok), log(2))
        assert_almost_equal(calculate_entropy(self.ok_2), log(3))
Example #38
    def test_constraint_undirected(self):
        constraint = nx.constraint(self.G)
        assert_almost_equal(round(constraint['G'], 3), 0.400)
        assert_almost_equal(round(constraint['A'], 3), 0.595)
        assert_almost_equal(round(constraint['C'], 3), 1)
Example #39
    def test_constraint_directed(self):
        constraint = nx.constraint(self.D)
        assert_almost_equal(round(constraint[0], 3), 1.003)
        assert_almost_equal(round(constraint[1], 3), 1.003)
        assert_almost_equal(round(constraint[2], 3), 1.389)
Example #40
    def test_effective_size_undirected_borgatti(self):
        effective_size = nx.effective_size(self.G)
        assert_almost_equal(round(effective_size['G'], 2), 4.67)
        assert_almost_equal(round(effective_size['A'], 2), 2.50)
        assert_almost_equal(round(effective_size['C'], 2), 1)
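A quick sanity check of the redundancy idea behind effective size (Borgatti's unweighted, undirected simplification: degree minus twice the ties among alters over degree):

import networkx as nx

star = nx.star_graph(4)                 # hub 0; its alters share no ties
print(nx.effective_size(star)[0])       # 4.0 -- no redundancy
triangle = nx.complete_graph(3)         # each alter tied to the other
print(nx.effective_size(triangle)[0])   # 1.0 = 2 - 2*1/2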
Example #41
def test_metric_minimum_average_direct_flip():
    feature = dipymetric.IdentityFeature()

    class MinimumAverageDirectFlipMetric(dipymetric.Metric):
        def __init__(self, feature):
            super(MinimumAverageDirectFlipMetric,
                  self).__init__(feature=feature)

        @property
        def is_order_invariant(self):
            return True  # Ordering is handled in the distance computation

        def are_compatible(self, shape1, shape2):
            return shape1[0] == shape2[0]

        def dist(self, v1, v2):
            average_euclidean = lambda x, y: np.mean(norm(x - y, axis=1))
            dist_direct = average_euclidean(v1, v2)
            dist_flipped = average_euclidean(v1, v2[::-1])
            return min(dist_direct, dist_flipped)

    for metric in [
            MinimumAverageDirectFlipMetric(feature),
            dipymetric.MinimumAverageDirectFlipMetric(feature)
    ]:

        # Test special cases of the MDF distance.
        assert_equal(metric.dist(s, s), 0.)
        assert_equal(metric.dist(s, s[::-1]), 0.)

        # Translation
        offset = np.array([0.8, 1.3, 5], dtype=dtype)
        assert_almost_equal(metric.dist(s, s + offset), norm(offset), 5)

        # Scaling
        M_scaling = np.diag([1.2, 2.8, 3]).astype(dtype)
        s_mean = np.mean(s, axis=0)
        s_zero_mean = s - s_mean
        s_scaled = np.dot(M_scaling, s_zero_mean.T).T + s_mean
        d = np.mean(norm((np.diag(M_scaling) - 1) * s_zero_mean, axis=1))
        assert_almost_equal(metric.dist(s, s_scaled), d, 5)

        # Rotation
        from dipy.core.geometry import rodrigues_axis_rotation
        rot_axis = np.array([1, 2, 3], dtype=dtype)
        M_rotation = rodrigues_axis_rotation(rot_axis, 60.).astype(dtype)
        s_mean = np.mean(s, axis=0)
        s_zero_mean = s - s_mean
        s_rotated = np.dot(M_rotation, s_zero_mean.T).T + s_mean

        opposite = norm(np.cross(rot_axis, s_zero_mean),
                        axis=1) / norm(rot_axis)
        distances = np.sqrt(2 * opposite**2 *
                            (1 - np.cos(60. * np.pi / 180.))).astype(dtype)
        d = np.mean(distances)
        assert_almost_equal(metric.dist(s, s_rotated), d, 5)

        for s1, s2 in itertools.product(*[streamlines] *
                                        2):  # All possible pairs
            # Extract features since metric doesn't work directly on streamlines
            f1 = metric.feature.extract(s1)
            f2 = metric.feature.extract(s2)

            # Test method are_compatible
            same_nb_points = f1.shape[0] == f2.shape[0]
            assert_equal(metric.are_compatible(f1.shape, f2.shape),
                         same_nb_points)

            # Test method dist if features are compatible
            if metric.are_compatible(f1.shape, f2.shape):
                distance = metric.dist(f1, f2)
                if np.all(f1 == f2):
                    assert_equal(distance, 0.)

                assert_almost_equal(distance, dipymetric.dist(metric, s1, s2))
                assert_almost_equal(distance, dipymetric.mdf(s1, s2))
                assert_true(distance >= 0.)

        # This metric type is order invariant
        assert_true(metric.is_order_invariant)
        for s1, s2 in itertools.product(*[streamlines] *
                                        2):  # All possible pairs
            f1 = metric.feature.extract(s1)
            f2 = metric.feature.extract(s2)

            if not metric.are_compatible(f1.shape, f2.shape):
                continue

            f1_flip = metric.feature.extract(s1[::-1])
            f2_flip = metric.feature.extract(s2[::-1])

            distance = metric.dist(f1, f2)
            assert_almost_equal(metric.dist(f1_flip, f2_flip), distance)

            if not np.all(f1_flip == f2_flip):
                assert_true(np.allclose(metric.dist(f1, f2_flip), distance))
                assert_true(np.allclose(metric.dist(f1_flip, f2), distance))
Example #42
    def test_effective_size_directed(self):
        effective_size = nx.effective_size(self.D)
        assert_almost_equal(round(effective_size[0], 3), 1.167)
        assert_almost_equal(round(effective_size[1], 3), 1.167)
        assert_almost_equal(round(effective_size[2], 3), 1)
Example #43
def test_large_monty_prize():
    assert_almost_equal(large_monty_prize.log_probability(
        (True,  True,  'A')), numpy.log(0.3))
    assert_almost_equal(large_monty_prize.log_probability(
        (True,  False, 'C')), numpy.log(0.4))
    assert_almost_equal(large_monty_prize.log_probability(
        (False, True,  'B')), numpy.log(0.9))
    assert_almost_equal(large_monty_prize.log_probability(
        (False, False, 'A')), float("-inf"))

    data = [[True,  'A', 'A', 'C', 1, True],
            [True,  'A', 'A', 'C', 0, True],
            [False, 'A', 'A', 'B', 1, False],
            [False, 'A', 'A', 'A', 2, False],
            [False, 'A', 'A', 'C', 1, False],
            [False, 'B', 'B', 'B', 2, False],
            [False, 'B', 'B', 'C', 0, False],
            [True,  'C', 'C', 'A', 2, True],
            [True,  'C', 'C', 'C', 1, False],
            [True,  'C', 'C', 'C', 0, False],
            [True,  'C', 'C', 'C', 2, True],
            [True,  'C', 'B', 'A', 1, False]]

    large_monty_network.fit(data)

    assert_almost_equal(large_monty_prize.log_probability(
        (True, True, 'C')), numpy.log(0.5))
    assert_equal(large_monty_prize.log_probability(
        (True, True, 'B')), float("-inf"))

    a = large_monty_prize.log_probability((True, False, 'A'))
    b = large_monty_prize.log_probability((True, False, 'B'))
    c = large_monty_prize.log_probability((True, False, 'C'))

    assert_almost_equal(a, b)
    assert_almost_equal(b, c)

    assert_equal(large_monty_prize.log_probability(
        (False, False, 'C')), float("-inf"))
    assert_almost_equal(large_monty_prize.log_probability(
        (False, True, 'C')), numpy.log(2. / 3))
Example #44
def test_float_from_string():
    fv = sc.FloatType('2.01')
    assert_almost_equal(fv, 2.01)
Example #45
def test_titanic_network():
    assert_almost_equal(passenger.log_probability('survive'), numpy.log(0.6))
    assert_almost_equal(passenger.log_probability('survive'), numpy.log(0.6))

    assert_almost_equal(gender.log_probability(('survive', 'male')),   float("-inf"))
    assert_almost_equal(gender.log_probability(('survive', 'female')), 0.0)
    assert_almost_equal(gender.log_probability(('perish', 'male')),    0.0)
    assert_almost_equal(gender.log_probability(('perish', 'female')),  float("-inf"))

    assert_almost_equal(tclass.log_probability(('survive', 'first')), float("-inf"))
    assert_almost_equal(tclass.log_probability(('survive', 'second')), 0.0)
    assert_almost_equal(tclass.log_probability(('survive', 'third')), float("-inf"))
    assert_almost_equal(tclass.log_probability(('perish', 'first')), 0.0)
    assert_almost_equal(tclass.log_probability(('perish', 'second')), float("-inf"))
    assert_almost_equal(tclass.log_probability(('perish', 'third')), float("-inf"))
Example #46
def test_metric_cosine():
    feature = dipymetric.VectorBetweenEndpointsFeature()

    class CosineMetric(dipymetric.Metric):
        def __init__(self, feature):
            super(CosineMetric, self).__init__(feature=feature)

        def are_compatible(self, shape1, shape2):
            # Cosine metric works on vectors.
            return shape1 == shape2 and shape1[0] == 1

        def dist(self, v1, v2):
            # Check if we have null vectors
            if norm(v1) == 0:
                return 0. if norm(v2) == 0 else 1.

            v1_normed = v1.astype(np.float64) / norm(v1.astype(np.float64))
            v2_normed = v2.astype(np.float64) / norm(v2.astype(np.float64))
            cos_theta = np.dot(v1_normed, v2_normed.T)
            # Make sure it's in [-1, 1], i.e. within domain of arccosine
            cos_theta = np.minimum(cos_theta, 1.)
            cos_theta = np.maximum(cos_theta, -1.)
            return np.arccos(cos_theta) / np.pi  # Normalized cosine distance

    for metric in [CosineMetric(feature), dipymetric.CosineMetric(feature)]:
        # Test special cases of the cosine distance.
        v0 = np.array([[0, 0, 0]], dtype=np.float32)
        v1 = np.array([[1, 2, 3]], dtype=np.float32)
        v2 = np.array([[1, -1. / 2, 0]], dtype=np.float32)
        v3 = np.array([[-1, -2, -3]], dtype=np.float32)

        assert_equal(metric.dist(v0, v0), 0.)  # dot-dot
        assert_equal(metric.dist(v0, v1), 1.)  # dot-line
        assert_equal(metric.dist(v1, v1), 0.)  # collinear
        assert_equal(metric.dist(v1, v2), 0.5)  # orthogonal
        assert_equal(metric.dist(v1, v3), 1.)  # opposite

        for s1, s2 in itertools.product(*[streamlines] *
                                        2):  # All possible pairs
            # Extract features since metric doesn't work directly on streamlines
            f1 = metric.feature.extract(s1)
            f2 = metric.feature.extract(s2)

            # Test method are_compatible
            are_vectors = f1.shape[0] == 1 and f2.shape[0] == 1
            same_dimension = f1.shape[1] == f2.shape[1]
            assert_equal(metric.are_compatible(f1.shape, f2.shape), are_vectors
                         and same_dimension)

            # Test method dist if features are compatible
            if metric.are_compatible(f1.shape, f2.shape):
                distance = metric.dist(f1, f2)
                if np.all(f1 == f2):
                    assert_almost_equal(distance, 0.)

                assert_almost_equal(distance, dipymetric.dist(metric, s1, s2))
                assert_true(distance >= 0.)
                assert_true(distance <= 1.)

        # This metric type is not order invariant
        assert_false(metric.is_order_invariant)
        for s1, s2 in itertools.product(*[streamlines] *
                                        2):  # All possible pairs
            f1 = metric.feature.extract(s1)
            f2 = metric.feature.extract(s2)

            if not metric.are_compatible(f1.shape, f2.shape):
                continue

            f1_flip = metric.feature.extract(s1[::-1])
            f2_flip = metric.feature.extract(s2[::-1])

            distance = metric.dist(f1, f2)
            assert_almost_equal(metric.dist(f1_flip, f2_flip), distance)

            if not np.all(f1_flip == f2_flip):
                assert_false(metric.dist(f1, f2_flip) == distance)
                assert_false(metric.dist(f1_flip, f2) == distance)
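Why the orthogonal case above yields exactly 0.5: with v1 = (1, 2, 3) and v2 = (1, -1/2, 0) the dot product is zero, so the normalized distance is arccos(0)/pi.

import numpy as np
v1 = np.array([1., 2., 3.])
v2 = np.array([1., -0.5, 0.])
print(np.dot(v1, v2))          # 0.0
print(np.arccos(0.0) / np.pi)  # 0.5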
Example #47
def test_chow_liu_structure_learning():
    logps = -19.8282, -344.248785, -4842.40158, -603.2370
    for X, logp in zip(datasets, logps):
        model = BayesianNetwork.from_samples(X, algorithm='chow-liu')
        assert_almost_equal(model.log_probability(X).sum(), logp, 4)
Example #48
def test_large_monty_remaining():
    model = large_monty_remaining

    assert_almost_equal(model.log_probability(0), numpy.log(0.1))
    assert_almost_equal(model.log_probability(1), numpy.log(0.7))
    assert_almost_equal(model.log_probability(2), numpy.log(0.2))

    data = [[True,  'A', 'A', 'C', 1, True],
            [True,  'A', 'A', 'C', 0, True],
            [False, 'A', 'A', 'B', 1, False],
            [False, 'A', 'A', 'A', 2, False],
            [False, 'A', 'A', 'C', 1, False],
            [False, 'B', 'B', 'B', 2, False],
            [False, 'B', 'B', 'C', 0, False],
            [True,  'C', 'C', 'A', 2, True],
            [True,  'C', 'C', 'C', 1, False],
            [True,  'C', 'C', 'C', 0, False],
            [True,  'C', 'C', 'C', 2, True],
            [True,  'C', 'B', 'A', 1, False]]

    large_monty_network.fit(data)

    assert_almost_equal(model.log_probability(0), numpy.log(3. / 12))
    assert_almost_equal(model.log_probability(1), numpy.log(5. / 12))
    assert_almost_equal(model.log_probability(2), numpy.log(4. / 12))
Example #49
    def check_cp(transition, analysis, results):
        # a little nested function that does the actual check
        for (i, ens) in enumerate(transition.ensembles):
            cp_ens = analysis.crossing_probability(ens)
            for x in results[i]:
                assert_almost_equal(results[i][x], cp_ens(x))
Example #50
def test_greedy_nan_structure_learning():
    logps = -7.5239, -159.6505, -2058.5706, -203.7662
    for X, logp in zip(datasets_nan, logps):
        model = BayesianNetwork.from_samples(X, algorithm='greedy')
        assert_almost_equal(model.log_probability(X).sum(), logp, 4)
Example #51
    def test_flux_matrix_pd_None(self):
        series = flux_matrix_pd(self.fluxes, sort_method=None)
        for idx in self.indices:
            assert_almost_equal(series[idx], self.expected_series[idx])
Example #52
def test_greedy_structure_learning():
    logps = -19.8282, -345.9527, -4847.59688, -611.0356
    for X, logp in zip(datasets, logps):
        model = BayesianNetwork.from_samples(X, algorithm='greedy')
        assert_almost_equal(model.log_probability(X).sum(), logp, 4)
Example #53
def test_inventory_audit():
    ev = get_sqlite_evaluator()
    audit_resources = an.inventory_audit(ev,
                                         agentids=[30])['ResourceId'].tolist()
    ans_audit_resources = ev.eval('AgentStateInventories')['ResourceId']
    assert_almost_equal(audit_resources, ans_audit_resources)
Example #54
    def test_rate_matrix_calculation(self):
        mistis_analysis = self._make_tis_analysis(self.mistis)
        mistis_rate = mistis_analysis.rate_matrix(steps=self.mistis_steps)
        pairs = [(self.state_A, self.state_B), (self.state_B, self.state_A)]
        for (vol_1, vol_2) in pairs:
            assert_almost_equal(mistis_rate[(vol_1, vol_2)], 0.0125)
Example #55
    def assert_no_error(u, x, t, n):
        u_e = exact_solution(x, t[n])
        diff = abs(u - u_e).max()
        nt.assert_almost_equal(diff, 0, places=13)
Example #56
    def test_with_minus_move_flux(self):
        network = self.mstis
        scheme = paths.DefaultScheme(network, engine=RandomMDEngine())
        scheme.build_move_decision_tree()

        # create the minus move steps
        # `center` is the edge of the state/innermost interface
        center = {self.state_A: 0.0, self.state_B: 1.0}
        replica = {self.state_A: -1, self.state_B: -2}
        minus_ensemble_to_mover = {m.minus_ensemble: m
                                   for m in scheme.movers['minus']}
        state_to_minus_ensemble = {ens.state_vol: ens
                                   for ens in network.minus_ensembles}
        minus_changes = []
        # `delta` is the change on either side for in vs. out
        for (state, delta) in [(self.state_A, 0.1), (self.state_B, -0.1)]:
            minus_ens = state_to_minus_ensemble[state]
            minus_mover = minus_ensemble_to_mover[minus_ens]
            a_in = center[state] - delta
            a_out = center[state] + delta
            # note that these trajs are equivalent to minus move
            # descriptions in TestMinusMoveFlux
            seq_1 = [a_in] + [a_out]*2 + [a_in]*5 + [a_out]*5 + [a_in]
            seq_2 = [a_in] + [a_out]*3 + [a_in]*3 + [a_out]*3 + [a_in]

            for seq in [seq_1, seq_2]:
                traj = make_1d_traj(seq)
                assert_equal(minus_ens(traj), True)
                samp = paths.Sample(trajectory=traj,
                                    ensemble=minus_ens,
                                    replica=replica[state])
                sample_set = paths.SampleSet([samp])
                change = paths.AcceptedSampleMoveChange(
                    samples=[samp],
                    mover=minus_mover,
                    details=paths.Details()
                )
                minus_changes.append(change)

        active = self.mstis_steps[0].active
        steps = []
        cycle = -1
        for m_change in minus_changes:
            cycle += 1
            active = active.apply_samples(m_change.samples)
            step = paths.MCStep(mccycle=cycle,
                                active=active,
                                change=m_change)
            steps.append(step)
            for old_step in self.mstis_steps[1:]:
                cycle += 1
                active = active.apply_samples(old_step.change.samples)
                step = paths.MCStep(mccycle=cycle,
                                    active=active,
                                    change=old_step.change)
                steps.append(step)

        analysis = StandardTISAnalysis(
            network=self.mstis,
            scheme=scheme,
            max_lambda_calcs={t: {'bin_width': 0.1,
                                  'bin_range': (-0.1, 1.1)}
                              for t in network.sampling_transitions},
            steps=steps
        )

        # now we actually verify correctness
        avg_t_in = (5.0 + 3.0) / 2
        avg_t_out = (2.0 + 5.0 + 3.0 + 3.0) / 4
        expected_flux = 1.0 / (avg_t_in + avg_t_out)

        # NOTE: Apparently this approach screws up the TCP calculation. I
        # think this is a problem in the fake data, not the simulation.
        for flux in analysis.flux_matrix.values():
            assert_almost_equal(flux, expected_flux)
Example #57
def test_inequalities():
    # Memorize the dataset.
    hyperparameters = dict(
        loss="custom:mse_with_inequalities",
        peptide_amino_acid_encoding="one-hot",
        activation="tanh",
        layer_sizes=[16],
        max_epochs=50,
        minibatch_size=32,
        random_negative_rate=0.0,
        early_stopping=False,
        validation_split=0.0,
        locally_connected_layers=[
            {
                "filters": 8,
                "activation": "tanh",
                "kernel_size": 3
            }
        ],
        dense_layer_l1_regularization=0.0,
        dropout_probability=0.0)

    df = pandas.DataFrame()
    df["peptide"] = random_peptides(1000, length=9)

    # First half are binders
    df["binder"] = df.index < len(df) / 2
    df["value"] = df.binder.map({True: 100, False: 5000})
    df.loc[:10, "value"] = 1.0  # some strong binders
    df["inequality1"] = "="
    df["inequality2"] = df.binder.map({True: "<", False: "="})
    df["inequality3"] = df.binder.map({True: "=", False: ">"})

    # "A" at start of peptide indicates strong binder
    df["peptide"] = [
        ("C" if not row.binder else "A") + row.peptide[1:]
        for _, row in df.iterrows()
    ]

    fit_kwargs = {'verbose': 0}

    # Prediction1 uses no inequalities (i.e. all are (=))
    predictor = Class1NeuralNetwork(**hyperparameters)
    predictor.fit(
        df.peptide.values,
        df.value.values,
        inequalities=df.inequality1.values,
        **fit_kwargs)
    df["prediction1"] = predictor.predict(df.peptide.values)

    # Prediction2 has a (<) inequality on binders and an (=) on non-binders
    predictor = Class1NeuralNetwork(**hyperparameters)
    predictor.fit(
        df.peptide.values,
        df.value.values,
        inequalities=df.inequality2.values,
        **fit_kwargs)
    df["prediction2"] = predictor.predict(df.peptide.values)

    # Prediction3 has a (=) inequality on binders and an (>) on non-binders
    predictor = Class1NeuralNetwork(**hyperparameters)
    predictor.fit(
        df.peptide.values,
        df.value.values,
        inequalities=df.inequality3.values,
        **fit_kwargs)
    df["prediction3"] = predictor.predict(df.peptide.values)

    df_binders = df.loc[df.binder]
    df_nonbinders = df.loc[~df.binder]

    print("***** Binders: *****")
    print(df_binders.head(5))

    print("***** Non-binders: *****")
    print(df_nonbinders.head(5))

    # Binders should always be given tighter predicted affinity than non-binders
    assert_less(df_binders.prediction1.mean(), df_nonbinders.prediction1.mean())
    assert_less(df_binders.prediction2.mean(), df_nonbinders.prediction2.mean())
    assert_less(df_binders.prediction3.mean(), df_nonbinders.prediction3.mean())

    # prediction2 binders should be tighter on average than prediction1
    # binders, since prediction2 has a (<) inequality for binders.
    # Non-binders should be about the same between prediction2 and prediction1
    assert_less(df_binders.prediction2.mean(), df_binders.prediction1.mean())
    assert_almost_equal(
        df_nonbinders.prediction2.mean(),
        df_nonbinders.prediction1.mean(),
        delta=3000)

    # prediction3 non-binders should be weaker on average than prediction2 (or 1)
    # non-binders, since prediction3 has a (>) inequality for these peptides.
    # Binders should be about the same.
    assert_greater(
        df_nonbinders.prediction3.mean(),
        df_nonbinders.prediction2.mean())
    assert_greater(
        df_nonbinders.prediction3.mean(),
        df_nonbinders.prediction1.mean())
    assert_almost_equal(
        df_binders.prediction3.mean(),
        df_binders.prediction1.mean(),
        delta=3000)
Example #58
def test_shuffle_series():
    """
        Test function to ensure Inference.shuffle_series() reorders data within columns.
        If parameter 'only' is specified, only that column should be shuffled; otherwise
        all columns must be shufffled.
    """
    DF = coupled_random_walks(  S1 = 100, S2 = 100, T = 10, 
                                N = 200, mu1 = 0.1, mu2 = 0.02, 
                                sigma1 = 0.01, sigma2 = 0.01, 
                                alpha = 0.1, epsilon=0, lag = 2)
    
    ### Shuffling both time series S1 & S2
    DF_shuffled = shuffle_series(DF)

    # Ensure rows are shuffled but not lost
    assert_almost_equal(np.mean(DF['S1']), np.mean(DF_shuffled['S1']))
    assert_almost_equal(np.mean(DF['S2']), np.mean(DF_shuffled['S2']))
    # Ensure rows are shuffled independently 
    assert not np.sum(DF['S1'].head(20)) == np.sum(DF_shuffled['S1'].head(20))
    assert not DF.head(10).equals(DF_shuffled.head(10))


    ### Shuffling only time series S1
    S1_shuffled = shuffle_series(DF, only=['S1'])

    # Ensure rows are shuffled but not lost
    assert_almost_equal(np.mean(DF['S1']), np.mean(S1_shuffled['S1']))
    assert_almost_equal(np.mean(DF['S2']), np.mean(S1_shuffled['S2']))

    # Ensure only S1 has been shuffled
    assert DF['S2'].head(10).equals(S1_shuffled['S2'].head(10))
    assert not DF['S1'].head(10).equals(S1_shuffled['S1'].head(10))

    ### Shuffling only time series S2
    S2_shuffled = shuffle_series(DF, only=['S2'])

    # Ensure rows are shuffled but not lost
    assert_almost_equal(np.mean(DF['S1']), np.mean(S2_shuffled['S1']))
    assert_almost_equal(np.mean(DF['S2']), np.mean(S2_shuffled['S2']))

    # Ensure only S2 has been shuffled
    assert DF['S1'].head(10).equals(S2_shuffled['S1'].head(10))
    assert not DF['S2'].head(10).equals(S2_shuffled['S2'].head(10))
Example #59
def the_local_topic_distribution_is(step, distribution):
    distribution = json.loads(distribution)
    for index, topic_dist in enumerate(world.local_topic_distribution):
        assert_almost_equal(topic_dist["probability"],
                            distribution[index],
                            places=5)
Example #60
def test_separable(c=np.pi):
    """
    Verify the solver with a separable exact solution
        u_e = X(x)Y(y)T(t)
    where
        X = Ax^3 - (3/2)*A*Lx*x**2
        Y = By^3 - (3/2)*B*Ly*y**2
        T = D*t
    Uses a constant wave velocity q, and no damping, b = 0.
    """
    import nose.tools as nt

    class CubicSolution(WaveProblem):
        def __init__(self, A, B, D, dx, dy, Lx, Ly, q_const=2.0):
            self.A, self.B, self.D = A, B, D
            self.dx, self.dy = dx, dy
            self.Lx, self.Ly = Lx, Ly
            self.q_const = q_const
            self.b = 0

        def X(self, x):
            A, Lx = self.A, self.Lx
            return A * x**3 - 1.5 * A * Lx * x**2

        def Y(self, y):
            B, Ly = self.B, self.Ly
            return B * y**3 - 1.5 * B * Ly * y**2

        def T(self, t):
            D = self.D
            return D * t

        def u_e(self, x, y, t):
            X, Y, T = self.X, self.Y, self.T
            return X(x) * Y(y) * T(t)

        def V(self, x, y):
            X, Y, D = self.X, self.Y, self.D
            return D * X(x) * Y(y)

        def I(self, x, y):
            return self.u_e(x, y, 0)

        def f(self, x, y, t):
            A, B, D = self.A, self.B, self.D
            X, Y, T = self.X, self.Y, self.T
            Lx, Ly = self.Lx, self.Ly
            dx, dy = self.dx, self.dy
            q = self.q_const

            # Vector evaluated
            if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
                # All mesh points
                fx = A * (6 * x[:, np.newaxis] - 3 * Lx) * Y(y[np.newaxis, :])
                fy = B * (6 * y[np.newaxis, :] - 3 * Ly) * X(x[:, np.newaxis])
                f = -q * (fx + fy) * T(t)
                # Add extra contributions at boundaries
                f[0, :] -= 2 * A * dx * q * Y(y[:]) * T(t)
                f[-1, :] += 2 * A * dx * q * Y(y[:]) * T(t)
                f[:, 0] -= 2 * B * dy * q * X(x[:]) * T(t)
                f[:, -1] += 2 * B * dy * q * X(x[:]) * T(t)

                # print "time ", t
                # print f

            # Pointwise evaluated
            else:
                fx = A * (6 * x - 3 * Lx) * Y(y)
                fy = B * (6 * y - 3 * Ly) * X(x)
                f = -q * (fx + fy) * T(t)
                # Add extra contributions at boundaries
                tol = 1e-14
                if abs(x) < tol:
                    f -= 2 * A * dx * q * Y(y) * T(t)
                if abs(x - Lx) < tol:
                    f += 2 * A * dx * q * Y(y) * T(t)
                if abs(y) < tol:
                    f -= 2 * B * dy * q * X(x) * T(t)
                if abs(y - Ly) < tol:
                    f += 2 * B * dy * q * X(x) * T(t)

            return f

    Lx = 0.3
    Ly = 0.3
    Nx = 3
    Ny = 3
    dx = Lx / float(Nx)
    dy = Ly / float(Ny)
    dt = 0.01
    T = 10
    q = 1.2
    b = 0.0
    A = 2.
    B = 2.
    D = 1.

    problem = CubicSolution(A, B, D, dx, dy, Lx, Ly, q_const=q)

    print "Verifying solver for a separable solution."
    for v in ["scalar", "vectorized"]:
        solver = WaveSolver(problem, Lx, Ly, Nx, Ny, dt, T, version=v)
        x, y = solver.get_mesh()

        u = solver.get_solution()

        while solver.n < solver.Nt:
            solver.advance()
            t = solver.t[solver.n]

        u_e = problem.u_e(x[:, np.newaxis], y[np.newaxis, :], t)
        u = solver.get_solution()
        diff = abs(u - u_e).max()
        print "%10s: abs(u-u_e).max() = %e" % (v, diff)
        nt.assert_almost_equal(diff, 0, places=12)