Code example #1
File: test_irmio.py Project: ericmjonas/netmotifs
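# NOTE: this excerpt assumes the surrounding test module provides `irm`,
# `irmio`, `model`, the `data_simple_nonconj` / `latent_simple_nonconj`
# fixtures, and `assert_approx_equal` (e.g. from numpy.testing); those
# definitions are not shown here.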
def test_simple_nonconj():
    rng = irm.RNG()
    irm_model = irmio.create_model_from_data(data_simple_nonconj, rng=rng)

    irmio.set_model_latent(irm_model, latent_simple_nonconj, rng=rng)

    a = irm_model.domains["d1"].get_assignments()
    axes = irm_model.relations["R1"].get_axes()
    axes_objs = [(irm_model.domains[dn], irm_model.domains[dn].get_relation_pos("R1"))
                 for dn in axes]

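    # pull the component parameters for R1, keyed by tuples of group ids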
    comps = model.get_components_in_relation(axes_objs, irm_model.relations["R1"])

    g0 = a[0]
    g1 = a[2]
    g2 = a[4]

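    # g0/g1/g2 are the groups of entities 0, 2, and 4; the latent installed by
    # set_model_latent gives each ordered pair of groups its own value of p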
    assert_approx_equal(comps[g0, g0]["p"], 0.0)
    assert_approx_equal(comps[g0, g1]["p"], 0.01)
    assert_approx_equal(comps[g0, g2]["p"], 0.02)

    assert_approx_equal(comps[g1, g0]["p"], 0.1)
    assert_approx_equal(comps[g1, g1]["p"], 0.11)
    assert_approx_equal(comps[g1, g2]["p"], 0.12)

    assert_approx_equal(comps[g2, g0]["p"], 0.2)
    assert_approx_equal(comps[g2, g1]["p"], 0.21)
    assert_approx_equal(comps[g2, g2]["p"], 0.22)
Code example #2
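# NOTE: as with the previous example, this excerpt assumes the surrounding test
# module's imports (numpy as np, `irm`, and the `model` / `models` modules from
# the same package); those imports are not shown here.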
def test_slice_nonconj():
    T1_N = 10
    T2_N = 20
    np.random.seed(0)
    rng = irm.RNG()

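    # random binary relation between the T1_N row entities and T2_N column entities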
    data = np.random.rand(T1_N, T2_N) > 0.5
    data.shape = T1_N, T2_N

    m = models.BetaBernoulliNonConj()
    r = irm.Relation([('T1', T1_N), ('T2', T2_N)],
                     data, m)
    hps = m.create_hps()
    hps['alpha'] = 1.0
    hps['beta'] = 1.0

    r.set_hps(hps)

    tf_1 = model.DomainInterface(T1_N, {'r': ('T1', r)})
    tf_1.set_hps({'alpha' : 1.0})
    tf_2 = model.DomainInterface(T2_N, {'r' : ('T2', r)})
    tf_2.set_hps({'alpha' : 1.0})

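    # place the T1 entities into T1_GRPN groups, round-robin by index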
    T1_GRPN = 4
    t1_assign = np.arange(T1_N) % T1_GRPN
    t1_grps = {}
    for i, gi in enumerate(t1_assign):
        if gi not in t1_grps:
            g = tf_1.create_group(rng)
            t1_grps[gi] = g
        tf_1.add_entity_to_group(t1_grps[gi], i)

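    # same round-robin grouping for the T2 entities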
    T2_GRPN = 4
    t2_assign = np.arange(T2_N) % T2_GRPN
    t2_grps = {}
    for i, gi in enumerate(t2_assign):
        if gi not in t2_grps:
            g = tf_2.create_group(rng)
            t2_grps[gi] = g
        tf_2.add_entity_to_group(t2_grps[gi], i)


    t1_assign_g = tf_1.get_assignments()
    t2_assign_g = tf_2.get_assignments()

    # for every (T1 group, T2 group) pair, count heads/tails in that block of the data
    coord_data = {}
    for t1_g in np.unique(t1_assign_g):
        for t2_g in np.unique(t2_assign_g):
            t1_entities = np.argwhere(t1_assign_g == t1_g).flatten()
            t2_entities = np.argwhere(t2_assign_g == t2_g).flatten()
            
            dps = []
            for e1 in t1_entities:
                for e2 in t2_entities:
                    dps.append(data[e1, e2])
            heads = np.sum(np.array(dps) == 1)
            tails = np.sum(np.array(dps) == 0)
            # coords = ((tf_1.get_relation_groupid(0, t1_g), 
            #            tf_2.get_relation_groupid(0, t2_g)))
            coord_data[(t1_g, t2_g)] = (heads, tails)

    # for each hyperparameter setting, repeatedly slice-sample the component
    # parameters and histogram the sampled p for every (T1 group, T2 group) block
    for alpha, beta in [(1.0, 1.0), (10.0, 1.0),
                        (1.0, 10.0), (0.1, 5.0)]:
        coords_hist = {k : [] for k in coord_data}

        print "alpha=", alpha, "beta=", beta, "="*50
        hps['alpha'] = alpha
        hps['beta'] = beta

        r.set_hps(hps)

        ITERS = 100000
        for i in range(ITERS):
            r.apply_comp_kernel("slice_sample", rng, {'width' : 0.4})

            component_data = model.get_components_in_relation(
                [(tf_1, 0), (tf_2, 0)], r)

            for c in coord_data:
                coords_hist[c].append(component_data[c]['p'])
        for c in coords_hist:
            heads, tails = coord_data[c]
            empirical_p = np.mean(coords_hist[c])
            # posterior mean of p under Beta(alpha + heads, beta + tails)
            posterior_mean_p = float(heads + alpha) / (heads + tails + alpha + beta)
            print(empirical_p - posterior_mean_p)
            # the sampled mean should agree to 2 significant figures
            np.testing.assert_approx_equal(empirical_p, posterior_mean_p, 2)