Example #1
def test_shuffled_edges():
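    # Reordering the input edges should leave the ll output unchanged and only
    # permute the edge rows of the dwell and em_update outputs.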

    d = copy.deepcopy(d_original)
    original_dwell = json.loads(arbplf_dwell(json.dumps(d)))

    d = copy.deepcopy(d_original)
    original_ll = json.loads(arbplf_ll(json.dumps(d)))

    d = copy.deepcopy(d_original)
    d['site_reduction'] = {'aggregation': 'sum'}
    original_em_update = json.loads(arbplf_em_update(json.dumps(d)))

    iter_count = 10
    for i in range(iter_count):
        d_shuffled, perm = _shuffle_edges(d_original)

        # the ll output does not have an edge column
        d = copy.deepcopy(d_shuffled)
        ll = json.loads(arbplf_ll(json.dumps(d)))
        assert_equal(ll, original_ll)

        d = copy.deepcopy(d_shuffled)
        dwell = json.loads(arbplf_dwell(json.dumps(d)))
        dwell_prime = _perm_output_edges(dwell, perm)
        dwell_prime['data'].sort()
        assert_equal(dwell_prime, original_dwell)

        d = copy.deepcopy(d_shuffled)
        d['site_reduction'] = {'aggregation': 'sum'}
        em_update = json.loads(arbplf_em_update(json.dumps(d)))
        em_update_prime = _perm_output_edges(em_update, perm)
        em_update_prime['data'].sort()
        assert_equal(em_update_prime, original_em_update)
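
This test relies on two helpers that are not shown in this snippet,
_shuffle_edges and _perm_output_edges. A hypothetical sketch of what they might
look like, assuming the output tables carry an 'edge' column that holds indices
into the input edge list:

import copy
import random

def _shuffle_edges(d_in):
    # Sketch only: reorder the edges and their paired rate coefficients by a
    # random permutation, and return that permutation alongside the new input.
    d = copy.deepcopy(d_in)
    md = d['model_and_data']
    perm = list(range(len(md['edges'])))
    random.shuffle(perm)
    md['edges'] = [md['edges'][i] for i in perm]
    md['edge_rate_coefficients'] = [md['edge_rate_coefficients'][i] for i in perm]
    return d, perm

def _perm_output_edges(out, perm):
    # Sketch only: map the 'edge' column of an output table back to the edge
    # indices of the original, unshuffled input.
    out = copy.deepcopy(out)
    k = out['columns'].index('edge')
    for row in out['data']:
        row[k] = perm[row[k]]
    return out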
Example #2
def test_shuffled_nodes():
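    # Relabeling the nodes should leave the dwell, ll, and em_update outputs
    # unchanged; unlike edge shuffling, no output permutation is needed.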

    d = copy.deepcopy(d_original)
    original_dwell = json.loads(arbplf_dwell(json.dumps(d)))

    d = copy.deepcopy(d_original)
    original_ll = json.loads(arbplf_ll(json.dumps(d)))

    d = copy.deepcopy(d_original)
    d['site_reduction'] = {'aggregation': 'sum'}
    original_em_update = json.loads(arbplf_em_update(json.dumps(d)))

    iter_count = 10
    for i in range(iter_count):
        d_shuffled = _shuffle_nodes(d_original)

        d = copy.deepcopy(d_shuffled)
        dwell = json.loads(arbplf_dwell(json.dumps(d)))
        assert_equal(dwell, original_dwell)

        d = copy.deepcopy(d_shuffled)
        ll = json.loads(arbplf_ll(json.dumps(d)))
        assert_equal(ll, original_ll)

        d = copy.deepcopy(d_shuffled)
        d['site_reduction'] = {'aggregation': 'sum'}
        em_update = json.loads(arbplf_em_update(json.dumps(d)))
        assert_equal(em_update, original_em_update)
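
As above, _shuffle_nodes is defined elsewhere in the test module. A hypothetical
sketch, assuming it relabels the nodes by a random permutation and updates every
place the input refers to a node index, namely the edge endpoints and the node
axis of the per-site probability array:

import copy
import random

def _shuffle_nodes(d_in):
    # Sketch only: apply a random relabeling old -> perm[old] to the nodes.
    d = copy.deepcopy(d_in)
    md = d['model_and_data']
    node_count = len(md['probability_array'][0])
    perm = list(range(node_count))
    random.shuffle(perm)
    inverse = [0] * node_count
    for old, new in enumerate(perm):
        inverse[new] = old
    md['edges'] = [[perm[a], perm[b]] for a, b in md['edges']]
    md['probability_array'] = [[site[inverse[j]] for j in range(node_count)]
                               for site in md['probability_array']]
    return d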
Example #3
def test_heterogeneous_edge_rates():
    # try changing one of the edge rate coefficients
    d = {
        "model_and_data": {
            "edges": [[0, 1], [1, 2]],
            "edge_rate_coefficients": [1, 2],
            "rate_matrix": [[0, 1], [0, 0]],
            "probability_array": [[[1, 0], [1, 1], [1, 0]]]
        },
        "site_reduction": {
            "aggregation": "only"
        }
    }

    actual_marginal = json.loads(arbplf_marginal(json.dumps(d)))
    assert_equal(actual_marginal, desired_marginal)

    g = copy.deepcopy(d)
    g['trans_reduction'] = dict(selection=[[0, 1], [1, 0]])
    actual_trans = json.loads(arbplf_trans(json.dumps(g)))
    assert_equal(actual_trans, desired_trans)

    actual_ll = json.loads(arbplf_ll(json.dumps(d)))
    desired_ll = {"columns": ["value"], "data": [[-3.0]]}
    assert_equal(actual_ll, desired_ll)

    actual_em_update = json.loads(arbplf_em_update(json.dumps(d)))
    assert_equal(actual_em_update, desired_em_update)

    actual_dwell = json.loads(arbplf_dwell(json.dumps(d)))
    assert_equal(actual_dwell, desired_dwell)
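
The expected log-likelihood of -3.0 can be checked by hand (this is my reading of
the model, not part of the original test): the rate matrix only allows 0 -> 1
transitions at rate 1, nodes 0 and 2 are both observed in state 0, and the two
edges have rate coefficients 1 and 2, so the likelihood is the probability of no
transition over a total path length of 3.

import math

# probability of no event for a rate-1 process over edge lengths 1 and 2
likelihood = math.exp(-1.0 * 1) * math.exp(-1.0 * 2)
print(math.log(likelihood))  # -3.0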
Example #4
def test_edges_are_not_preordered():
    # Try switching the order of the edges in the input
    # and increasing the birth rate in the rate matrix.
    d = {
        "model_and_data": {
            "edges": [[1, 2], [0, 1]],
            "edge_rate_coefficients": [1, 2],
            "rate_matrix": [[0, 2], [0, 0]],
            "probability_array": [[[1, 0], [1, 1], [1, 0]]]
        },
        "site_reduction": {
            "aggregation": "only"
        }
    }

    actual_marginal = json.loads(arbplf_marginal(json.dumps(d)))
    assert_equal(actual_marginal, desired_marginal)

    g = copy.deepcopy(d)
    g['trans_reduction'] = dict(selection=[[0, 1], [1, 0]])
    actual_trans = json.loads(arbplf_trans(json.dumps(g)))
    assert_equal(actual_trans, desired_trans)

    actual_ll = json.loads(arbplf_ll(json.dumps(d)))
    desired_ll = {"columns": ["value"], "data": [[-6.0]]}
    assert_equal(actual_ll, desired_ll)

    actual_em_update = json.loads(arbplf_em_update(json.dumps(d)))
    assert_equal(actual_em_update, desired_em_update)

    actual_dwell = json.loads(arbplf_dwell(json.dumps(d)))
    assert_equal(actual_dwell, desired_dwell)
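
The same hand check applies here with the 0 -> 1 rate doubled to 2: the
probability of no transition over the total path length 1 + 2 = 3 is exp(-2 * 3),
so the expected log-likelihood is -6.0. Reordering the edges has no effect
because each coefficient stays paired with its edge by position in the two lists.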
Example #5
def main():
    np.random.seed(123475)

    # sample a random rate matrix
    state_count = 3
    edge_count = 3
    node_count = edge_count + 1
    #Q = sample_rate_matrix(state_count)
    Q = sample_reversible_rate_matrix(state_count)
    p = equilibrium(Q)
    expected_rate = -p.dot(np.diag(Q))
    print('expected rate:', expected_rate)
    Q = Q / expected_rate
    np.fill_diagonal(Q, 0)
    # use ad hoc data
    probability_array = [[[1, 1, 1], [1, 0, 0], [1, 0, 0], [1, 0, 0]],
                         [[1, 1, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]],
                         [[1, 1, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]]]
    site_weights = [.7, .2, .1]
    edges = [[0, 1], [0, 2], [0, 3]]
    coefficients = [.01, .01, .01]
    d = {
        "model_and_data": {
            "edges": edges,
            "edge_rate_coefficients": coefficients,
            "rate_matrix": Q.tolist(),
            "probability_array": probability_array
        },
        "site_reduction": {
            "aggregation": site_weights
        }
    }
    print(d)
    for i in range(100):
        s = arbplf_em_update(json.dumps(d))
        df = pd.read_json(StringIO(s), orient='split', precise_float=True)
        y = df.value.values.tolist()
        d['model_and_data']['edge_rate_coefficients'] = y
        print('coefficients updated by EM:', y)
    s = arbplf_newton_refine(json.dumps(d))
    df = pd.read_json(StringIO(s), orient='split', precise_float=True)
    y = df.value.values.tolist()
    print('coefficients updated by newton refinement:', y)

    d['trans_reduction'] = {
        'selection': [[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]],
        'aggregation': 'sum'
    }
    d['model_and_data']['edge_rate_coefficients'] = y

    s = arbplf_trans(json.dumps(d))
    df = pd.read_json(StringIO(s), orient='split', precise_float=True)
    y = df.value.values.tolist()
    print('conditionally expected transition counts:', y)
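
The loop above always runs a fixed 100 EM iterations and parses each result with
pandas. Since the outputs use pandas' "split" orient (a dict with "columns" and
"data", as in the desired_ll values of the earlier examples), the updated
coefficients can also be extracted with plain json; a minimal sketch under that
assumption, with a simple early-stopping check added for illustration:

import json

def em_coefficients(s, column='value'):
    # Pull one column out of a "split"-orient JSON table without pandas.
    out = json.loads(s)
    k = out['columns'].index(column)
    return [row[k] for row in out['data']]

def run_em(d, max_iter=100, tol=1e-12):
    # Repeat arbplf_em_update until the coefficients stop changing, or give up
    # after max_iter iterations.
    for _ in range(max_iter):
        prev = d['model_and_data']['edge_rate_coefficients']
        y = em_coefficients(arbplf_em_update(json.dumps(d)))
        d['model_and_data']['edge_rate_coefficients'] = y
        if max(abs(a - b) for a, b in zip(y, prev)) < tol:
            break
    return y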