Пример #1
0
def main():
    """EM and Newton-refinement demo on a 3-state model over a 3-edge star tree.

    Samples a reversible rate matrix, normalizes it to unit expected rate at
    equilibrium, runs 100 EM updates of the edge rate coefficients, refines
    them with Newton's method, and finally reports the conditionally expected
    off-diagonal transition counts.
    """
    np.random.seed(123475)

    # sample a random reversible rate matrix
    state_count = 3
    Q = sample_reversible_rate_matrix(state_count)
    p = equilibrium(Q)
    # expected rate at equilibrium; used to rescale Q to unit expected rate
    expected_rate = -p.dot(np.diag(Q))
    print('expected rate:', expected_rate)
    Q = Q / expected_rate
    # the downstream JSON interface expects zeros on the diagonal
    np.fill_diagonal(Q, 0)

    # use ad hoc data: per-site observation masks for the 4 nodes (root + 3 tips)
    probability_array = [[[1, 1, 1], [1, 0, 0], [1, 0, 0], [1, 0, 0]],
                         [[1, 1, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]],
                         [[1, 1, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]]]
    site_weights = [.7, .2, .1]
    edges = [[0, 1], [0, 2], [0, 3]]
    coefficients = [.01, .01, .01]
    d = {
        "model_and_data": {
            "edges": edges,
            "edge_rate_coefficients": coefficients,
            "rate_matrix": Q.tolist(),
            "probability_array": probability_array
        },
        "site_reduction": {
            "aggregation": site_weights
        }
    }
    print(d)

    # iterate EM updates, feeding each update back into the model
    for _ in range(100):
        s = arbplf_em_update(json.dumps(d))
        df = pd.read_json(StringIO(s), orient='split', precise_float=True)
        y = df.value.values.tolist()
        d['model_and_data']['edge_rate_coefficients'] = y
        print('coefficients updated by EM:', y)

    # polish the EM estimate with Newton refinement
    s = arbplf_newton_refine(json.dumps(d))
    df = pd.read_json(StringIO(s), orient='split', precise_float=True)
    y = df.value.values.tolist()
    print('coefficients updated by newton refinement:', y)

    # sum conditionally expected counts over all off-diagonal transitions
    d['trans_reduction'] = {
        'selection': [[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]],
        'aggregation': 'sum'
    }
    d['model_and_data']['edge_rate_coefficients'] = y

    s = arbplf_trans(json.dumps(d))
    df = pd.read_json(StringIO(s), orient='split', precise_float=True)
    y = df.value.values.tolist()
    print('conditionally expected transition counts:', y)
Пример #2
0
def main():
    """EM and Newton-refinement demo on a 3-state model over a 3-edge star tree.

    Samples a reversible rate matrix, normalizes it to unit expected rate at
    equilibrium, runs 100 EM updates of the edge rate coefficients, refines
    them with Newton's method, and finally reports the conditionally expected
    off-diagonal transition counts.
    """
    np.random.seed(123475)

    # sample a random reversible rate matrix
    state_count = 3
    Q = sample_reversible_rate_matrix(state_count)
    p = equilibrium(Q)
    # expected rate at equilibrium; used to rescale Q to unit expected rate
    expected_rate = -p.dot(np.diag(Q))
    print("expected rate:", expected_rate)
    Q = Q / expected_rate
    # the downstream JSON interface expects zeros on the diagonal
    np.fill_diagonal(Q, 0)

    # use ad hoc data: per-site observation masks for the 4 nodes (root + 3 tips)
    probability_array = [
        [[1, 1, 1], [1, 0, 0], [1, 0, 0], [1, 0, 0]],
        [[1, 1, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]],
        [[1, 1, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]],
    ]
    site_weights = [0.7, 0.2, 0.1]
    edges = [[0, 1], [0, 2], [0, 3]]
    coefficients = [0.01, 0.01, 0.01]
    d = {
        "model_and_data": {
            "edges": edges,
            "edge_rate_coefficients": coefficients,
            "rate_matrix": Q.tolist(),
            "probability_array": probability_array,
        },
        "site_reduction": {"aggregation": site_weights},
    }
    print(d)

    # iterate EM updates, feeding each update back into the model
    for _ in range(100):
        s = arbplf_em_update(json.dumps(d))
        df = pd.read_json(StringIO(s), orient="split", precise_float=True)
        y = df.value.values.tolist()
        d["model_and_data"]["edge_rate_coefficients"] = y
        print("coefficients updated by EM:", y)

    # polish the EM estimate with Newton refinement
    s = arbplf_newton_refine(json.dumps(d))
    df = pd.read_json(StringIO(s), orient="split", precise_float=True)
    y = df.value.values.tolist()
    print("coefficients updated by newton refinement:", y)

    # sum conditionally expected counts over all off-diagonal transitions
    d["trans_reduction"] = {"selection": [[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]], "aggregation": "sum"}
    d["model_and_data"]["edge_rate_coefficients"] = y

    s = arbplf_trans(json.dumps(d))
    df = pd.read_json(StringIO(s), orient="split", precise_float=True)
    y = df.value.values.tolist()
    print("conditionally expected transition counts:", y)
Пример #3
0
def main():
    """Plot log likelihood against x = exp(-0.5 t) and refine two local optima.

    Sweeps 100 values of x in (0, 1], evaluates the log likelihood at the
    corresponding branch length t = -2 log(x), saves the curve to out00.svg,
    then Newton-refines the two interior local optima from initial guesses
    on either side of the likelihood surface.
    """
    # sweep x in (0, 1]; t = -2 log(x) maps it to a branch length
    xs = np.linspace(1e-5, 1, 100)
    ts = -2 * np.log(xs)
    arr = []
    for t in ts:
        s = arbplf_ll(get_json_input(t))
        df = pd.read_json(StringIO(s), orient='split', precise_float=True)
        arr.append(df.value.values[0])
    plt.plot(xs, arr, 'blue')
    plt.ylabel("log likelihood")
    plt.xlabel("x = exp(-0.5 t)")
    plt.savefig('out00.svg', transparent=True)

    # local optima: refine from one initial guess near each optimum
    for i, t in enumerate((0.1, 6.0)):
        s = arbplf_newton_refine(get_json_input(t))
        df = pd.read_json(StringIO(s), orient='split', precise_float=True)
        u = df.value.values[0]
        print('local optimum', i, ':')
        print('  initial guess:', t)
        print('  refined isolated interior local optimum:')
        print('    t = {:.16}'.format(u))
        print('    x =', np.exp(-0.5 * u))