Example #1
import pygrank as pg


def test_optimizer_verbose():
    import io
    import sys
    # temporarily redirect stdout to capture the optimizer's verbose output
    prev_stdout = sys.stdout
    sys.stdout = io.StringIO()
    pg.optimize(loss=lambda p: (p[0]-2)**2+(p[1]-1)**4, max_vals=[5, 5], parameter_tol=1.E-8, verbose=True)
    output = sys.stdout.getvalue()
    sys.stdout = prev_stdout
    # verbose mode should have printed progress information
    assert len(output) > 0
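
The same capture can be written more defensively with the standard library's contextlib.redirect_stdout, which restores stdout even if the optimizer raises. A minimal sketch using the same pg.optimize call as above:

import io
from contextlib import redirect_stdout

import pygrank as pg

buffer = io.StringIO()
with redirect_stdout(buffer):  # stdout is restored on exit, even on exceptions
    pg.optimize(loss=lambda p: (p[0]-2)**2+(p[1]-1)**4, max_vals=[5, 5], parameter_tol=1.E-8, verbose=True)
assert len(buffer.getvalue()) > 0
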
Example #2
import pygrank as pg


def test_optimizer():
    # https://en.wikipedia.org/wiki/Test_functions_for_optimization

    # a simple function
    p = pg.optimize(loss=lambda p: (p[0] - 2)**2 + (p[1] - 1)**4,
                    max_vals=[5, 5],
                    parameter_tol=1.E-8)
    assert abs(p[0] - 2) < 1.E-6
    assert abs(p[1] - 1) < 1.E-6

    # a simple function with redundant inputs and deviation_tol instead of parameter_tol
    p = pg.optimize(loss=lambda p: (p[0] - 2)**2 + (p[1] - 1)**4,
                    max_vals=[5, 5, 5],
                    min_vals=[0, 0, 5],
                    deviation_tol=1.E-6,
                    divide_range="shrinking")
    assert abs(p[0] - 2) < 1.E-3
    assert abs(p[1] - 1) < 1.E-3

    # Beale function
    p = pg.optimize(loss=lambda p: (1.5 - p[0] + p[0] * p[1])**2 +
                    (2.25 - p[0] + p[0] * p[1]**2)**2 +
                    (2.625 - p[0] + p[0] * p[1]**3)**2,
                    max_vals=[4.5, 4.5],
                    min_vals=[-4.5, -4.5],
                    parameter_tol=1.E-8)
    assert abs(p[0] - 3) < 1.E-6
    assert abs(p[1] - 0.5) < 1.E-6

    # Booth function
    p = pg.optimize(loss=lambda p: (p[0] + 2 * p[1] - 7)**2 +
                    (2 * p[0] + p[1] - 5)**2,
                    max_vals=[10, 10],
                    min_vals=[-10, -10],
                    parameter_tol=1.E-8)
    assert abs(p[0] - 1) < 1.E-6
    assert abs(p[1] - 3) < 1.E-6

    # Beale function with depth instead of small divide range
    p = pg.optimize(loss=lambda p: (1.5 - p[0] + p[0] * p[1])**2 +
                    (2.25 - p[0] + p[0] * p[1]**2)**2 +
                    (2.625 - p[0] + p[0] * p[1]**3)**2,
                    max_vals=[4.5, 4.5],
                    min_vals=[-4.5, -4.5],
                    parameter_tol=1.E-8,
                    divide_range=2,
                    depth=100)
    assert abs(p[0] - 3) < 1.E-6
    assert abs(p[1] - 0.5) < 1.E-6
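
The keyword arguments exercised above (min_vals/max_vals bounds, parameter_tol, deviation_tol, divide_range, depth) appear to describe a search that repeatedly narrows per-parameter ranges. To make their roles concrete, here is an illustrative pure-Python sketch of a range-shrinking coordinate search in that spirit; the function name and structure are made up for illustration and this is not pygrank's actual implementation:

def range_search(loss, min_vals, max_vals, parameter_tol=1.E-8, divide_range=2.0):
    # Illustrative only: keep the best of a few evenly spaced candidates per
    # parameter, then shrink that parameter's range around the winner until
    # every range is narrower than parameter_tol.
    lo, hi = list(min_vals), list(max_vals)
    p = [(a + b) / 2 for a, b in zip(lo, hi)]
    while max(b - a for a, b in zip(lo, hi)) > parameter_tol:
        for i in range(len(p)):
            candidates = [lo[i] + k * (hi[i] - lo[i]) / 4 for k in range(5)]
            p[i] = min(candidates, key=lambda v: loss(p[:i] + [v] + p[i + 1:]))
            width = (hi[i] - lo[i]) / divide_range  # larger divide_range shrinks faster
            lo[i] = max(lo[i], p[i] - width / 2)
            hi[i] = min(hi[i], p[i] + width / 2)
    return p

p = range_search(lambda p: (p[0] - 2)**2 + (p[1] - 1)**4, [0, 0], [5, 5])
assert abs(p[0] - 2) < 1.E-6 and abs(p[1] - 1) < 1.E-6
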
Example #3
import pygrank as pg


def overlapping_community_detection(graph, known_members, top=None):
    # for small seed sets use a fixed personalized PageRank filter; otherwise tune one
    graph_filter = pg.PageRank(0.9) if len(known_members) < 50 \
        else pg.ParameterTuner().tune(graph, known_members)
    ranks = (pg.to_signal(graph, {v: 1 for v in known_members})
             >> pg.Sweep(graph_filter) >> pg.Normalize("range"))
    if top is not None:
        ranks = ranks * (1 - pg.to_signal(graph, {v: 1 for v in known_members}))  # set known member scores to zero
        return sorted(graph, key=lambda node: -ranks[node])[:top]  # return a specific number of top predictions
    # otherwise, pick the score threshold that minimizes the conductance of the induced community
    threshold = pg.optimize(max_vals=[1],
                            loss=lambda p: pg.Conductance(graph)(pg.Threshold(p[0]).transform(ranks)))[0]
    known_members = set(known_members)
    return [v for v in graph if ranks[v] > threshold and v not in known_members]
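
A minimal usage sketch, assuming a networkx graph (pygrank operates on networkx-style graphs); the toy graph and seed nodes below are made up for illustration:

import networkx as nx

# two loosely connected triangles; the seeds "a" and "b" sit in the first one
graph = nx.Graph([("a", "b"), ("b", "c"), ("a", "c"),
                  ("c", "d"), ("d", "e"), ("e", "f"), ("d", "f")])
print(overlapping_community_detection(graph, ["a", "b"]))         # threshold-based expansion
print(overlapping_community_detection(graph, ["a", "b"], top=2))  # two best candidates
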
Example #4
import pygrank as pg


def test_optimizer():
    # https://en.wikipedia.org/wiki/Test_functions_for_optimization

    # a simple function
    p = pg.optimize(loss=lambda p: (p[0]-2)**2+(p[1]-1)**4, max_vals=[5, 5], parameter_tol=1.E-8, verbose=False)
    assert abs(p[0]-2) < 1.E-6
    assert abs(p[1]-1) < 1.E-6

    # the same function with a step partition strategy
    p = pg.optimize(loss=lambda p: (p[0]-2)**2+(p[1]-1)**4, max_vals=[5, 5], parameter_tol=1.E-8, verbose=False,
                    partition_strategy="step", partitions=0.01)
    assert abs(p[0]-2) < 1.E-6
    assert abs(p[1]-1) < 1.E-6

    # the same function with randomized step partitions
    p = pg.optimize(loss=lambda p: (p[0]-2)**2+(p[1]-1)**4, max_vals=[5, 5], parameter_tol=1.E-8, verbose=False,
                    partition_strategy="step", partitions=0.01, randomize=True)
    assert abs(p[0]-2) < 1.E-6
    assert abs(p[1]-1) < 1.E-6

    # a simple function with redundant inputs and deviation_tol instead of parameter_tol
    p = pg.optimize(loss=lambda p: (p[0]-2)**2+(p[1]-1)**4,
                    max_vals=[5, 5, 5], min_vals=[0, 0, 5], deviation_tol=1.E-6, shrink_strategy="shrinking", verbose=False)
    assert abs(p[0]-2) < 1.E-1
    assert abs(p[1]-1) < 1.E-1
    # TODO: check why shrinking is not as good

    # Beale function
    beale = lambda p: (1.5-p[0]+p[0]*p[1])**2+(2.25-p[0]+p[0]*p[1]**2)**2+(2.625-p[0]+p[0]*p[1]**3)**2
    p = pg.optimize(loss=beale,
                    max_vals=[4.5, 4.5], min_vals=[-4.5, -4.5], parameter_tol=1.E-8, verbose=False)
    assert abs(p[0]-3) < 1.E-6
    assert abs(p[1]-0.5) < 1.E-6

    # noisy Beale function: optimize a noisy objective but validate on the clean one
    from random import random, seed
    seed(0)
    noisy_beale = lambda p: beale(p) + random()
    p = pg.optimize(loss=noisy_beale,
                    validation_loss=beale,
                    max_vals=[4.5, 4.5], min_vals=[-4.5, -4.5], parameter_tol=1.E-8, verbose=False)
    assert abs(p[0] - 3) < .1
    assert abs(p[1] - 0.5) < .1

    # Beale function with nelder mead
    p = pg.nelder_mead(loss=beale,
                       max_vals=[4.5, 4.5], min_vals=[-4.5, -4.5], parameter_tol=1.E-8, verbose=False)
    assert abs(p[0] - 3) < 1.E-6
    assert abs(p[1] - 0.5) < 1.E-6

    # Beale function with lbfgsb
    p = pg.lbfgsb(loss=beale,
                  max_vals=[4.5, 4.5], min_vals=[-4.5, -4.5], parameter_tol=1.E-8, verbose=False)
    assert abs(p[0] - 3) < 1.E-6
    assert abs(p[1] - 0.5) < 1.E-6

    # Booth function
    p = pg.optimize(loss=lambda p: (p[0]+2*p[1]-7)**2+(2*p[0]+p[1]-5)**2,
                    max_vals=[10, 10], min_vals=[-10, -10], parameter_tol=1.E-8, verbose=False)
    assert abs(p[0] - 1) < 1.E-6
    assert abs(p[1] - 3) < 1.E-6

    # Beale function with depth instead of small divide range
    p = pg.optimize(loss=beale,
                    max_vals=[4.5, 4.5], min_vals=[-4.5, -4.5], parameter_tol=1.E-8, divide_range=2, depth=100, verbose=False)
    assert abs(p[0] - 3) < 1.E-6
    assert abs(p[1] - 0.5) < 1.E-6
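
Since pg.optimize, pg.nelder_mead, and pg.lbfgsb are each called above with identical arguments on the Beale function, the comparison can also be condensed into a loop; a minimal sketch of that pattern:

import pygrank as pg

beale = lambda p: (1.5-p[0]+p[0]*p[1])**2 + (2.25-p[0]+p[0]*p[1]**2)**2 + (2.625-p[0]+p[0]*p[1]**3)**2
for optimizer in [pg.optimize, pg.nelder_mead, pg.lbfgsb]:
    # each call mirrors the per-optimizer calls in the test above
    p = optimizer(loss=beale, max_vals=[4.5, 4.5], min_vals=[-4.5, -4.5], parameter_tol=1.E-8, verbose=False)
    assert abs(p[0] - 3) < 1.E-6
    assert abs(p[1] - 0.5) < 1.E-6
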
"""This is an example script to verify the efficacy of the library's optimizer on the Beale function.
Related tests also implement the same functionality."""
import pygrank as pg

# a simple function with optimum at (2, 1)
p = pg.optimize(loss=lambda p: (p[0] - 2) ** 2 + (p[1] - 1) ** 4, max_vals=[5, 5], parameter_tol=1.E-8)
print(p)
assert abs(p[0] - 2) < 1.E-6
assert abs(p[1] - 1) < 1.E-6
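
The Beale function exercised by the tests above can be checked the same way; per those tests, its optimum lies at (3, 0.5):

# Beale function, with the same bounds and tolerance as in the tests
beale = lambda p: (1.5 - p[0] + p[0] * p[1]) ** 2 + (2.25 - p[0] + p[0] * p[1] ** 2) ** 2 + (2.625 - p[0] + p[0] * p[1] ** 3) ** 2
p = pg.optimize(loss=beale, max_vals=[4.5, 4.5], min_vals=[-4.5, -4.5], parameter_tol=1.E-8)
print(p)
assert abs(p[0] - 3) < 1.E-6
assert abs(p[1] - 0.5) < 1.E-6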