Code Example #1
def infinity_loss(Domain, Start):
    # Set Method
    Method = HeunEuler(Domain=Domain, P=BoxProjection(lo=0), Delta0=1e-3)

    # Calculate Initial Gap
    gap_0 = Domain.gap_rplus(Start)

    # Set Options
    Init = Initialization(Step=-1e-10)
    Term = Termination(MaxIter=25000,
                       Tols=[(Domain.gap_rplus, 1e-6 * gap_0)],
                       verbose=False)
    Repo = Reporting(Requests=[Domain.gap_rplus, 'Data', Domain.F])
    Misc = Miscellaneous(Timer=False)
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Start Solver
    SOI_Results = Solve(Start, Method, Domain, Options)

    # Approximate the path integral of F along the solver trajectory
    Data = SOI_Results.PermStorage['Data']
    dx = np.diff(Data, axis=0)
    F = SOI_Results.PermStorage[Domain.F][:-1]

    return -np.sum(F * dx)
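
The value returned above is a left Riemann-sum approximation of the path integral -∫ F·dx along the stored solver trajectory. A minimal self-contained sketch of the same computation, where path_integral and F_func are illustrative names (not part of VI-Solver):

import numpy as np

def path_integral(trajectory, F_func):
    """Left Riemann-sum approximation of -integral F . dx along a trajectory of iterates."""
    trajectory = np.asarray(trajectory)
    dx = np.diff(trajectory, axis=0)                     # displacements between iterates
    F = np.array([F_func(x) for x in trajectory[:-1]])   # field at the left endpoints
    return -np.sum(F * dx)

# toy usage: F(x) = x along the straight path from (0, 0) to (1, 1); exact value is -1
traj = np.linspace(0, 1, 101)[:, None] * np.ones(2)
print(path_integral(traj, lambda x: x))                  # approximately -1.0
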
Code Example #2
def Demo():

    # __APPROXIMATE_LINEAR_FIELD__##############################################

    # Load Dummy Data
    X = np.random.rand(1000, 2) * 2 - 1
    scale = np.array([0.8, 1.2])
    y = 0.5 * np.sum(scale * X**2, axis=1)

    # Construct Field
    ALF = ApproxLF(X=X, dy=y, eps=1e-8)

    # Set Method
    # Method = Euler(Domain=ALF,FixStep=True)
    Method = HeunEuler(Domain=ALF, Delta0=1e-2)

    # Initialize Starting Field
    A = np.array([[0, 1], [-1, 0]])
    # A = np.eye(LF.XDim)
    # A = np.random.rand(LF.XDim,LF.XDim)
    # A = np.array([[5,0.],[0.,5]])
    b = np.zeros(ALF.XDim)
    Start = np.hstack([A.flatten(), b])
    print(Start)

    # Set Options
    Init = Initialization(Step=-1.0)
    Term = Termination(MaxIter=1000)  #,Tols=[(LF.error,1e-10)])
    Repo = Reporting(Requests=[ALF.error, 'Step', 'F Evaluations', 'Data'])
    Misc = Miscellaneous()
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Print Stats
    PrintSimStats(ALF, Method, Options)

    # Start Solver
    tic = time.time()
    Results = Solve(Start, Method, ALF, Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options, Results, Method, toc)

    error = np.asarray(Results.PermStorage[ALF.error])
    params = Results.PermStorage['Data'][-1]

    A, b = ALF.UnpackFieldParams(params)

    print(A)
    print(b)
    print(error[-1])
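
Most of these demos hand the domain to HeunEuler(..., Delta0=...), an adaptive-step solver. The sketch below shows the general idea of such an embedded Euler/Heun pair, where the disagreement between the first- and second-order estimates drives the step size; it is only an illustration of the technique, not VI-Solver's implementation, and heun_euler_step, delta, and safety are made-up names and parameters:

import numpy as np

def heun_euler_step(F, x, h, delta=1e-2, safety=0.9):
    """One step of an embedded Euler (1st order) / Heun (2nd order) pair with step control."""
    k1 = F(x)
    k2 = F(x + h * k1)
    x_euler = x + h * k1                  # first-order estimate
    x_heun = x + 0.5 * h * (k1 + k2)      # second-order estimate
    err = np.linalg.norm(x_heun - x_euler)
    # grow or shrink the step so the local error estimate stays near delta
    h_new = h * min(2.0, max(0.1, safety * np.sqrt(delta / (err + 1e-16))))
    if err <= delta:
        return x_heun, h_new              # accept the higher-order estimate
    return x, h_new                       # reject the step; caller retries with h_new

# toy usage: integrate dx/dt = -x (i.e. descend the field F(x) = x)
x, h = np.array([1.0, 1.0]), 0.5
for _ in range(50):
    x, h = heun_euler_step(lambda y: -y, x, h)
print(x)                                  # small vector near the equilibrium at the origin
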
Code Example #3
File: Demo_MovieLens.py Project: gart17/VI-Solver
def score_matrixfac(train, test, mask, step=1e-5, iters=100, k=500):
    # Define Domain
    n, d = train.shape
    sh_P = (n, k)
    sh_Q = (d, k)
    Domain = MatrixFactorization(Data=train, sh_P=sh_P, sh_Q=sh_Q)

    # Set Method
    # Method = Euler(Domain=Domain,FixStep=True)
    Method = HeunEuler(Domain=Domain, Delta0=1e-1, MinStep=1e-7, MaxStep=1e-2)

    # Initialize Starting Point
    globalmean = train.sum() / train.nnz
    scale = np.sqrt(globalmean / k)
    # P = np.random.rand(n,k)
    # Q = np.random.rand(d,k)
    P = scale * np.ones(sh_P)
    Q = scale * np.ones(sh_Q)
    Start = np.hstack((P.flatten(), Q.flatten()))

    # Set Options
    Init = Initialization(Step=step)
    Term = Termination(MaxIter=iters)
    Repo = Reporting(Requests=['Step', 'F Evaluations'])
    Misc = Miscellaneous()
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Print Stats
    PrintSimStats(Domain, Method, Options)

    # Start Solver
    tic = time.time()
    Results = Solve(Start, Method, Domain, Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options, Results, Method, toc)

    # Retrieve result
    parameters = np.asarray(Results.TempStorage['Data'][-1])
    pred = Domain.predict(parameters)

    return rmse(pred, test, mask)
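
rmse(pred, test, mask) is called here but not defined in this listing. A plausible masked-RMSE helper consistent with how it is used (a hypothetical stand-in, not the project's actual function):

import numpy as np

def rmse(pred, test, mask):
    """Root-mean-square error restricted to the entries selected by a boolean mask."""
    diff = (np.asarray(pred) - np.asarray(test))[np.asarray(mask, dtype=bool)]
    return np.sqrt(np.mean(diff ** 2))

# toy usage: error measured only on the three observed (masked) entries
pred = np.array([[3.0, 4.0], [5.0, 1.0]])
test = np.array([[3.5, 0.0], [4.0, 1.0]])
mask = np.array([[True, False], [True, True]])
print(rmse(pred, test, mask))             # sqrt((0.25 + 1.0 + 0.0) / 3) ~= 0.645
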
Code Example #4
File: Demo_MovieLens.py Project: gart17/VI-Solver
def score_svdmethod(train,
                    test,
                    mask,
                    tau=6e3,
                    step=1.9,
                    fixstep=True,
                    iters=250):
    # Define Domain
    Domain = SVDMethod(Data=train, tau=tau)

    # Set Method
    # Method = Euler(Domain=Domain,FixStep=fixstep)
    Method = HeunEuler(Domain=Domain, Delta0=1e2, MinStep=1e0, MaxStep=1e3)

    # Initialize Starting Point
    # globalmean = train.sum()/train.nnz
    # Start = globalmean*np.ones(train.shape)
    Start = np.zeros(train.shape).flatten()

    # Set Options
    Init = Initialization(Step=step)
    Term = Termination(MaxIter=iters, Tols=[(Domain.rel_error, 0.2)])
    Repo = Reporting(Requests=[Domain.rel_error, 'Step', 'F Evaluations'])
    Misc = Miscellaneous()
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Print Stats
    PrintSimStats(Domain, Method, Options)

    # Start Solver
    tic = time.time()
    Results = Solve(Start, Method, Domain, Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options, Results, Method, toc)

    # Retrieve result
    Y = np.asarray(Results.TempStorage['Data'][-1]).reshape(train.shape)
    pred = Domain.shrink(Y, Domain.tau)

    return rmse(pred, test, mask), Results.PermStorage[Domain.rel_error]
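
Domain.shrink(Y, Domain.tau) recovers the low-rank prediction. Assuming it implements the standard singular-value soft-thresholding operator, which fits the SVDMethod context but is not confirmed from this listing, a self-contained sketch would be:

import numpy as np

def shrink(Y, tau):
    """Singular-value soft-thresholding (hypothetical stand-in): shrink every singular value of Y by tau."""
    U, s, Vt = np.linalg.svd(Y, full_matrices=False)
    s_shrunk = np.maximum(s - tau, 0.0)
    return U @ (s_shrunk[:, None] * Vt)

# toy usage: thresholding typically reduces the rank
Y = np.random.rand(5, 4)
print(np.linalg.matrix_rank(Y), np.linalg.matrix_rank(shrink(Y, 0.5)))
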
Code Example #5
def Demo():

    #__MHPH__##################################################

    trials = range(1000,8000+1,2000)
    MHPH_Results = [[] for i in trials]

    for n in trials:

        #Define Dimension and Domain
        Domain = MHPH(Dim=n)

        # Set Method
        Method = HeunEuler(Domain=Domain,P=EntropicProjection(),Delta0=1e-1)
        # Method = RipCurl(Domain=Domain,P=EntropicProjection(),factor=0.1,FixStep=True)

        # Set Options
        Init = Initialization(Step=-1)
        Term = Termination(MaxIter=100,Tols=[[Domain.gap_simplex,1e-3]])
        Repo = Reporting(Requests=[Domain.gap_simplex])
        Misc = Miscellaneous()
        Options = DescentOptions(Init,Term,Repo,Misc)

        #Initialize Starting Point
        Start = np.ones(Domain.Dim)/np.double(Domain.Dim)

        # Print Stats
        PrintSimStats(Domain,Method,Options)

        tic = time.time()
        ind = (n - 1000) // 2000  # map n in {1000, 3000, 5000, 7000} to index 0..3
        MHPH_Results[ind] = Solve(Start,Method,Domain,Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options,MHPH_Results[ind],Method,toc)
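
The simplex demos start from the uniform distribution and pass P=EntropicProjection(). One common way to realize a field step that stays on the simplex is the entropic (multiplicative-weights) update sketched below; this illustrates the general technique only and is not necessarily what VI-Solver's EntropicProjection does:

import numpy as np

def entropic_step(x, step, F_x):
    """Mirror-descent step on the probability simplex with the negative-entropy mirror map.

    Illustrative only; not necessarily VI-Solver's EntropicProjection.
    """
    y = x * np.exp(step * F_x)            # multiplicative update
    return y / y.sum()                    # renormalize back onto the simplex

# toy usage: uniform start (as in the demo), negative step as in Initialization(Step=-1)
x = np.ones(4) / 4.0
print(entropic_step(x, -0.5, np.array([1.0, 0.0, 0.0, 0.0])))
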
Code Example #6
def Demo():

    #__SERVICE_ORIENTED_INTERNET__##############################################
    N = 10  # number of possible maps
    T = 1000  # number of time steps
    eta = .01  # learning rate
    Ot = 0  # reference vector will be origin for all maps

    # Define Domains and Compute Equilbria
    Domains = []
    X_Stars = []
    for n in range(N):
        # Create Domain
        Network = CreateRandomNetwork(m=3, n=2, o=2, seed=n)
        Domain = SOI(Network=Network, alpha=2)

        # Record Domain
        Domains += [Domain]

        # Set Method
        Method = HeunEuler(Domain=Domain, P=BoxProjection(lo=0), Delta0=1e-3)

        # Initialize Starting Point
        Start = np.zeros(Domain.Dim)

        # Calculate Initial Gap
        gap_0 = Domain.gap_rplus(Start)

        # Set Options
        Init = Initialization(Step=-1e-10)
        Term = Termination(MaxIter=25000,
                           Tols=[(Domain.gap_rplus, 1e-6 * gap_0)])
        Repo = Reporting(Requests=[Domain.gap_rplus])
        Misc = Miscellaneous()
        Options = DescentOptions(Init, Term, Repo, Misc)

        # Print Stats
        PrintSimStats(Domain, Method, Options)

        # Start Solver
        tic = time.time()
        SOI_Results = Solve(Start, Method, Domain, Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options, SOI_Results, Method, toc)

        # Record X_Star
        X_Star = SOI_Results.TempStorage['Data'][-1]
        X_Stars += [X_Star]
    X_Stars = np.asarray(X_Stars)
    X_Opt = np.mean(X_Stars, axis=0)
    Ot = Ot * np.ones(X_Stars.shape[1])

    print('Starting Online Learning')

    # Set First Prediction
    X = np.zeros(X_Stars.shape[1])

    # Select First Domain
    idx = np.argmax(np.linalg.norm(X_Stars - X, axis=1))

    distances = []
    loss_infs = []
    regret_standards = []
    regret_news = []
    ts = range(T)
    for t in ts:
        print('t = ' + str(t))
        # retrieve domain
        Domain = Domains[idx]
        # retrieve equilibrium
        equi = X_Stars[idx]
        # calculate distance
        distances += [np.linalg.norm(equi - X)]
        # calculate infinity loss
        loss_infs += [infinity_loss(Domain, X)]
        # calculate standard regret
        ci_predict = ContourIntegral(Domain, LineContour(Ot, X))
        predict_loss = integral(ci_predict)
        ci_opt = ContourIntegral(Domain, LineContour(Ot, X_Opt))
        predict_opt = integral(ci_opt)
        regret_standards += [predict_loss - predict_opt]
        # calculate new regret
        ci_new = ContourIntegral(Domain, LineContour(X_Opt, X))
        regret_news += [integral(ci_new)]
        # update prediction
        X = BoxProjection(lo=0).P(X, -eta, Domain.F(X))
        # update domain
        idx = np.argmax(np.linalg.norm(X_Stars - X, axis=1))
    ts_p1 = range(1, T + 1)
    # running averages: cumulative sums divided by the number of steps so far
    distances_avg = np.divide(np.cumsum(distances), ts_p1)
    loss_infs_avg = np.divide(np.cumsum(loss_infs), ts_p1)
    regret_standards_avg = np.divide(np.cumsum(regret_standards), ts_p1)
    regret_news_avg = np.divide(np.cumsum(regret_news), ts_p1)

    np.savez_compressed('NoRegret.npz',
                        d_avg=distances_avg,
                        linf_avg=loss_infs_avg,
                        rs_avg=regret_standards_avg,
                        rn_avg=regret_news_avg)

    plt.subplot(2, 1, 1)
    plt.plot(ts, distances_avg, 'k', label='Average Distance')
    plt.title('Demonstration of No-Regret on MLN')
    plt.ylabel('Euclidean Distance')
    plt.legend()

    plt.subplot(2, 1, 2)
    plt.plot(ts, loss_infs_avg, 'k--', label=r'loss$_{\infty}$')
    plt.plot(ts,
             regret_standards_avg,
             'r--o',
             markevery=T // 20,
             label=r'regret$_{s}$')
    plt.plot(ts, regret_news_avg, 'b-', label=r'regret$_{n}$')
    plt.xlabel('Time Step')
    plt.ylabel('Aggregate System-Wide Loss')
    plt.xlim([0, T])
    plt.ylim([-500, 5000])
    plt.legend()

    plt.savefig('NoRegret')
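
The online update X = BoxProjection(lo=0).P(X, -eta, Domain.F(X)) is, judging from how it is called, a projected step that stays in the nonnegative orthant. Under that assumption (a step of size -eta along F followed by clipping at the lower bound), the update reduces to the following sketch:

import numpy as np

def box_projected_step(x, eta, F_x, lo=0.0):
    """Projected online step: move against the field, then clip to the box [lo, inf)."""
    return np.maximum(x - eta * F_x, lo)

# toy usage
x = np.array([0.2, 0.0, 1.5])
print(box_projected_step(x, 0.01, np.array([30.0, -5.0, 1.0])))   # [0.   0.05 1.49]
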
Code Example #7
def Demo():

    # __POWER_ITERATION__##################################################

    # Define Domain
    A = np.asarray([[-4, 10], [7, 5]])
    A = A.T.dot(A)  # symmetrize (A^T A is symmetric positive semidefinite)
    # mars = np.load('big_means.npy')
    # A = mars.T.dot(mars)
    eigs = np.linalg.eigvals(A)
    rho = max(eigs) - min(eigs)
    rank = np.count_nonzero(eigs)
    # Domain = PowerIteration(A=A)
    Domain = Rayleigh(A=A)

    # Set Method
    Method_Standard = Euler(Domain=Domain,
                            FixStep=True,
                            P=NormBallProjection())

    # Initialize Starting Point
    Start = np.ones(Domain.Dim)

    # Set Options
    Init = Initialization(Step=-1e-3)
    Term = Termination(MaxIter=100, Tols=[(Domain.res_norm, 1e-6)])
    Repo = Reporting(Requests=[
        Domain.res_norm, 'Step', 'F Evaluations', 'Projections', 'Data'
    ])
    Misc = Miscellaneous()
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Print Stats
    PrintSimStats(Domain, Method_Standard, Options)

    # Start Solver
    tic = time.time()
    Results_Standard = Solve(Start, Method_Standard, Domain, Options)
    toc_standard = time.time() - tic

    # Print Results
    PrintSimResults(Options, Results_Standard, Method_Standard, toc_standard)

    # data_standard = Results_Standard.PermStorage['Data']
    # eigval_standard = (A.dot(data_standard[-1])/data_standard[-1]).mean()
    # eigvec_standard = data_standard[-1]
    res_standard = Results_Standard.PermStorage[Domain.res_norm]

    # Set Method
    # Method_CK = CashKarp(Domain=Domain,Delta0=1e-4,P=NormBallProjection())
    Method_CK = HeunEuler(Domain=Domain, Delta0=1e-4, P=NormBallProjection())

    # Print Stats
    PrintSimStats(Domain, Method_CK, Options)

    # Start Solver
    tic = time.time()
    Results_CK = Solve(Start, Method_CK, Domain, Options)
    toc_CK = time.time() - tic

    # Print Results
    PrintSimResults(Options, Results_CK, Method_CK, toc_CK)

    # data_CK = Results_CK.PermStorage['Data']
    # eigval_CK = (A.dot(data_CK[-1])/data_CK[-1]).mean()
    # eigvec_CK = data_CK[-1]
    res_CK = Results_CK.PermStorage[Domain.res_norm]

    # Set Method
    # Method_CKPS = CashKarp_PhaseSpace(Domain=Domain,Delta0=1e-4,
    #                                   P=NormBallProjection())
    Method_CKPS = HeunEuler_PhaseSpace(Domain=Domain,
                                       Delta0=1e-1,
                                       P=NormBallProjection())

    # Print Stats
    PrintSimStats(Domain, Method_CKPS, Options)

    # Start Solver
    tic = time.time()
    Results_CKPS = Solve(Start, Method_CKPS, Domain, Options)
    toc_CKPS = time.time() - tic

    # Print Results
    PrintSimResults(Options, Results_CKPS, Method_CKPS, toc_CKPS)

    # data_CKPS = Results_CKPS.PermStorage['Data']
    # eigval_CKPS = (A.dot(data_CKPS[-1])/data_CKPS[-1]).mean()
    # eigvec_CKPS = data_CKPS[-1]
    res_CKPS = Results_CKPS.PermStorage[Domain.res_norm]

    # tic = time.time()
    # eigval_NP, eigvec_NP = eigh(A,eigvals=(Domain.Dim-1,Domain.Dim-1))
    # toc_NP = time.time() - start

    # Plot Results
    fig = plt.figure()
    ax = fig.add_subplot(2, 1, 1)

    # label = 'Standard Power Iteration with scaling' +\
    #     r' $A \cdot v / ||A \cdot v||$'
    label = 'Standard'
    ax.plot(res_standard, label=label)

    fevals_CK = Results_CK.PermStorage['F Evaluations'][-1]
    # label = Method_CK.__class__.__name__+r' Power Iteration'
    # label += r' $\Delta_0=$'+'{:.0e}'.format(Method_CK.Delta0)
    label = 'CK'
    x = np.linspace(0, fevals_CK, len(res_CK))
    ax.plot(x, res_CK, label=label)

    fevals_CKPS = Results_CKPS.PermStorage['F Evaluations'][-1]
    # label = Method_CKPS.__class__.__name__+' Power Iteration'
    # label += r' $\Delta_0=$'+'{:.0e}'.format(Method_CKPS.Delta0)
    label = 'CKPS'
    x = np.linspace(0, fevals_CKPS, len(res_CKPS))
    ax.plot(x, res_CKPS, '-.', label=label)

    xlabel = r'# of $A \cdot v$ Evaluations'
    ax.set_xlabel(xlabel)

    ylabel = r'Norm of residual ($||\frac{A \cdot v}{||A \cdot v||}$'
    ylabel += r'$ - \frac{v}{||v||}||$)'
    ax.set_ylabel(ylabel)

    sizestr = str(A.shape[0]) + r' $\times$ ' + str(A.shape[1])
    if rho > 100:
        rhostr = r'$\rho(A)=$' + '{:.0e}'.format(rho)
    else:
        rhostr = r'$\rho(A)=$' + str(rho)
    rnkstr = r'$rank(A)=$' + str(rank)
    plt.title(sizestr + ' Matrix with ' + rhostr + ', ' + rnkstr)

    ax.legend()

    xlim = min(max(len(res_standard), fevals_CK, fevals_CKPS), Term.Tols[0])
    xlim = int(np.ceil(xlim / 10.) * 10)
    ax.set_xlim([0, xlim])

    ax.set_yscale('log', nonposy='clip')

    ax2 = fig.add_subplot(2, 1, 2)

    # label = 'Standard Power Iteration with scaling' +\
    #     r' $A \cdot v / ||A \cdot v||$'
    label = 'Standard'
    ax2.plot(res_standard, label=label)

    # label = Method_CK.__class__.__name__+r' Power Iteration'
    # label += r' $\Delta_0=$'+'{:.0e}'.format(Method_CK.Delta0)
    label = 'CK'
    ax2.plot(res_CK, label=label)

    # label = Method_CKPS.__class__.__name__+' Power Iteration'
    # label += r' $\Delta_0=$'+'{:.0e}'.format(Method_CKPS.Delta0)
    label = 'CKPS'
    ax2.plot(res_CKPS, '-.', label=label)

    xlabel = r'# of Iterations'
    ax2.set_xlabel(xlabel)

    ylabel = r'Norm of residual ($||\frac{A \cdot v}{||A \cdot v||}$'
    ylabel += r'$ - \frac{v}{||v||}||$)'
    ax2.set_ylabel(ylabel)

    ax2.legend()

    xlim = min(max(len(res_standard), len(res_CK), len(res_CKPS)),
               Term.Tols[0])
    xlim = int(np.ceil(xlim / 10.) * 10)
    ax2.set_xlim([0, xlim])

    ax2.set_yscale('log', nonposy='clip')

    plt.show()

    embed()
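
For comparison, the residual plotted above, ||Av/||Av|| - v/||v||||, can be reproduced with a plain power iteration that needs none of the VI-Solver machinery; a minimal sketch:

import numpy as np

def power_iteration(A, v0, iters=100):
    """Standard power iteration; records the residual ||Av/||Av|| - v/||v|||| per step."""
    v = v0 / np.linalg.norm(v0)
    residuals = []
    for _ in range(iters):
        Av = A.dot(v)
        v_new = Av / np.linalg.norm(Av)
        residuals.append(np.linalg.norm(v_new - v))
        v = v_new
    return v, residuals

A = np.array([[-4., 10.], [7., 5.]])
A = A.T.dot(A)                            # symmetric test matrix, as in the demo
v, res = power_iteration(A, np.ones(2))
print(v, res[-1])                         # dominant eigenvector, near-zero residual
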
Code Example #8
File: Demo_DangLan.py Project: gart17/VI-Solver
def Demo():

    #__SPHERE__##################################################

    # Define Dimension and Domain
    Domain = Sphere(Dim=100)

    # Set Method
    Method = HeunEuler(Domain=Domain, P=IdentityProjection(), Delta0=1e-2)

    # Set Options
    Init = Initialization(Step=-1e-1)
    Term = Termination(MaxIter=1000, Tols=[[Domain.f_Error, 1e-3]])
    Repo = Reporting(Requests=[Domain.f_Error])
    Misc = Miscellaneous()
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Initialize Starting Point
    Start = 100 * np.ones(Domain.Dim)

    # Print Stats
    PrintSimStats(Domain, Method, Options)

    # Start Solver
    tic = time.time()
    SPHERE_Results = Solve(Start, Method, Domain, Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options, SPHERE_Results, Method, toc)

    #__KOJIMA-SHINDO__##################################################

    # Define Dimension and Domain
    Domain = KojimaShindo()

    # Set Method
    Method = HeunEuler(Domain=Domain, P=EntropicProjection(), Delta0=1e-1)

    # Set Options
    Init = Initialization(Step=-1e-1)
    Term = Termination(MaxIter=1000, Tols=[[Domain.gap_simplex, 1e-3]])
    Repo = Reporting(Requests=[Domain.gap_simplex])
    Misc = Miscellaneous()
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Initialize Starting Point
    Start = np.ones(Domain.Dim) / np.double(Domain.Dim)

    # Print Stats
    PrintSimStats(Domain, Method, Options)

    # Start Solver
    tic = time.time()
    KS_Results = Solve(Start, Method, Domain, Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options, KS_Results, Method, toc)

    #__WATSON__##################################################

    trials = range(10)
    WAT_Results = [[] for i in trials]

    for p in trials:

        #Define Dimension and Domain
        Domain = Watson(Pos=p)

        # Set Method
        Method = HeunEuler(Domain=Domain, P=EntropicProjection(), Delta0=1e-1)

        # Set Options
        Init = Initialization(Step=-1e-1)
        Term = Termination(MaxIter=1000, Tols=[[Domain.gap_simplex, 1e-3]])
        Repo = Reporting(Requests=[Domain.gap_simplex])
        Misc = Miscellaneous()
        Options = DescentOptions(Init, Term, Repo, Misc)

        #Initialize Starting Point
        Start = np.ones(Domain.Dim) / np.double(Domain.Dim)

        # Print Stats
        PrintSimStats(Domain, Method, Options)

        tic = time.time()
        WAT_Results[p] = Solve(Start, Method, Domain, Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options, WAT_Results[p], Method, toc)

    #__SUN__##################################################

    trials = range(8000, 10000 + 1, 2000)
    Sun_Results = [[] for i in trials]

    for n in trials:

        #Define Dimension and Domain
        Domain = Sun(Dim=n)

        # Set Method
        Method = HeunEuler(Domain=Domain, P=EntropicProjection(), Delta0=1e-1)

        # Set Options
        Init = Initialization(Step=-1e-1)
        Term = Termination(MaxIter=1000, Tols=[[Domain.gap_simplex, 1e-3]])
        Repo = Reporting(Requests=[Domain.gap_simplex])
        Misc = Miscellaneous()
        Options = DescentOptions(Init, Term, Repo, Misc)

        #Initialize Starting Point
        Start = np.ones(Domain.Dim) / np.double(Domain.Dim)

        # Print Stats
        PrintSimStats(Domain, Method, Options)

        tic = time.time()
        ind = n // 2000 - 4  # integer index: maps n in {8000, 10000} to 0, 1
        Sun_Results[ind] = Solve(Start, Method, Domain, Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options, Sun_Results[ind], Method, toc)
Code Example #9
def Demo():

    #__SERVICE_ORIENTED_INTERNET__##############################################
    N = 10  # number of possible maps
    T = 1000  # number of time steps
    eta = 1e-3  # learning rate

    print('Creating Domains')

    # Define Domains and Compute Equilbria
    Domains = []
    X_Stars = []
    CurlBounds = []
    n = 0
    while len(Domains) < N:
        # Create Domain
        Network = CreateRandomNetwork(m=3, n=2, o=2, seed=None)
        Domain = SOI(Network=Network, alpha=2)

        # Initialize Starting Point
        Start = np.zeros(Domain.Dim)

        # Assert PD
        J = approx_jacobian(Domain.F, Start)
        eigs = np.linalg.eigvals(J + J.T)
        if not np.all(eigs > 0):
            continue
        _J = approx_jacobian(Domain.F, Start + 0.5)
        assert np.allclose(J, _J,
                           atol=1e-5)  # assert J is constant (unique for SOI)

        # Record Domain
        Domains += [Domain]

        # Calculate Initial Gap
        gap_0 = Domain.gap_rplus(Start)

        # Calculate Curl Bound
        CurlBounds += [
            np.sqrt(8) *
            svds(J, k=1, which='LM', return_singular_vectors=False).item()
        ]

        # Set Method
        Method = HeunEuler(Domain=Domain, P=BoxProjection(lo=0), Delta0=1e-3)

        # Set Options
        Init = Initialization(Step=-1e-10)
        Term = Termination(MaxIter=25000,
                           Tols=[(Domain.gap_rplus, 1e-6 * gap_0)])
        Repo = Reporting(Requests=[Domain.gap_rplus])
        Misc = Miscellaneous()
        Options = DescentOptions(Init, Term, Repo, Misc)

        # Print Stats
        PrintSimStats(Domain, Method, Options)

        # Start Solver
        tic = time.time()
        SOI_Results = Solve(Start, Method, Domain, Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options, SOI_Results, Method, toc)

        # Record X_Star
        X_Star = SOI_Results.TempStorage['Data'][-1]
        X_Stars += [X_Star]
        n += 1
    X_Stars = np.asarray(X_Stars)

    print('Starting Online Learning')

    # Set First Prediction
    X = np.zeros(X_Stars.shape[1])

    # Select First Domain
    idx = np.random.choice(len(Domains))

    # Domain Sequence
    idx_seq = []
    X_seq = []
    F_seq = []

    ts = range(T)
    for t in ts:
        print('t = ' + str(t), end='\r')
        # record prediction
        X_seq += [X]
        # record domain
        idx_seq += [idx]
        # retrieve domain
        Domain = Domains[idx]
        # record F
        FX = Domain.F(X)
        F_seq += [FX]
        # update prediction
        X = BoxProjection(lo=0).P(X, -eta, FX)
        # update domain
        idx = np.random.choice(len(Domains))

    print('Computing Optimal Strategy')

    weights = np.bincount(idx_seq, minlength=len(Domains)) / len(idx_seq)
    print('Weights: ', weights)

    # Compute Equilibrium of Average Domain
    Domain = AverageDomains(Domains, weights=weights)

    # Set Method
    Method = HeunEuler_PhaseSpace(Domain=Domain,
                                  P=BoxProjection(lo=0),
                                  Delta0=1e-5)

    # Initialize Starting Point
    Start = np.zeros(Domain.Dim)

    # Assert PD (a weighted sum of PD matrices is PD, but it doesn't hurt to check)
    J = approx_jacobian(Domain.F, Start)
    eigs = np.linalg.eigvals(J + J.T)
    assert np.all(eigs > 0)
    sigma = min(eigs)

    # Calculate Initial Gap
    gap_0 = Domain.gap_rplus(Start)

    # Set Options
    Init = Initialization(Step=-1e-10)
    Term = Termination(MaxIter=25000, Tols=[(Domain.gap_rplus, 1e-10 * gap_0)])
    Repo = Reporting(Requests=[Domain.gap_rplus])
    Misc = Miscellaneous()
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Print Stats
    PrintSimStats(Domain, Method, Options)

    # Start Solver
    tic = time.time()
    SOI_Results = Solve(Start, Method, Domain, Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options, SOI_Results, Method, toc)

    print('Computing Regrets')

    # Record X_Opt
    X_Opt = SOI_Results.TempStorage['Data'][-1]

    # Record constants for bounds
    L = np.sqrt(np.mean(np.linalg.norm(F_seq, axis=1)**2.))
    # B = np.linalg.norm(X_Opt)
    B = 2. * np.max(np.linalg.norm(X_Stars, axis=1))
    eta_opt = B / (L * np.sqrt(2 * T))
    bound_opt = B * L * np.sqrt(2 * T)
    reg_bound = (B**2) / (2 * eta) + eta * T * L**2

    opt_distances = []
    equi_distances = []
    regret_standards = []
    regret_news = []
    Fnorms = []
    stokes_exact = []
    stokes = []
    areas_exact = []
    areas = []
    ts = range(T)
    for t in ts:
        print('t = ' + str(t), end='\r')
        idx = idx_seq[t]
        X = X_seq[t]
        # retrieve domain
        Domain = Domains[idx]
        # retrieve equilibrium / reference vector
        if t > 0:
            equi = X_seq[t - 1]
        else:
            # equi = np.zeros_like(X)
            equi = X
        # calculate distance
        opt_distances += [np.linalg.norm(X_Opt - X)]
        equi_distances += [np.linalg.norm(equi - X)]
        # calculate standard regret
        ci_predict = ContourIntegral(Domain, LineContour(equi, X))
        predict_loss = integral(ci_predict)
        ci_opt = ContourIntegral(Domain, LineContour(equi, X_Opt))
        predict_opt = integral(ci_opt)
        regret_standards += [predict_loss - predict_opt]
        # calculate new regret
        ci_new = ContourIntegral(Domain, LineContour(X_Opt, X))
        regret_news += [integral(ci_new)]
        # calculate bound
        area_exact = herons(X_Opt, X, equi)  # exact area
        area = eta_opt * L * (np.linalg.norm(X) + B)
        areas_exact += [area_exact]
        areas += [area]
        stokes_exact += [CurlBounds[idx] * area_exact]
        stokes += [CurlBounds[idx] * area]
        # stokes += [np.max(CurlBounds[idx]*regret_news[-1]/sigma,0)]
        # calculate Fnorm
        Fnorms += [np.linalg.norm(F_seq[t])]

    ts_p1 = range(1, T + 1)
    opt_distances_avg = np.divide(np.cumsum(opt_distances), ts_p1)
    equi_distances_avg = np.divide(np.cumsum(equi_distances), ts_p1)
    regret_standards_avg = np.divide(np.cumsum(regret_standards), ts_p1)
    regret_news_avg = np.divide(np.cumsum(regret_news), ts_p1)
    areas_exact_avg = np.divide(np.cumsum(areas_exact), ts_p1)
    areas_avg = np.divide(np.cumsum(areas), ts_p1)
    stokes_exact_avg = np.divide(np.cumsum(stokes_exact), ts_p1)
    stokes_avg = np.divide(np.cumsum(stokes), ts_p1)
    Fnorms_avg = np.divide(np.cumsum(Fnorms), ts_p1)

    np.savez_compressed('NoRegret_MLN_new.npz',
                        opt_d_avg=opt_distances_avg,
                        equi_d_avg=equi_distances_avg,
                        rs_avg=regret_standards_avg,
                        rn_avg=regret_news_avg,
                        stokes_exact=stokes_exact_avg,
                        stokes=stokes_avg)

    plt.subplot(2, 1, 2)
    plt.semilogy(ts,
                 opt_distances_avg,
                 'k',
                 label='Average Distance to Optimal')
    plt.semilogy(ts,
                 equi_distances_avg,
                 'r',
                 label='Average Distance to Reference')
    plt.semilogy(ts, areas_exact_avg, 'g-', label='Area (exact)')
    plt.semilogy(ts, areas_avg, 'm-', label='Area')
    plt.semilogy(ts, Fnorms_avg, 'b-', label='Fnorms')
    # plt.title('Demonstration of No-Regret on MLN')
    plt.xlabel('Time Step')
    plt.ylabel('Euclidean Distance')
    # plt.legend()
    lgd1 = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)

    plt.subplot(2, 1, 1)
    plt.plot(ts,
             regret_standards_avg,
             'r--o',
             markevery=T // 20,
             label=r'regret$_{s}$')
    plt.plot(ts, regret_news_avg, 'b-', label=r'regret$_{n}$')
    plt.fill_between(ts,
                     regret_news_avg - stokes,
                     regret_news_avg + stokes,
                     facecolor='c',
                     alpha=0.2,
                     zorder=5,
                     label='Stokes Bound')
    plt.fill_between(ts,
                     regret_news_avg - stokes_exact,
                     regret_news_avg + stokes_exact,
                     facecolor='c',
                     alpha=0.2,
                     zorder=5,
                     label='Stokes Bound (exact)')

    # plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    # plt.xlabel('Time Step')
    plt.ylabel('Aggregate System-Wide Loss')
    plt.xlim([0, T])
    plt.ylim([-5000, 5000])
    # plt.legend(loc='lower right')
    lgd2 = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
    plt.title('Demonstration of No-Regret on MLN')

    plt.savefig('NoRegret_MLN_new.pdf',
                format='pdf',
                additional_artists=[lgd1, lgd2],
                bbox_inches='tight')

    fontsize = 18
    plt.figure()
    plt.subplot(1, 1, 1)
    plt.plot(ts,
             regret_standards_avg,
             'r--o',
             markevery=T // 20,
             label=r'regret$_{s}$')
    plt.plot(ts, regret_news_avg, 'b-', label=r'regret$_{n}$')
    # plt.fill_between(ts, regret_news_avg-stokes, regret_news_avg+stokes,
    #                  facecolor='c', alpha=0.2, zorder=5, label='Stokes Bound')
    plt.fill_between(ts,
                     regret_news_avg - stokes_exact,
                     regret_news_avg + stokes_exact,
                     facecolor='c',
                     alpha=0.2,
                     zorder=5,
                     label='Stokes Bound')

    plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    plt.xlabel('Time Step', fontsize=fontsize)
    plt.ylabel('Negative Auto-Welfare', fontsize=fontsize)
    plt.xlim([0, T])
    plt.ylim([0, 5000])
    # plt.legend(loc='lower right')
    lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, fontsize=fontsize)
    plt.title('Demonstration of No-Regret on MLN', fontsize=fontsize)

    plt.savefig('NoRegret_MLN_new2.pdf',
                format='pdf',
                additional_artists=[lgd],
                bbox_inches='tight')

    plt.figure()
    plt.subplot(1, 1, 1)
    plt.plot(ts,
             -regret_standards_avg,
             'r--o',
             markevery=T // 20,
             label=r'regret$_{2}$')
    plt.plot(ts, -regret_news_avg, 'b-', label=r'regret$_{1}$')
    # plt.fill_between(ts, regret_news_avg-stokes, regret_news_avg+stokes,
    #                  facecolor='c', alpha=0.2, zorder=5, label='Stokes Bound')
    plt.fill_between(ts,
                     -regret_news_avg - stokes_exact,
                     -regret_news_avg + stokes_exact,
                     facecolor='c',
                     alpha=0.2,
                     zorder=5,
                     label='Stokes Bound')

    plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    plt.xlabel('Time Step', fontsize=fontsize)
    plt.ylabel('Auto-Welfare Regret', fontsize=fontsize)
    plt.xlim([0, T])
    plt.ylim([-5000, 0])
    # plt.legend(loc='lower right')
    lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, fontsize=fontsize)
    plt.title('Demonstration of No-Regret on MLN', fontsize=fontsize)

    plt.savefig('NoRegret_MLN_new3.pdf',
                format='pdf',
                additional_artists=[lgd],
                bbox_inches='tight')

    plt.figure()
    plt.subplot(1, 1, 1)
    plt.plot(ts, regret_news_avg, 'b-')
    # plt.fill_between(ts, regret_news_avg-stokes, regret_news_avg+stokes,
    #                  facecolor='c', alpha=0.2, zorder=5, label='Stokes Bound')
    # plt.fill_between(ts, -regret_news_avg-stokes_exact, -regret_news_avg+stokes_exact,
    #                  facecolor='c', alpha=0.2, zorder=5, label='Stokes Bound')

    plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    plt.xlabel('Time Step', fontsize=fontsize)
    plt.ylabel('OMO Path Integral Regret', fontsize=fontsize)
    plt.xlim([0, T])
    plt.ylim([0, 5000])
    # plt.legend(loc='lower right')
    # lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, fontsize=fontsize)
    plt.title('Demonstration of No-Regret on MLN', fontsize=fontsize)

    plt.savefig('NoRegret_MLN_new4.pdf', format='pdf', bbox_inches='tight')

    # check whether the averaged standard regret lies within the Stokes band
    sat_exact = np.logical_and(
        regret_standards_avg >= regret_news_avg - stokes_exact,
        regret_standards_avg <= regret_news_avg + stokes_exact)
    sat = np.logical_and(regret_standards_avg >= regret_news_avg - stokes,
                         regret_standards_avg <= regret_news_avg + stokes)

    embed()
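
herons(X_Opt, X, equi), the exact triangle area used in the Stokes bound, is not defined in this listing. A straightforward implementation via Heron's formula, offered here as a hypothetical stand-in:

import numpy as np

def herons(a, b, c):
    """Area of the triangle with vertices a, b, c via Heron's formula (hypothetical stand-in)."""
    x = np.linalg.norm(b - a)
    y = np.linalg.norm(c - b)
    z = np.linalg.norm(a - c)
    s = 0.5 * (x + y + z)                                      # semi-perimeter
    return np.sqrt(max(s * (s - x) * (s - y) * (s - z), 0.0))  # clamp guards round-off

# toy usage: right triangle with legs 3 and 4 has area 6
print(herons(np.zeros(2), np.array([3.0, 0.0]), np.array([0.0, 4.0])))
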
Code Example #10
File: Demo_OMO2.py Project: vishalbelsare/VI-Solver
def Demo():

    #__SERVICE_ORIENTED_INTERNET__##############################################
    N = 2  # number of possible maps
    T = 100  # number of time steps
    eta = .01  # learning rate

    # Define Domains and Compute Equilbria
    Domains = []
    X_Stars = []
    CurlBounds = []
    # for n in range(N):
    n = 0
    while len(X_Stars) < N:
        # Create Domain
        Network = CreateRandomNetwork(m=3,n=2,o=2,seed=n)
        Domain = SOI(Network=Network,alpha=2)

        # Record Domain
        Domains += [Domain]

        # Set Method
        Method = HeunEuler(Domain=Domain,P=BoxProjection(lo=0),Delta0=1e-3)

        # Initialize Starting Point
        Start = np.zeros(Domain.Dim)

        # Calculate Initial Gap
        gap_0 = Domain.gap_rplus(Start)

        # Calculate Curl Bound
        J = approx_jacobian(Domain.F,Start)
        if not np.all(np.linalg.eigvals(J+J.T) >= 0):
            pass  # note: the domain is kept even if the PSD check fails
        _J = approx_jacobian(Domain.F,Start+0.5)
        assert np.allclose(J,_J,atol=1e-5)
        CurlBounds += [np.sqrt(18)*svds(J,k=1,which='LM',return_singular_vectors=False).item()]

        # Set Options
        Init = Initialization(Step=-1e-10)
        Term = Termination(MaxIter=25000,Tols=[(Domain.gap_rplus,1e-6*gap_0)])
        Repo = Reporting(Requests=[Domain.gap_rplus])
        Misc = Miscellaneous()
        Options = DescentOptions(Init,Term,Repo,Misc)

        # Print Stats
        PrintSimStats(Domain,Method,Options)

        # Start Solver
        tic = time.time()
        SOI_Results = Solve(Start,Method,Domain,Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options,SOI_Results,Method,toc)

        # Record X_Star
        X_Star = SOI_Results.TempStorage['Data'][-1]
        X_Stars += [X_Star]
        n += 1
    X_Stars = np.asarray(X_Stars)

    # Compute Equilibrium of Average Domain
    Domain = AverageDomains(Domains)

    # Set Method
    Method = HeunEuler_PhaseSpace(Domain=Domain,P=BoxProjection(lo=0),Delta0=1e-3)

    # Initialize Starting Point
    Start = np.zeros(Domain.Dim)

    J = approx_jacobian(Domain.F,Start)
    assert np.all(np.linalg.eigvals(J+J.T) >= 0)

    # Calculate Initial Gap
    gap_0 = Domain.gap_rplus(Start)

    # Set Options
    Init = Initialization(Step=-1e-10)
    Term = Termination(MaxIter=25000,Tols=[(Domain.gap_rplus,1e-10*gap_0)])
    Repo = Reporting(Requests=[Domain.gap_rplus])
    Misc = Miscellaneous()
    Options = DescentOptions(Init,Term,Repo,Misc)

    # Print Stats
    PrintSimStats(Domain,Method,Options)

    # Start Solver
    tic = time.time()
    SOI_Results = Solve(Start,Method,Domain,Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options,SOI_Results,Method,toc)

    # Record X_Opt
    X_Opt = SOI_Results.TempStorage['Data'][-1]
    # X_Opt = X_Stars[0]

    print('Starting Online Learning')

    # Set First Prediction
    X = np.zeros(X_Stars.shape[1])

    # Select First Domain
    idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))

    distances = []
    loss_infs = []
    regret_standards = []
    regret_news = []
    stokes = []
    ts = range(T)
    for t in ts:
        print('t = '+str(t))
        # retrieve domain
        Domain = Domains[idx]
        # retrieve equilibrium / reference vector
        # equi = X_Stars[idx]
        equi = np.zeros_like(X_Stars[idx])
        # calculate distance
        distances += [np.linalg.norm(X_Opt-X)]
        # calculate infinity loss
        # loss_infs += [infinity_loss(Domain,X)]
        # calculate standard regret
        ci_predict = ContourIntegral(Domain,LineContour(equi,X))
        predict_loss = integral(ci_predict)
        ci_opt = ContourIntegral(Domain,LineContour(equi,X_Opt))
        predict_opt = integral(ci_opt)
        regret_standards += [predict_loss - predict_opt]
        # calculate new regret
        ci_new = ContourIntegral(Domain,LineContour(X_Opt,X))
        regret_news += [integral(ci_new)]
        # calculate bound
        # area = 0.5*np.prod(np.sort([np.linalg.norm(X_Opt-equi),np.linalg.norm(X-X_Opt),np.linalg.norm(equi-X)])[:2])  # area upper bound
        area = herons(X_Opt,X,equi)  # exact area
        stokes += [CurlBounds[idx]*area]
        # update prediction
        X = BoxProjection(lo=0).P(X,-eta,Domain.F(X))
        # update domain
        idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))
    # embed()
    ts_p1 = range(1,T+1)
    distances_avg = np.divide(np.cumsum(distances),ts_p1)
    # loss_infs_avg = np.divide(np.cumsum(loss_infs),ts_p1)
    regret_standards_avg = np.divide(np.cumsum(regret_standards),ts_p1)
    regret_news_avg = np.divide(np.cumsum(regret_news),ts_p1)
    stokes = np.divide(np.cumsum(stokes),ts_p1)

    # np.savez_compressed('NoRegret_MLN.npz',d_avg=distances_avg,
    #                     linf_avg=loss_infs_avg,rs_avg=regret_standards_avg,
    #                     rn_avg=regret_news_avg,stokes=stokes)

    plt.subplot(2, 1, 2)
    plt.plot(ts, distances_avg, 'k',label='Average Distance')
    plt.title('Demonstration of No-Regret on MLN')
    plt.ylabel('Euclidean Distance')
    plt.legend()

    plt.subplot(2, 1, 1)
    # plt.plot(ts, loss_infs_avg, 'k--', label=r'loss$_{\infty}$')
    plt.plot(ts, regret_standards_avg, 'r--o', markevery=T//20,
             label=r'regret$_{s}$')
    plt.plot(ts, regret_news_avg, 'b-', label=r'regret$_{n}$')
    # plt.fill_between(ts, regret_news_avg-stokes, regret_news_avg+stokes,
    #                  facecolor='c', alpha=0.2, zorder=0, label='Stokes Bound')
    plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    plt.xlabel('Time Step')
    plt.ylabel('Aggregate System-Wide Loss')
    plt.xlim([0,T])
    # plt.ylim([-250,1000])
    plt.legend()
    plt.title('Demonstration of No-Regret on MLN')

    plt.savefig('NoRegret_MLN2')

    # data = np.load('NoRegret2.npz')
    # distances_avg = data['d_avg']
    # loss_infs_avg = data['linf_avg']
    # regret_standards_avg = data['rs_avg']
    # regret_news_avg = data['rn_avg']
    # stokes = data['stokes']
    # ts = range(len(distances_avg))

    embed()
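
The regrets above are line integrals of F along straight segments, computed with ContourIntegral, LineContour, and integral. The quantity itself is ∫₀¹ F(a + s(b−a))·(b−a) ds; the quadrature sketch below computes it without those helpers (line_integral and its discretization are assumptions, not the project's API):

import numpy as np

def line_integral(F, a, b, num=1000):
    """Trapezoidal approximation of the line integral of F along the segment from a to b."""
    s = np.linspace(0.0, 1.0, num)
    points = a[None, :] + s[:, None] * (b - a)[None, :]     # sample the segment
    values = np.array([F(p) for p in points])               # field at each sample
    integrand = values.dot(b - a)                           # F(x(s)) . (b - a)
    return np.trapz(integrand, s)

# toy usage: F(x) = x from the origin to (1, 1); exact value is 1
print(line_integral(lambda x: x, np.zeros(2), np.ones(2)))
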
Code Example #11
def Demo():

    #__ONLINE_MONOTONE_EQUILIBRATION_DEMO_OF_A_SERVICE_ORIENTED_INTERNET__######

    # Define Number of Different VIs
    N = 10
    np.random.seed(0)

    # Define Initial Network and Domain
    World = np.random.randint(N)
    Worlds = [World]
    Network = CreateRandomNetwork(m=3, n=2, o=2, seed=World)
    Domain = SOI(Network=Network, alpha=2)

    # Define Initial Strategy
    Strategies = [np.zeros(Domain.Dim)]
    eta = 0.1

    for t in range(1000):

        #__PERFORM_SINGLE_UPDATE

        print('Time ' + str(t))

        # Set Method
        Method = Euler(Domain=Domain, P=BoxProjection(lo=0))

        # Set Options
        Init = Initialization(Step=-eta)
        Term = Termination(MaxIter=1)
        Repo = Reporting(Requests=['Data'])
        Misc = Miscellaneous()
        Options = DescentOptions(Init, Term, Repo, Misc)

        # Run Update
        Result = Solve(Strategies[-1], Method, Domain, Options)

        # Get New Strategy
        Strategy = Result.PermStorage['Data'][-1]
        Strategies += [Strategy]

        #__DEFINE_NEXT_VI

        # Define Initial Network and Domain
        World = np.random.randint(N)
        Worlds += [World]
        Network = CreateRandomNetwork(m=3, n=2, o=2, seed=World)
        Domain = SOI(Network=Network, alpha=2)

    # Scrap Last Strategy / World
    Strategies = np.asarray(Strategies[:-1])
    Worlds = Worlds[:-1]

    # Store Equilibrium Strategies
    Equilibria = dict()

    for w in np.unique(Worlds):

        print('World ' + str(w))

        #__FIND_EQUILIBRIUM_SOLUTION_OF_VI

        # Define Initial Network and Domain
        Network = CreateRandomNetwork(m=3, n=2, o=2, seed=w)
        Domain = SOI(Network=Network, alpha=2)

        # Set Method
        Method = HeunEuler(Domain=Domain, P=BoxProjection(lo=0), Delta0=1e-5)

        # Initialize Starting Point
        Start = np.zeros(Domain.Dim)

        # Calculate Initial Gap
        gap_0 = Domain.gap_rplus(Start)

        # Set Options
        Init = Initialization(Step=-1e-10)
        Term = Termination(MaxIter=25000,
                           Tols=[(Domain.gap_rplus, 1e-6 * gap_0)])
        Repo = Reporting(Requests=[Domain.gap_rplus, 'Step', 'Data'])
        Misc = Miscellaneous()
        Options = DescentOptions(Init, Term, Repo, Misc)

        # Print Stats
        PrintSimStats(Domain, Method, Options)

        # Start Solver
        tic = time.time()
        Results = Solve(Start, Method, Domain, Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options, Results, Method, toc)

        # Get Equilibrium Strategy
        Equilibrium = Results.PermStorage['Data'][-1]
        Equilibria[w] = Equilibrium

    # Matched Equilibria & Costs
    Equilibria_Matched = np.asarray([Equilibria[w] for w in Worlds])

    # Compute Mean of Equilibria
    Mean_Equilibrium = np.mean(Equilibria_Matched, axis=0)

    # Compute Strategies Distance From Mean Equilibrium
    Distance_From_Mean = np.linalg.norm(Strategies - Mean_Equilibrium, axis=1)

    # Plot Results
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)
    ax1.plot(Distance_From_Mean, label='Distance from Mean')
    ax1.set_title('Online Monotone Equilibration of Dynamic SOI Network')
    ax1.legend()
    ax1.set_xlabel('Time')

    plt.savefig('OMEfast.png')

    embed()
Code Example #12
def Demo():

    # __PENN_TREE_BANK____#################################################

    # Load Data
    # seq = np.arange(1000)
    train, valid, test, id_to_word, vocab = ptb_raw_data(
        '/Users/imgemp/Desktop/Data/simple-examples/data/')
    words, given_embeddings = pickle.load(open(
        '/Users/imgemp/Desktop/Data/polyglot-en.pkl', 'rb'),
                                          encoding='latin1')
    words_low = [word.lower() for word in words]
    word_to_id = dict(zip(words_low, range(len(words))))
    word_to_id['<eos>'] = word_to_id['</s>']
    y0 = get_y0(train, vocab)

    EDim = 5
    fix_embedding = False
    learn_embedding = True
    if fix_embedding:
        EDim = given_embeddings.shape[1]
        learn_embedding = False

    # Define Domain
    Domain = PennTreeBank(seq=train,
                          y0=None,
                          EDim=EDim,
                          batch_size=100,
                          learn_embedding=learn_embedding,
                          ord=1)

    # Set Method
    P = PTBProj(Domain.EDim)
    # Method = Euler(Domain=Domain,FixStep=True,P=P)
    Method = HeunEuler(Domain=Domain,
                       P=P,
                       Delta0=1e-4,
                       MinStep=-3.,
                       MaxStep=0.)
    # Method = CashKarp(Domain=Domain,P=P,Delta0=1e-1,MinStep=-5.,MaxStep=0.)

    # Set Options
    Term = Termination(MaxIter=10000)
    Repo = Reporting(
        Interval=10,
        Requests=[Domain.Error, Domain.PercCorrect, Domain.Perplexity,
                  'Step'])  #,
    # 'Step', 'F Evaluations',
    # 'Projections','Data'])
    Misc = Miscellaneous()
    Init = Initialization(Step=-1e-3)
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Initialize Starting Point
    if fix_embedding:
        missed = 0
        params = np.random.rand(Domain.param_len)
        avg_norm = np.linalg.norm(given_embeddings, axis=1).mean()
        embeddings = []
        for i in range(vocab):
            word = id_to_word[i]
            if word in word_to_id:
                embedding = given_embeddings[word_to_id[word]]
            else:
                missed += 1
                embedding = np.random.rand(Domain.EDim)
                embedding *= avg_norm / np.linalg.norm(embedding)
            embeddings += [embedding]
        polyglot_embeddings = np.hstack(embeddings)
        Start = np.hstack((params, polyglot_embeddings))
        print(np.linalg.norm(polyglot_embeddings))
        print(
            'Missing %d matches in polyglot dictionary -> given random embeddings.'
            % missed)
    else:
        # params = np.random.rand(Domain.param_len)*10
        # embeddings = np.random.rand(EDim*vocab)*.1
        # Start = np.hstack((params,embeddings))
        # assert Start.shape[0] == Domain.Dim
        Start = np.random.rand(Domain.Dim)
    Start = P.P(Start)

    # Compute Initial Error
    print('Initial training error: %g' % Domain.Error(Start))
    print('Initial perplexity: %g' % Domain.Perplexity(Start))
    print('Initial percent correct: %g' % Domain.PercCorrect(Start))

    # Print Stats
    PrintSimStats(Domain, Method, Options)

    # Start Solver
    tic = time.time()
    PTB_Results = Solve(Start, Method, Domain, Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options, PTB_Results, Method, toc)

    # Plot Results
    err = np.asarray(PTB_Results.PermStorage[Domain.Error])
    pc = np.asarray(PTB_Results.PermStorage[Domain.PercCorrect])
    perp = np.asarray(PTB_Results.PermStorage[Domain.Perplexity])
    steps = np.asarray(PTB_Results.PermStorage['Step'])
    t = np.arange(0, len(steps) * Repo.Interval, Repo.Interval)
    fig = plt.figure()
    ax = fig.add_subplot(411)
    ax.semilogy(t, err)
    ax.set_ylabel('Training Error')
    ax.set_title('Penn Tree Bank Training Evaluation')
    ax = fig.add_subplot(412)
    ax.semilogy(t, perp)
    ax.set_ylabel('Perplexity')
    ax = fig.add_subplot(413)
    ax.plot(t, pc)
    ax.set_ylabel('Percent Correct')
    ax = fig.add_subplot(414)
    ax.plot(t, steps)
    ax.set_ylabel('Step Size')
    ax.set_xlabel('Iterations (k)')
    plt.savefig('PTB')

    params_embeddings = np.asarray(PTB_Results.TempStorage['Data']).squeeze()
    params, embeddings = np.split(params_embeddings, [Domain.param_len])
    embeddings_split = np.split(embeddings, vocab)

    dists_comp = pdist(np.asarray(embeddings_split))
    dists_min = np.min(dists_comp)
    dists_max = np.max(dists_comp)
    dists = squareform(dists_comp)
    dists2 = np.asarray([np.linalg.norm(e) for e in embeddings_split])
    print('pairwise dists', np.mean(dists), dists_min, dists_max)
    print('embedding norms', np.mean(dists2), np.min(dists2), np.max(dists2))
    print('params', np.mean(params), np.min(np.abs(params)),
          np.max(np.abs(params)))
Code Example #13
File: Demo_OMO2c.py Project: vishalbelsare/VI-Solver
def Demo():

    #__SERVICE_ORIENTED_INTERNET__##############################################
    N = 10  # number of possible maps
    T = 1000  # number of time steps
    eta = 1e-3  # learning rate

    print('Creating Domains')

    # Define Domains and Compute Equilbria
    Domains = []
    X_Stars = []
    CurlBounds = []
    n = 0
    while len(Domains) < N:
        # Create Domain
        Network = CreateRandomNetwork(m=3, n=2, o=2, seed=None)
        Domain = SOI(Network=Network, alpha=2)

        # Initialize Starting Point
        Start = np.zeros(Domain.Dim)

        # Assert PD
        J = approx_jacobian(Domain.F, Start)
        eigs = np.linalg.eigvals(J + J.T)
        eigs_i = np.abs(np.linalg.eigvals(J - J.T))
        if not np.all(eigs > 0):
            continue
        print(eigs.min(), eigs.max())
        print(eigs_i.min(), eigs_i.max())
        _J = approx_jacobian(Domain.F, Start + 0.5)
        assert np.allclose(J, _J,
                           atol=1e-5)  # assert J is constant (unique for SOI)

        # Record Domain
        Domains += [Domain]

        # Calculate Initial Gap
        gap_0 = Domain.gap_rplus(Start)

        # Calculate Curl Bound
        CurlBounds += [
            np.sqrt(18) *
            svds(J, k=1, which='LM', return_singular_vectors=False).item()
        ]

        # Set Method
        Method = HeunEuler(Domain=Domain, P=BoxProjection(lo=0), Delta0=1e-3)

        # Set Options
        Init = Initialization(Step=-1e-10)
        Term = Termination(MaxIter=25000,
                           Tols=[(Domain.gap_rplus, 1e-6 * gap_0)])
        Repo = Reporting(Requests=[Domain.gap_rplus])
        Misc = Miscellaneous()
        Options = DescentOptions(Init, Term, Repo, Misc)

        # Print Stats
        PrintSimStats(Domain, Method, Options)

        # Start Solver
        tic = time.time()
        SOI_Results = Solve(Start, Method, Domain, Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options, SOI_Results, Method, toc)

        # Record X_Star
        X_Star = SOI_Results.TempStorage['Data'][-1]
        X_Stars += [X_Star]
        n += 1
    X_Stars = np.asarray(X_Stars)

    print('Starting Online Learning')

    # Set First Prediction
    X = np.zeros(X_Stars.shape[1])
    # X = np.mean(X_Stars,axis=0)
    # X += np.random.rand(*X.shape)*np.linalg.norm(X)

    # Select First Domain
    # idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))
    idx = np.random.choice(len(Domains))

    # Domain Sequence
    idx_seq = []
    X_seq = []
    F_seq = []

    ts = range(T)
    for t in ts:
        print('t = ' + str(t), end='\r')
        # record prediction
        X_seq += [X]
        # record domain
        idx_seq += [idx]
        # retrieve domain
        Domain = Domains[idx]
        # record F
        FX = Domain.F(X)
        F_seq += [FX]
        # update prediction
        X = BoxProjection(lo=0).P(X, -eta, FX)
        # update domain
        # idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))
        idx = np.random.choice(len(Domains))

    L = np.sqrt(np.mean(np.linalg.norm(F_seq, axis=1)**2.))

    print('Computing Optimal Strategy')

    weights = np.bincount(idx_seq, minlength=len(Domains)) / len(idx_seq)
    print(weights)

    # Compute Equilibrium of Average Domain
    Domain = AverageDomains(Domains, weights=weights)

    # Set Method
    Method = HeunEuler_PhaseSpace(Domain=Domain,
                                  P=BoxProjection(lo=0),
                                  Delta0=1e-5)

    # Initialize Starting Point
    Start = np.zeros(Domain.Dim)

    # Assert PD (a weighted sum of PD matrices is PD, but it doesn't hurt to check)
    J = approx_jacobian(Domain.F, Start)
    eigs = np.linalg.eigvals(J + J.T)
    eigs_i = np.abs(np.linalg.eigvals(J - J.T))
    assert np.all(eigs > 0)
    print(eigs.min(), eigs.max())
    print(eigs_i.min(), eigs_i.max())

    # Calculate Initial Gap
    gap_0 = Domain.gap_rplus(Start)

    # Set Options
    Init = Initialization(Step=-1e-10)
    Term = Termination(MaxIter=25000, Tols=[(Domain.gap_rplus, 1e-10 * gap_0)])
    Repo = Reporting(Requests=[Domain.gap_rplus])
    Misc = Miscellaneous()
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Print Stats
    PrintSimStats(Domain, Method, Options)

    # Start Solver
    tic = time.time()
    SOI_Results = Solve(Start, Method, Domain, Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options, SOI_Results, Method, toc)

    print('Computing Regrets')

    # Record X_Opt
    X_Opt = SOI_Results.TempStorage['Data'][-1]
    # X_Opt = X_Stars[0]
    B = np.linalg.norm(X_Opt)

    eta_opt = B / (L * np.sqrt(2 * T))
    bound_opt = B * L * np.sqrt(2 * T)
    reg_bound = (B**2) / (2 * eta) + eta * T * L**2

    distances = []
    loss_infs = []
    regret_standards = []
    regret_news = []
    stokes = []
    ts = range(T)
    for t in ts:
        print('t = ' + str(t), end='\r')
        idx = idx_seq[t]
        X = X_seq[t]
        # retrieve domain
        Domain = Domains[idx]
        # retrieve equilibrium / reference vector
        equi = X_Stars[idx]
        # equi = np.zeros_like(X_Stars[idx])
        # calculate distance
        distances += [np.linalg.norm(X_Opt - X)]
        # calculate infinity loss
        # loss_infs += [infinity_loss(Domain,X)]
        # calculate standard regret
        ci_predict = ContourIntegral(Domain, LineContour(equi, X))
        predict_loss = integral(ci_predict)
        ci_opt = ContourIntegral(Domain, LineContour(equi, X_Opt))
        predict_opt = integral(ci_opt)
        regret_standards += [predict_loss - predict_opt]
        # calculate new regret
        ci_new = ContourIntegral(Domain, LineContour(X_Opt, X))
        regret_news += [integral(ci_new)]
        # calculate bound
        # area = 0.5*np.prod(np.sort([np.linalg.norm(X_Opt-equi),np.linalg.norm(X-X_Opt),np.linalg.norm(equi-X)])[:2])  # area upper bound
        area = herons(X_Opt, X, equi)  # exact area
        stokes += [CurlBounds[idx] * area]
        # # update prediction
        # X = BoxProjection(lo=0).P(X,-eta,Domain.F(X))
        # # update domain
        # idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))
    # embed()
    ts_p1 = range(1, T + 1)
    distances_avg = np.divide(np.cumsum(distances), ts_p1)
    # loss_infs_avg = np.divide(np.cumsum(loss_infs),ts_p1)
    regret_standards_avg = np.divide(np.cumsum(regret_standards), ts_p1)
    regret_news_avg = np.divide(np.cumsum(regret_news), ts_p1)
    stokes = np.divide(np.cumsum(stokes), ts_p1)

    # np.savez_compressed('NoRegret_MLN2c.npz',d_avg=distances_avg,
    #                     linf_avg=loss_infs_avg,rs_avg=regret_standards_avg,
    #                     rn_avg=regret_news_avg,stokes=stokes)

    plt.subplot(2, 1, 2)
    plt.plot(ts, distances_avg, 'k', label='Average Distance')
    # plt.title('Demonstration of No-Regret on MLN')
    plt.xlabel('Time Step')
    plt.ylabel('Euclidean Distance')
    plt.legend()

    plt.subplot(2, 1, 1)
    # plt.plot(ts, loss_infs_avg, 'k--', label=r'loss$_{\infty}$')
    plt.plot(ts,
             regret_standards_avg,
             'r--o',
             markevery=T // 20,
             label=r'regret$_{s}$')
    plt.plot(ts, regret_news_avg, 'b-', label=r'regret$_{n}$')
    # plt.fill_between(ts, regret_news_avg-stokes, regret_news_avg+stokes,
    #                  facecolor='c', alpha=0.2, zorder=0, label='Stokes Bound')
    plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    # plt.xlabel('Time Step')
    plt.ylabel('Aggregate System-Wide Loss')
    plt.xlim([0, T])
    # plt.ylim([-200,200])
    plt.legend(loc='lower right')
    plt.title('Demonstration of No-Regret on MLN')

    plt.savefig('NoRegret_MLN2c')

    # data = np.load('NoRegret2.npz')
    # distances_avg = data['d_avg']
    # loss_infs_avg = data['linf_avg']
    # regret_standards_avg = data['rs_avg']
    # regret_news_avg = data['rn_avg']
    # stokes = data['stokes']
    # ts = range(len(distances_avg))

    embed()