Example #1
def Demo():

    #__LQ_GAN__##############################################

    # Interpolation between F and RipCurl should probably be nonlinear in the
    # L2 norms of the matrices, since those norms essentially represent the largest eigenvalues.

    # What's an example of a pseudomonotone field? Stretch vertically, linearly, a bit? Quasimonotone?

    # The extragradient method uses the same step size for the first and second steps; as the step size goes
    # to zero, extragradient asymptotes to the projection method.
    # Modified extragradient methods use different step sizes. If we keep the first step size fixed at some
    # positive value and shrink the second, the desired dynamics of extragradient are preserved.
    # This is essentially what HE_PhaseSpace is showing (a standalone sketch follows this example).
    # HE and HE_PhaseSpace are designed to "simulate" a trajectory - they do not actually change the
    # effective dynamics of the vector field.

    # Define Network and Domain
    Domain = LQ(sig=1)

    # Set Method
    lo = [-np.inf,1e-2]
    # Method = Euler(Domain=Domain,FixStep=True,P=BoxProjection(lo=lo))
    # Method = EG(Domain=Domain,FixStep=True,P=BoxProjection(lo=lo))
    # Method = HeunEuler(Domain=Domain,Delta0=1e-4,P=BoxProjection(lo=lo))
    Method = HeunEuler_PhaseSpace(Domain=Domain,Delta0=1e-2,P=BoxProjection(lo=lo))

    # Initialize Starting Point
    # Start = np.random.rand(Domain.Dim)
    scale = 30
    Start = np.array([50.,50.])
    xoff = 0
    yoff = 0
    # No difference between the eigenvalues of J at [1.,3.5] and the eigenvalues of an outward spiral: a = np.array([[-1,6.92],[-6.92,-1]])
    j = Domain.J(Start)
    print('original evs')
    print(np.linalg.eigvals(j))
    print(np.linalg.eigvals(j+j.T))
    f = Domain.F(Start)
    jsym = j+j.T
    # print(f.dot(jsym.dot(f)))
    tf = Domain.TF(Start)
    print('tf')
    print(tf)
    print(np.linalg.eigvals(tf))
    jrc = Domain.JRipCurl(Start)
    print('jrc')
    print(jrc)
    print(0.5*(jrc+jrc.T))
    print(np.linalg.eigvals(jrc+jrc.T))
    jreg = Domain.JReg(Start)
    print('jreg')
    print(jreg)
    print(0.5*(jreg+jreg.T))
    print(np.linalg.eigvals(jreg+jreg.T))
    jasy = j-j.T
    print('exact')
    print(0.5*np.dot(jasy.T,jasy))

    for gam in np.linspace(0,1,20):
        # print(Domain.JRegEV(Start,gam))
        print(Domain.JRCEV(Start,gam))

    jap = approx_jacobian(Domain.F,Start)
    print(jap)
    print(np.linalg.eigvals(0.5*(jap+jap.T)))

    y = np.array([0,1])
    x = np.array([1,1e-1])
    pre = np.dot(Domain.F(y),x-y)
    post = np.dot(Domain.F(x),x-y)
    print(pre)
    print(post)

    d = 2
    W2 = Domain.sym(np.random.rand(d,d))
    w1 = np.random.rand(d)
    A = np.tril(np.random.rand(d,d))
    A[range(d),range(d)] = np.clip(A[range(d),range(d)],1e-6,np.inf)
    b = np.random.rand(d)
    dmult = np.hstack([W2.flatten(),w1,A.flatten(),b])
    jmult = Domain.Jmult(dmult)
    jskew = (jmult-jmult.T)
    print(np.linalg.matrix_rank(jskew,tol=1e-16))

    W2 = Domain.sym(np.ones((d,d)))
    w1 = np.ones(d)
    A = np.tril(np.ones((d,d)))
    A[range(d),range(d)] = np.clip(A[range(d),range(d)],0,np.inf)
    b = np.ones(d)
    dmult = np.hstack([W2.flatten(),w1,A.flatten(),b])
    jmult = Domain.Jmult(dmult)
    jskew = (jmult-jmult.T)
    print(np.linalg.matrix_rank(jskew))

    W2 = Domain.sym(np.zeros((d,d)))
    w1 = np.zeros(d)
    A = np.tril(np.ones((d,d)))
    A[range(d),range(d)] = np.clip(A[range(d),range(d)],0,np.inf)
    b = np.zeros(d)
    dmult = np.hstack([W2.flatten(),w1,A.flatten(),b])
    jmult = Domain.Jmult(dmult)
    jskew = (jmult-jmult.T)
    print(np.linalg.matrix_rank(jskew))

    np.set_printoptions(linewidth=200)

    s = (d**2+d)//2
    jskewblock = jskew[:s,s+d:s+d+s]

    embed()

    # Set Options
    Init = Initialization(Step=-1e-5)
    Term = Termination(MaxIter=1000,Tols=[(Domain.dist,1e-4)])
    Repo = Reporting(Requests=['Step', 'F Evaluations',
                               'Projections','Data',Domain.dist])
    Misc = Miscellaneous()
    Options = DescentOptions(Init,Term,Repo,Misc)

    # Print Stats
    PrintSimStats(Domain,Method,Options)

    # Start Solver
    tic = time.time()
    LQ_Results = Solve(Start,Method,Domain,Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options,LQ_Results,Method,toc)

    data = np.array(LQ_Results.PermStorage['Data'])

    X, Y = np.meshgrid(np.arange(-2*scale + xoff, 2*scale + xoff, .2*scale), np.arange(1e-2 + yoff, 4*scale + yoff, .2*scale))
    U = np.zeros_like(X)
    V = np.zeros_like(Y)
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            vec = -Domain.F([X[i,j],Y[i,j]])
            U[i,j] = vec[0]
            V[i,j] = vec[1]

    fig = plt.figure()
    ax = fig.add_subplot(111)
    Q = plt.quiver(X[::3, ::3], Y[::3, ::3], U[::3, ::3], V[::3, ::3],
                   pivot='mid', units='inches')
    ax.plot(data[:,0],data[:,1],'-r')
    ax.plot([data[0,0]],[data[0,1]],'k*')
    ax.plot([data[-1,0]],[data[-1,1]],'b*')
    ax.set_xlim([-2*scale + xoff,2*scale + xoff])
    ax.set_ylim([-.1*scale + yoff,4*scale + yoff])
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    # plt.show()
    # plt.savefig('original.png')
    # plt.savefig('EGoriginal.png')
    # plt.savefig('RipCurl.png')
    # plt.savefig('RipCurl2.png')
    # plt.savefig('EG.png')
    # plt.savefig('GReg.png')
    plt.savefig('Testing.png')
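
A quick aside on the extragradient comments at the top of this example: the projection (forward Euler) update and the extragradient update differ only in where the field is evaluated. Below is a minimal, self-contained sketch of the two updates on a toy bilinear saddle-point field F(x, y) = (y, -x); the field, step size, and helper names are illustrative and not part of VISolver.

import numpy as np

def F(z):
    # Toy bilinear saddle-point field for min_x max_y x*y: F = (y, -x).
    x, y = z
    return np.array([y, -x])

def projection_step(z, eta):
    # Plain projection / forward-Euler update: z <- z - eta * F(z).
    return z - eta * F(z)

def extragradient_step(z, eta):
    # Extragradient: take a trial step, then update using F at the trial point.
    z_half = z - eta * F(z)
    return z - eta * F(z_half)

if __name__ == '__main__':
    eta = 0.1
    z_proj = np.array([1., 1.])
    z_eg = np.array([1., 1.])
    for _ in range(100):
        z_proj = projection_step(z_proj, eta)
        z_eg = extragradient_step(z_eg, eta)
    # On this field the projection iterate spirals outward while
    # extragradient contracts toward the equilibrium at the origin.
    print('projection norm:   ', np.linalg.norm(z_proj))
    print('extragradient norm:', np.linalg.norm(z_eg))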
Example #2
def Demo():

    #__LQ_GAN__##############################################

    # Interpolation between F and RipCurl should probably be nonlinear in the
    # L2 norms of the matrices, since those norms essentially represent the largest eigenvalues.

    # What's an example of a pseudomonotone field? Stretch vertically, linearly, a bit? Quasimonotone?

    # The extragradient method uses the same step size for the first and second steps; as the step size goes
    # to zero, extragradient asymptotes to the projection method.
    # Modified extragradient methods use different step sizes. If we keep the first step size fixed at some
    # positive value and shrink the second, the desired dynamics of extragradient are preserved.
    # This is essentially what HE_PhaseSpace is showing (see the extragradient sketch after Example #1).
    # HE and HE_PhaseSpace are designed to "simulate" a trajectory - they do not actually change the
    # effective dynamics of the vector field.

    # Define Network and Domain
    Domain = LQ(sig=1)

    # Set Method
    lo = [-np.inf, 1e-2]
    # Method = Euler(Domain=Domain,FixStep=True,P=BoxProjection(lo=lo))
    # Method = EG(Domain=Domain,FixStep=True,P=BoxProjection(lo=lo))
    # Method = HeunEuler(Domain=Domain,Delta0=1e-4,P=BoxProjection(lo=lo))
    Method = HeunEuler_PhaseSpace(Domain=Domain,
                                  Delta0=1e-2,
                                  P=BoxProjection(lo=lo))

    # Initialize Starting Point
    # Start = np.random.rand(Domain.Dim)
    scale = 30
    Start = np.array([50., 50.])
    xoff = 0
    yoff = 0
    # No difference between the eigenvalues of J at [1.,3.5] and the eigenvalues of an outward spiral: a = np.array([[-1,6.92],[-6.92,-1]])
    j = Domain.J(Start)
    print('original evs')
    print(np.linalg.eigvals(j))
    print(np.linalg.eigvals(j + j.T))
    f = Domain.F(Start)
    jsym = j + j.T
    # print(f.dot(jsym.dot(f)))
    tf = Domain.TF(Start)
    print('tf')
    print(tf)
    print(np.linalg.eigvals(tf))
    jrc = Domain.JRipCurl(Start)
    print('jrc')
    print(jrc)
    print(0.5 * (jrc + jrc.T))
    print(np.linalg.eigvals(jrc + jrc.T))
    jreg = Domain.JReg(Start)
    print('jreg')
    print(jreg)
    print(0.5 * (jreg + jreg.T))
    print(np.linalg.eigvals(jreg + jreg.T))
    jasy = j - j.T
    print('exact')
    print(0.5 * np.dot(jasy.T, jasy))

    for gam in np.linspace(0, 1, 20):
        # print(Domain.JRegEV(Start,gam))
        print(Domain.JRCEV(Start, gam))

    jap = approx_jacobian(Domain.F, Start)
    print(jap)
    print(np.linalg.eigvals(0.5 * (jap + jap.T)))

    y = np.array([0, 1])
    x = np.array([1, 1e-1])
    pre = np.dot(Domain.F(y), x - y)
    post = np.dot(Domain.F(x), x - y)
    print(pre)
    print(post)

    d = 2
    W2 = Domain.sym(np.random.rand(d, d))
    w1 = np.random.rand(d)
    A = np.tril(np.random.rand(d, d))
    A[range(d), range(d)] = np.clip(A[range(d), range(d)], 1e-6, np.inf)
    b = np.random.rand(d)
    dmult = np.hstack([W2.flatten(), w1, A.flatten(), b])
    jmult = Domain.Jmult(dmult)
    jskew = (jmult - jmult.T)
    print(np.linalg.matrix_rank(jskew, tol=1e-16))

    W2 = Domain.sym(np.ones((d, d)))
    w1 = np.ones(d)
    A = np.tril(np.ones((d, d)))
    A[range(d), range(d)] = np.clip(A[range(d), range(d)], 0, np.inf)
    b = np.ones(d)
    dmult = np.hstack([W2.flatten(), w1, A.flatten(), b])
    jmult = Domain.Jmult(dmult)
    jskew = (jmult - jmult.T)
    print(np.linalg.matrix_rank(jskew))

    W2 = Domain.sym(np.zeros((d, d)))
    w1 = np.zeros(d)
    A = np.tril(np.ones((d, d)))
    A[range(d), range(d)] = np.clip(A[range(d), range(d)], 0, np.inf)
    b = np.zeros(d)
    dmult = np.hstack([W2.flatten(), w1, A.flatten(), b])
    jmult = Domain.Jmult(dmult)
    jskew = (jmult - jmult.T)
    print(np.linalg.matrix_rank(jskew))

    np.set_printoptions(linewidth=200)

    s = (d**2 + d) // 2
    jskewblock = jskew[:s, s + d:s + d + s]

    embed()

    # Set Options
    Init = Initialization(Step=-1e-5)
    Term = Termination(MaxIter=1000, Tols=[(Domain.dist, 1e-4)])
    Repo = Reporting(
        Requests=['Step', 'F Evaluations', 'Projections', 'Data', Domain.dist])
    Misc = Miscellaneous()
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Print Stats
    PrintSimStats(Domain, Method, Options)

    # Start Solver
    tic = time.time()
    LQ_Results = Solve(Start, Method, Domain, Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options, LQ_Results, Method, toc)

    data = np.array(LQ_Results.PermStorage['Data'])

    X, Y = np.meshgrid(
        np.arange(-2 * scale + xoff, 2 * scale + xoff, .2 * scale),
        np.arange(1e-2 + yoff, 4 * scale + yoff, .2 * scale))
    U = np.zeros_like(X)
    V = np.zeros_like(Y)
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            vec = -Domain.F([X[i, j], Y[i, j]])
            U[i, j] = vec[0]
            V[i, j] = vec[1]

    fig = plt.figure()
    ax = fig.add_subplot(111)
    Q = plt.quiver(X[::3, ::3],
                   Y[::3, ::3],
                   U[::3, ::3],
                   V[::3, ::3],
                   pivot='mid',
                   units='inches')
    ax.plot(data[:, 0], data[:, 1], '-r')
    ax.plot([data[0, 0]], [data[0, 1]], 'k*')
    ax.plot([data[-1, 0]], [data[-1, 1]], 'b*')
    ax.set_xlim([-2 * scale + xoff, 2 * scale + xoff])
    ax.set_ylim([-.1 * scale + yoff, 4 * scale + yoff])
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    # plt.show()
    # plt.savefig('original.png')
    # plt.savefig('EGoriginal.png')
    # plt.savefig('RipCurl.png')
    # plt.savefig('RipCurl2.png')
    # plt.savefig('EG.png')
    # plt.savefig('GReg.png')
    plt.savefig('Testing.png')
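
The diagnostics above repeatedly inspect the eigenvalues of J and of J + J.T. Below is a self-contained sketch of that kind of check, applied to the 2x2 matrix quoted in the comment about the outward spiral; monotonicity_diagnostics is an illustrative helper and not part of VISolver.

import numpy as np

def monotonicity_diagnostics(J):
    # Split a Jacobian into symmetric and antisymmetric parts. The field is
    # (locally) monotone when the symmetric part 0.5*(J + J.T) is positive
    # semidefinite; the antisymmetric part measures the rotational component.
    sym = 0.5 * (J + J.T)
    skew = 0.5 * (J - J.T)
    return {'eigs_J': np.linalg.eigvals(J),
            'eigs_sym': np.linalg.eigvalsh(sym),
            'skew_spectral_norm': np.linalg.norm(skew, 2)}

if __name__ == '__main__':
    a = np.array([[-1., 6.92], [-6.92, -1.]])
    for key, val in monotonicity_diagnostics(a).items():
        print(key, val)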
Example #3
def Demo():

    #__SERVICE_ORIENTED_INTERNET__##############################################
    N = 10  # number of possible maps
    T = 1000  # number of time steps
    eta = 1e-3  # learning rate

    print('Creating Domains')

    # Define Domains and Compute Equilibria
    Domains = []
    X_Stars = []
    CurlBounds = []
    n = 0
    while len(Domains) < N:
        # Create Domain
        Network = CreateRandomNetwork(m=3, n=2, o=2, seed=None)
        Domain = SOI(Network=Network, alpha=2)

        # Initialize Starting Point
        Start = np.zeros(Domain.Dim)

        # Assert PD
        J = approx_jacobian(Domain.F, Start)
        eigs = np.linalg.eigvals(J + J.T)
        if not np.all(eigs > 0):
            continue
        _J = approx_jacobian(Domain.F, Start + 0.5)
        assert np.allclose(J, _J,
                           atol=1e-5)  # assert J is constant (unique for SOI)

        # Record Domain
        Domains += [Domain]

        # Calculate Initial Gap
        gap_0 = Domain.gap_rplus(Start)

        # Calculate Curl Bound
        CurlBounds += [
            np.sqrt(8) *
            svds(J, k=1, which='LM', return_singular_vectors=False).item()
        ]

        # Set Method
        Method = HeunEuler(Domain=Domain, P=BoxProjection(lo=0), Delta0=1e-3)

        # Set Options
        Init = Initialization(Step=-1e-10)
        Term = Termination(MaxIter=25000,
                           Tols=[(Domain.gap_rplus, 1e-6 * gap_0)])
        Repo = Reporting(Requests=[Domain.gap_rplus])
        Misc = Miscellaneous()
        Options = DescentOptions(Init, Term, Repo, Misc)

        # Print Stats
        PrintSimStats(Domain, Method, Options)

        # Start Solver
        tic = time.time()
        SOI_Results = Solve(Start, Method, Domain, Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options, SOI_Results, Method, toc)

        # Record X_Star
        X_Star = SOI_Results.TempStorage['Data'][-1]
        X_Stars += [X_Star]
        n += 1
    X_Stars = np.asarray(X_Stars)

    print('Starting Online Learning')

    # Set First Prediction
    X = np.zeros(X_Stars.shape[1])

    # Select First Domain
    idx = np.random.choice(len(Domains))

    # Domain Sequence
    idx_seq = []
    X_seq = []
    F_seq = []

    ts = range(T)
    for t in ts:
        print('t = ' + str(t), end='\r')
        # record prediction
        X_seq += [X]
        # record domain
        idx_seq += [idx]
        # retrieve domain
        Domain = Domains[idx]
        # record F
        FX = Domain.F(X)
        F_seq += [FX]
        # update prediction
        X = BoxProjection(lo=0).P(X, -eta, FX)
        # update domain
        idx = np.random.choice(len(Domains))

    print('Computing Optimal Strategy')

    weights = np.bincount(idx_seq, minlength=len(Domains)) / len(idx_seq)
    print('Weights: ', weights)

    # Compute Equilibrium of Average Domain
    Domain = AverageDomains(Domains, weights=weights)

    # Set Method
    Method = HeunEuler_PhaseSpace(Domain=Domain,
                                  P=BoxProjection(lo=0),
                                  Delta0=1e-5)

    # Initialize Starting Point
    Start = np.zeros(Domain.Dim)

    # Assert PSD - a sum of PSD matrices is PSD, but it doesn't hurt to check
    J = approx_jacobian(Domain.F, Start)
    eigs = np.linalg.eigvals(J + J.T)
    assert np.all(eigs > 0)
    sigma = min(eigs)

    # Calculate Initial Gap
    gap_0 = Domain.gap_rplus(Start)

    # Set Options
    Init = Initialization(Step=-1e-10)
    Term = Termination(MaxIter=25000, Tols=[(Domain.gap_rplus, 1e-10 * gap_0)])
    Repo = Reporting(Requests=[Domain.gap_rplus])
    Misc = Miscellaneous()
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Print Stats
    PrintSimStats(Domain, Method, Options)

    # Start Solver
    tic = time.time()
    SOI_Results = Solve(Start, Method, Domain, Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options, SOI_Results, Method, toc)

    print('Computing Regrets')

    # Record X_Opt
    X_Opt = SOI_Results.TempStorage['Data'][-1]

    # Record constants for bounds
    L = np.sqrt(np.mean(np.linalg.norm(F_seq, axis=1)**2.))
    # B = np.linalg.norm(X_Opt)
    B = 2. * np.max(np.linalg.norm(X_Stars, axis=1))
    eta_opt = B / (L * np.sqrt(2 * T))
    bound_opt = B * L * np.sqrt(2 * T)
    reg_bound = (B**2) / (2 * eta) + eta * T * L**2

    opt_distances = []
    equi_distances = []
    regret_standards = []
    regret_news = []
    Fnorms = []
    stokes_exact = []
    stokes = []
    areas_exact = []
    areas = []
    ts = range(T)
    for t in ts:
        print('t = ' + str(t), end='\r')
        idx = idx_seq[t]
        X = X_seq[t]
        # retrieve domain
        Domain = Domains[idx]
        # retrieve equilibrium / reference vector
        if t > 0:
            equi = X_seq[t - 1]
        else:
            # equi = np.zeros_like(X)
            equi = X
        # calculate distance
        opt_distances += [np.linalg.norm(X_Opt - X)]
        equi_distances += [np.linalg.norm(equi - X)]
        # calculate standard regret
        ci_predict = ContourIntegral(Domain, LineContour(equi, X))
        predict_loss = integral(ci_predict)
        ci_opt = ContourIntegral(Domain, LineContour(equi, X_Opt))
        predict_opt = integral(ci_opt)
        regret_standards += [predict_loss - predict_opt]
        # calculate new regret
        ci_new = ContourIntegral(Domain, LineContour(X_Opt, X))
        regret_news += [integral(ci_new)]
        # calculate bound
        area_exact = herons(X_Opt, X, equi)  # exact area
        area = eta_opt * L * (np.linalg.norm(X) + B)
        areas_exact += [area_exact]
        areas += [area]
        stokes_exact += [CurlBounds[idx] * area_exact]
        stokes += [CurlBounds[idx] * area]
        # stokes += [np.max(CurlBounds[idx]*regret_news[-1]/sigma,0)]
        # calculate Fnorm
        Fnorms += [np.linalg.norm(F_seq[t])]

    ts_p1 = range(1, T + 1)
    opt_distances_avg = np.divide(np.cumsum(opt_distances), ts_p1)
    equi_distances_avg = np.divide(np.cumsum(equi_distances), ts_p1)
    regret_standards_avg = np.divide(np.cumsum(regret_standards), ts_p1)
    regret_news_avg = np.divide(np.cumsum(regret_news), ts_p1)
    areas_exact_avg = np.divide(np.cumsum(areas_exact), ts_p1)
    areas_avg = np.divide(np.cumsum(areas), ts_p1)
    stokes_exact_avg = np.divide(np.cumsum(stokes_exact), ts_p1)
    stokes_avg = np.divide(np.cumsum(stokes), ts_p1)
    Fnorms_avg = np.divide(np.cumsum(Fnorms), ts_p1)

    np.savez_compressed('NoRegret_MLN_new.npz',
                        opt_d_avg=opt_distances_avg,
                        equi_d_avg=equi_distances_avg,
                        rs_avg=regret_standards_avg,
                        rn_avg=regret_news_avg,
                        stokes_exact=stokes_exact_avg,
                        stokes=stokes_avg)

    plt.subplot(2, 1, 2)
    plt.semilogy(ts,
                 opt_distances_avg,
                 'k',
                 label='Average Distance to Optimal')
    plt.semilogy(ts,
                 equi_distances_avg,
                 'r',
                 label='Average Distance to Reference')
    plt.semilogy(ts, areas_exact_avg, 'g-', label='Area (exact)')
    plt.semilogy(ts, areas_avg, 'm-', label='Area')
    plt.semilogy(ts, Fnorms_avg, 'b-', label='Fnorms')
    # plt.title('Demonstration of No-Regret on MLN')
    plt.xlabel('Time Step')
    plt.ylabel('Euclidean Distance')
    # plt.legend()
    lgd1 = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)

    plt.subplot(2, 1, 1)
    plt.plot(ts,
             regret_standards_avg,
             'r--o',
             markevery=T // 20,
             label=r'regret$_{s}$')
    plt.plot(ts, regret_news_avg, 'b-', label=r'regret$_{n}$')
    plt.fill_between(ts,
                     regret_news_avg - stokes,
                     regret_news_avg + stokes,
                     facecolor='c',
                     alpha=0.2,
                     zorder=5,
                     label='Stokes Bound')
    plt.fill_between(ts,
                     regret_news_avg - stokes_exact,
                     regret_news_avg + stokes_exact,
                     facecolor='c',
                     alpha=0.2,
                     zorder=5,
                     label='Stokes Bound (exact)')

    # plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    # plt.xlabel('Time Step')
    plt.ylabel('Aggregate System-Wide Loss')
    plt.xlim([0, T])
    plt.ylim([-5000, 5000])
    # plt.legend(loc='lower right')
    lgd2 = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
    plt.title('Demonstration of No-Regret on MLN')

    plt.savefig('NoRegret_MLN_new.pdf',
                format='pdf',
                additional_artists=[lgd1, lgd2],
                bbox_inches='tight')

    fontsize = 18
    plt.figure()
    plt.subplot(1, 1, 1)
    plt.plot(ts,
             regret_standards_avg,
             'r--o',
             markevery=T // 20,
             label=r'regret$_{s}$')
    plt.plot(ts, regret_news_avg, 'b-', label=r'regret$_{n}$')
    # plt.fill_between(ts, regret_news_avg-stokes, regret_news_avg+stokes,
    #                  facecolor='c', alpha=0.2, zorder=5, label='Stokes Bound')
    plt.fill_between(ts,
                     regret_news_avg - stokes_exact,
                     regret_news_avg + stokes_exact,
                     facecolor='c',
                     alpha=0.2,
                     zorder=5,
                     label='Stokes Bound')

    plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    plt.xlabel('Time Step', fontsize=fontsize)
    plt.ylabel('Negative Auto-Welfare', fontsize=fontsize)
    plt.xlim([0, T])
    plt.ylim([0, 5000])
    # plt.legend(loc='lower right')
    lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, fontsize=fontsize)
    plt.title('Demonstration of No-Regret on MLN', fontsize=fontsize)

    plt.savefig('NoRegret_MLN_new2.pdf',
                format='pdf',
                additional_artists=[lgd],
                bbox_inches='tight')

    plt.figure()
    plt.subplot(1, 1, 1)
    plt.plot(ts,
             -regret_standards_avg,
             'r--o',
             markevery=T // 20,
             label=r'regret$_{2}$')
    plt.plot(ts, -regret_news_avg, 'b-', label=r'regret$_{1}$')
    # plt.fill_between(ts, regret_news_avg-stokes, regret_news_avg+stokes,
    #                  facecolor='c', alpha=0.2, zorder=5, label='Stokes Bound')
    plt.fill_between(ts,
                     -regret_news_avg - stokes_exact,
                     -regret_news_avg + stokes_exact,
                     facecolor='c',
                     alpha=0.2,
                     zorder=5,
                     label='Stokes Bound')

    plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    plt.xlabel('Time Step', fontsize=fontsize)
    plt.ylabel('Auto-Welfare Regret', fontsize=fontsize)
    plt.xlim([0, T])
    plt.ylim([-5000, 0])
    # plt.legend(loc='lower right')
    lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, fontsize=fontsize)
    plt.title('Demonstration of No-Regret on MLN', fontsize=fontsize)

    plt.savefig('NoRegret_MLN_new3.pdf',
                format='pdf',
                additional_artists=[lgd],
                bbox_inches='tight')

    plt.figure()
    plt.subplot(1, 1, 1)
    plt.plot(ts, regret_news_avg, 'b-')
    # plt.fill_between(ts, regret_news_avg-stokes, regret_news_avg+stokes,
    #                  facecolor='c', alpha=0.2, zorder=5, label='Stokes Bound')
    # plt.fill_between(ts, -regret_news_avg-stokes_exact, -regret_news_avg+stokes_exact,
    #                  facecolor='c', alpha=0.2, zorder=5, label='Stokes Bound')

    plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    plt.xlabel('Time Step', fontsize=fontsize)
    plt.ylabel('OMO Path Integral Regret', fontsize=fontsize)
    plt.xlim([0, T])
    plt.ylim([0, 5000])
    # plt.legend(loc='lower right')
    # lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, fontsize=fontsize)
    plt.title('Demonstration of No-Regret on MLN', fontsize=fontsize)

    plt.savefig('NoRegret_MLN_new4.pdf', format='pdf', bbox_inches='tight')

    sat_exact = np.logical_or(
        regret_standards_avg >= regret_news_avg - stokes_exact,
        regret_standards_avg <= regret_news_avg + stokes_exact)
    sat = np.logical_or(regret_standards_avg >= regret_news_avg - stokes,
                        regret_standards_avg <= regret_news_avg + stokes)

    embed()
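
herons(X_Opt, X, equi) above supplies the exact triangle area used in the Stokes bound. Assuming the helper implements Heron's formula on the three vertices (the version below is a sketch, not the library's code), it could look like this:

import numpy as np

def herons(a, b, c):
    # Area of the triangle with vertices a, b, c via Heron's formula.
    x = np.linalg.norm(b - a)
    y = np.linalg.norm(c - b)
    z = np.linalg.norm(a - c)
    s = 0.5 * (x + y + z)
    # max(..., 0) guards against tiny negative values from round-off
    # when the three points are nearly collinear.
    return np.sqrt(max(s * (s - x) * (s - y) * (s - z), 0.))

if __name__ == '__main__':
    # A 3-4-5 right triangle has area 6.
    print(herons(np.array([0., 0.]), np.array([3., 0.]), np.array([3., 4.])))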
Example #4
if __name__ == '__main__':
    # Creating a random LQGAN
    dim = 2
    s = (dim**2 + dim) // 2
    mu = np.zeros(dim)
    L = 10 * np.random.rand(dim, dim) - 5 + np.diag(5 * np.ones(dim))
    L[range(dim), range(dim)] = np.clip(L[range(dim), range(dim)], 1e-8,
                                        np.inf)
    L = np.tril(L)
    sig = np.dot(L, L.T)
    # sig = np.diag(np.random.rand(dim)/np.sqrt(2.))
    Domain = LQGAN(mu=mu, sig=sig)

    from VISolver.Projection import BoxProjection
    # Set Constraints
    loA = -np.inf * np.ones((dim, dim))
    loA[range(dim), range(dim)] = 1e-2
    lo = np.hstack(
        ([-np.inf] * (dim + s), loA[np.tril_indices(dim)], [-np.inf] * dim))
    P = BoxProjection(lo=lo)

    mx = -1
    for i in range(10000):
        Start = P.P(100 * np.random.rand(Domain.Dim) - 50.)
        jexact = Domain.J(Start)
        japprox = approx_jacobian(Domain._F, Start)
        newmx = np.max(np.abs(jexact - japprox))
        mx = max(mx, newmx)

    print(mx)
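
The loop above compares the closed-form Jacobian Domain.J against approx_jacobian(Domain._F, Start) and tracks the largest absolute discrepancy. The same pattern is sketched below on a toy map with a known Jacobian, using a simple forward-difference approximation as a stand-in for the library helper (whose implementation is not shown here).

import numpy as np

def approx_jacobian_fd(F, x, eps=1e-6):
    # Forward-difference Jacobian of F at x (illustrative stand-in).
    x = np.asarray(x, dtype=float)
    f0 = np.asarray(F(x))
    J = np.zeros((f0.size, x.size))
    for i in range(x.size):
        dx = np.zeros_like(x)
        dx[i] = eps
        J[:, i] = (np.asarray(F(x + dx)) - f0) / eps
    return J

if __name__ == '__main__':
    A = np.array([[2., 1.], [0., 3.]])
    F = lambda x: A.dot(x) + np.array([np.sin(x[0]), 0.])
    x0 = np.array([0.5, -1.0])
    J_exact = A + np.array([[np.cos(x0[0]), 0.], [0., 0.]])
    J_approx = approx_jacobian_fd(F, x0)
    # Largest absolute discrepancy, analogous to mx in the example above.
    print(np.max(np.abs(J_exact - J_approx)))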
Example #5
def Demo():

    #__SERVICE_ORIENTED_INTERNET__##############################################
    N = 2  # number of possible maps
    T = 100  # number of time steps
    eta = .01  # learning rate

    # Define Domains and Compute Equilibria
    Domains = []
    X_Stars = []
    CurlBounds = []
    # for n in range(N):
    n = 0
    while len(X_Stars) < N:
        # Create Domain
        Network = CreateRandomNetwork(m=3,n=2,o=2,seed=n)
        Domain = SOI(Network=Network,alpha=2)

        # Record Domain
        Domains += [Domain]

        # Set Method
        Method = HeunEuler(Domain=Domain,P=BoxProjection(lo=0),Delta0=1e-3)

        # Initialize Starting Point
        Start = np.zeros(Domain.Dim)

        # Calculate Initial Gap
        gap_0 = Domain.gap_rplus(Start)

        # Calculate Curl Bound
        J = approx_jacobian(Domain.F,Start)
        if not np.all(np.linalg.eigvals(J+J.T) >= 0):
            pass  # note: unlike the other demos, a non-PSD Jacobian does not skip this domain
        _J = approx_jacobian(Domain.F,Start+0.5)
        assert np.allclose(J,_J,atol=1e-5)
        CurlBounds += [np.sqrt(18)*svds(J,k=1,which='LM',return_singular_vectors=False).item()]

        # Set Options
        Init = Initialization(Step=-1e-10)
        Term = Termination(MaxIter=25000,Tols=[(Domain.gap_rplus,1e-6*gap_0)])
        Repo = Reporting(Requests=[Domain.gap_rplus])
        Misc = Miscellaneous()
        Options = DescentOptions(Init,Term,Repo,Misc)

        # Print Stats
        PrintSimStats(Domain,Method,Options)

        # Start Solver
        tic = time.time()
        SOI_Results = Solve(Start,Method,Domain,Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options,SOI_Results,Method,toc)

        # Record X_Star
        X_Star = SOI_Results.TempStorage['Data'][-1]
        X_Stars += [X_Star]
        n += 1
    X_Stars = np.asarray(X_Stars)

    # Compute Equilibrium of Average Domain
    Domain = AverageDomains(Domains)

    # Set Method
    Method = HeunEuler_PhaseSpace(Domain=Domain,P=BoxProjection(lo=0),Delta0=1e-3)

    # Initialize Starting Point
    Start = np.zeros(Domain.Dim)

    J = approx_jacobian(Domain.F,Start)
    assert np.all(np.linalg.eigvals(J+J.T) >= 0)

    # Calculate Initial Gap
    gap_0 = Domain.gap_rplus(Start)

    # Set Options
    Init = Initialization(Step=-1e-10)
    Term = Termination(MaxIter=25000,Tols=[(Domain.gap_rplus,1e-10*gap_0)])
    Repo = Reporting(Requests=[Domain.gap_rplus])
    Misc = Miscellaneous()
    Options = DescentOptions(Init,Term,Repo,Misc)

    # Print Stats
    PrintSimStats(Domain,Method,Options)

    # Start Solver
    tic = time.time()
    SOI_Results = Solve(Start,Method,Domain,Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options,SOI_Results,Method,toc)

    # Record X_Opt
    X_Opt = SOI_Results.TempStorage['Data'][-1]
    # X_Opt = X_Stars[0]

    print('Starting Online Learning')

    # Set First Prediction
    X = np.zeros(X_Stars.shape[1])

    # Select First Domain
    idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))

    distances = []
    loss_infs = []
    regret_standards = []
    regret_news = []
    stokes = []
    ts = range(T)
    for t in ts:
        print('t = '+str(t))
        # retrieve domain
        Domain = Domains[idx]
        # retrieve equilibrium / reference vector
        # equi = X_Stars[idx]
        equi = np.zeros_like(X_Stars[idx])
        # calculate distance
        distances += [np.linalg.norm(X_Opt-X)]
        # calculate infinity loss
        # loss_infs += [infinity_loss(Domain,X)]
        # calculate standard regret
        ci_predict = ContourIntegral(Domain,LineContour(equi,X))
        predict_loss = integral(ci_predict)
        ci_opt = ContourIntegral(Domain,LineContour(equi,X_Opt))
        predict_opt = integral(ci_opt)
        regret_standards += [predict_loss - predict_opt]
        # calculate new regret
        ci_new = ContourIntegral(Domain,LineContour(X_Opt,X))
        regret_news += [integral(ci_new)]
        # calculate bound
        # area = 0.5*np.prod(np.sort([np.linalg.norm(X_Opt-equi),np.linalg.norm(X-X_Opt),np.linalg.norm(equi-X)])[:2])  # area upper bound
        area = herons(X_Opt,X,equi)  # exact area
        stokes += [CurlBounds[idx]*area]
        # update prediction
        X = BoxProjection(lo=0).P(X,-eta,Domain.F(X))
        # update domain
        idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))
    # embed()
    ts_p1 = range(1,T+1)
    distances_avg = np.divide(np.cumsum(distances),ts_p1)
    # loss_infs_avg = np.divide(np.cumsum(loss_infs),ts_p1)
    regret_standards_avg = np.divide(np.cumsum(regret_standards),ts_p1)
    regret_news_avg = np.divide(np.cumsum(regret_news),ts_p1)
    stokes = np.divide(np.cumsum(stokes),ts_p1)

    # np.savez_compressed('NoRegret_MLN.npz',d_avg=distances_avg,
    #                     linf_avg=loss_infs_avg,rs_avg=regret_standards_avg,
    #                     rn_avg=regret_news_avg,stokes=stokes)

    plt.subplot(2, 1, 2)
    plt.plot(ts, distances_avg, 'k',label='Average Distance')
    plt.title('Demonstration of No-Regret on MLN')
    plt.ylabel('Euclidean Distance')
    plt.legend()

    plt.subplot(2, 1, 1)
    # plt.plot(ts, loss_infs_avg, 'k--', label=r'loss$_{\infty}$')
    plt.plot(ts, regret_standards_avg, 'r--o', markevery=T//20,
             label=r'regret$_{s}$')
    plt.plot(ts, regret_news_avg, 'b-', label=r'regret$_{n}$')
    # plt.fill_between(ts, regret_news_avg-stokes, regret_news_avg+stokes,
    #                  facecolor='c', alpha=0.2, zorder=0, label='Stokes Bound')
    plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    plt.xlabel('Time Step')
    plt.ylabel('Aggregate System-Wide Loss')
    plt.xlim([0,T])
    # plt.ylim([-250,1000])
    plt.legend()
    plt.title('Demonstration of No-Regret on MLN')

    plt.savefig('NoRegret_MLN2')

    # data = np.load('NoRegret2.npz')
    # distances_avg = data['d_avg']
    # loss_infs_avg = data['linf_avg']
    # regret_standards_avg = data['rs_avg']
    # regret_news_avg = data['rn_avg']
    # stokes = data['stokes']
    # ts = range(len(distances_avg))

    embed()
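
The online-learning loop above updates the prediction with X = BoxProjection(lo=0).P(X, -eta, Domain.F(X)). Assuming P(X, step, direction) moves X by step * direction and clips the result back into the box (which is how it is used here), a minimal self-contained sketch of that projected step on a toy field is:

import numpy as np

def box_projected_step(X, eta, FX, lo=0., hi=np.inf):
    # Assumed behaviour of BoxProjection(lo=0).P(X, -eta, FX):
    # step against the field and clip the result into [lo, hi].
    return np.clip(X - eta * FX, lo, hi)

if __name__ == '__main__':
    # Toy linear field F(X) = X - target; the projection keeps the
    # second coordinate pinned at the lower bound 0.
    target = np.array([1., -2.])
    F = lambda X: X - target
    X = np.zeros(2)
    for _ in range(1000):
        X = box_projected_step(X, 0.01, F(X))
    print(X)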
Example #6
def Demo():

    #__SERVICE_ORIENTED_INTERNET__##############################################
    N = 10  # number of possible maps
    T = 1000  # number of time steps
    eta = 1e-3  # learning rate

    print('Creating Domains')

    # Define Domains and Compute Equilibria
    Domains = []
    X_Stars = []
    CurlBounds = []
    n = 0
    while len(Domains) < N:
        # Create Domain
        Network = CreateRandomNetwork(m=3,n=2,o=2,seed=None)
        Domain = SOI(Network=Network,alpha=2)

        # Initialize Starting Point
        Start = np.zeros(Domain.Dim)

        # Assert PD
        J = approx_jacobian(Domain.F,Start)
        eigs = np.linalg.eigvals(J+J.T)
        eigs_i = np.abs(np.linalg.eigvals(J-J.T))
        if not np.all(eigs > 0):
            continue
        print(eigs.min(),eigs.max())
        print(eigs_i.min(),eigs_i.max())
        _J = approx_jacobian(Domain.F,Start+0.5)
        assert np.allclose(J,_J,atol=1e-5)  # assert J is constant (unique for SOI)

        # Record Domain
        Domains += [Domain]

        # Calculate Initial Gap
        gap_0 = Domain.gap_rplus(Start)

        # Calculate Curl Bound
        CurlBounds += [np.sqrt(18)*svds(J,k=1,which='LM',return_singular_vectors=False).item()]

        # Set Method
        Method = HeunEuler(Domain=Domain,P=BoxProjection(lo=0),Delta0=1e-3)

        # Set Options
        Init = Initialization(Step=-1e-10)
        Term = Termination(MaxIter=25000,Tols=[(Domain.gap_rplus,1e-6*gap_0)])
        Repo = Reporting(Requests=[Domain.gap_rplus])
        Misc = Miscellaneous()
        Options = DescentOptions(Init,Term,Repo,Misc)

        # Print Stats
        PrintSimStats(Domain,Method,Options)

        # Start Solver
        tic = time.time()
        SOI_Results = Solve(Start,Method,Domain,Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options,SOI_Results,Method,toc)

        # Record X_Star
        X_Star = SOI_Results.TempStorage['Data'][-1]
        X_Stars += [X_Star]
        n += 1
    X_Stars = np.asarray(X_Stars)

    print('Starting Online Learning')

    # Set First Prediction
    X = np.zeros(X_Stars.shape[1])
    # X = np.mean(X_Stars,axis=0)
    # X += np.random.rand(*X.shape)*np.linalg.norm(X)

    # Select First Domain
    # idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))
    idx = np.random.choice(len(Domains))

    # Domain Sequence
    idx_seq = []
    X_seq = []
    F_seq = []

    ts = range(T)
    for t in ts:
        print('t = '+str(t),end='\r')
        # record prediction
        X_seq += [X]
        # record domain
        idx_seq += [idx]
        # retrieve domain
        Domain = Domains[idx]
        # record F
        FX = Domain.F(X)
        F_seq += [FX]
        # update prediction
        X = BoxProjection(lo=0).P(X,-eta,FX)
        # update domain
        # idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))
        idx = np.random.choice(len(Domains))

    L = np.sqrt(np.mean(np.linalg.norm(F_seq,axis=1)**2.))

    print('Computing Optimal Strategy')

    weights = np.bincount(idx_seq,minlength=len(Domains))/len(idx_seq)
    print(weights)

    # Compute Equilibrium of Average Domain
    Domain = AverageDomains(Domains,weights=weights)

    # Set Method
    Method = HeunEuler_PhaseSpace(Domain=Domain,P=BoxProjection(lo=0),Delta0=1e-5)

    # Initialize Starting Point
    Start = np.zeros(Domain.Dim)

    # Assert PSD - a sum of PSD matrices is PSD, but it doesn't hurt to check
    J = approx_jacobian(Domain.F,Start)
    eigs = np.linalg.eigvals(J+J.T)
    eigs_i = np.abs(np.linalg.eigvals(J-J.T))
    assert np.all(eigs > 0)
    print(eigs.min(),eigs.max())
    print(eigs_i.min(),eigs_i.max())

    # Calculate Initial Gap
    gap_0 = Domain.gap_rplus(Start)

    # Set Options
    Init = Initialization(Step=-1e-10)
    Term = Termination(MaxIter=25000,Tols=[(Domain.gap_rplus,1e-10*gap_0)])
    Repo = Reporting(Requests=[Domain.gap_rplus])
    Misc = Miscellaneous()
    Options = DescentOptions(Init,Term,Repo,Misc)

    # Print Stats
    PrintSimStats(Domain,Method,Options)

    # Start Solver
    tic = time.time()
    SOI_Results = Solve(Start,Method,Domain,Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options,SOI_Results,Method,toc)

    print('Computing Regrets')

    # Record X_Opt
    X_Opt = SOI_Results.TempStorage['Data'][-1]
    # X_Opt = X_Stars[0]
    B = np.linalg.norm(X_Opt)

    eta_opt = B/(L*np.sqrt(2*T))
    bound_opt = B*L*np.sqrt(2*T)
    reg_bound = (B**2)/(2*eta) + eta*T*L**2

    distances = []
    loss_infs = []
    regret_standards = []
    regret_news = []
    stokes = []
    ts = range(T)
    for t in ts:
        print('t = '+str(t),end='\r')
        idx = idx_seq[t]
        X = X_seq[t]
        # retrieve domain
        Domain = Domains[idx]
        # retrieve equilibrium / reference vector
        equi = X_Stars[idx]
        # equi = np.zeros_like(X_Stars[idx])
        # calculate distance
        distances += [np.linalg.norm(X_Opt-X)]
        # calculate infinity loss
        # loss_infs += [infinity_loss(Domain,X)]
        # calculate standard regret
        ci_predict = ContourIntegral(Domain,LineContour(equi,X))
        predict_loss = integral(ci_predict)
        ci_opt = ContourIntegral(Domain,LineContour(equi,X_Opt))
        predict_opt = integral(ci_opt)
        regret_standards += [predict_loss - predict_opt]
        # calculate new regret
        ci_new = ContourIntegral(Domain,LineContour(X_Opt,X))
        regret_news += [integral(ci_new)]
        # calculate bound
        # area = 0.5*np.prod(np.sort([np.linalg.norm(X_Opt-equi),np.linalg.norm(X-X_Opt),np.linalg.norm(equi-X)])[:2])  # area upper bound
        area = herons(X_Opt,X,equi)  # exact area
        stokes += [CurlBounds[idx]*area]
        # # update prediction
        # X = BoxProjection(lo=0).P(X,-eta,Domain.F(X))
        # # update domain
        # idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))
    # embed()
    ts_p1 = range(1,T+1)
    distances_avg = np.divide(np.cumsum(distances),ts_p1)
    # loss_infs_avg = np.divide(np.cumsum(loss_infs),ts_p1)
    regret_standards_avg = np.divide(np.cumsum(regret_standards),ts_p1)
    regret_news_avg = np.divide(np.cumsum(regret_news),ts_p1)
    stokes = np.divide(np.cumsum(stokes),ts_p1)

    # np.savez_compressed('NoRegret_MLN2c.npz',d_avg=distances_avg,
    #                     linf_avg=loss_infs_avg,rs_avg=regret_standards_avg,
    #                     rn_avg=regret_news_avg,stokes=stokes)

    plt.subplot(2, 1, 2)
    plt.plot(ts, distances_avg, 'k',label='Average Distance')
    # plt.title('Demonstration of No-Regret on MLN')
    plt.xlabel('Time Step')
    plt.ylabel('Euclidean Distance')
    plt.legend()

    plt.subplot(2, 1, 1)
    # plt.plot(ts, loss_infs_avg, 'k--', label=r'loss$_{\infty}$')
    plt.plot(ts, regret_standards_avg, 'r--o', markevery=T//20,
             label=r'regret$_{s}$')
    plt.plot(ts, regret_news_avg, 'b-', label=r'regret$_{n}$')
    # plt.fill_between(ts, regret_news_avg-stokes, regret_news_avg+stokes,
    #                  facecolor='c', alpha=0.2, zorder=0, label='Stokes Bound')
    plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    # plt.xlabel('Time Step')
    plt.ylabel('Aggregate System-Wide Loss')
    plt.xlim([0,T])
    # plt.ylim([-200,200])
    plt.legend(loc='lower right')
    plt.title('Demonstration of No-Regret on MLN')

    plt.savefig('NoRegret_MLN2c')

    # data = np.load('NoRegret2.npz')
    # distances_avg = data['d_avg']
    # loss_infs_avg = data['linf_avg']
    # regret_standards_avg = data['rs_avg']
    # regret_news_avg = data['rn_avg']
    # stokes = data['stokes']
    # ts = range(len(distances_avg))

    embed()
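
The curl bound above is np.sqrt(18) times the largest singular value of J, computed with scipy's sparse svds routine. The short sanity check below compares that call against a dense SVD on a random matrix; the matrix and its size are purely illustrative.

import numpy as np
from scipy.sparse.linalg import svds

if __name__ == '__main__':
    np.random.seed(0)
    J = np.random.randn(12, 12)
    # Largest singular value via the sparse solver, as in the demo ...
    top_sparse = svds(J, k=1, which='LM', return_singular_vectors=False).item()
    # ... and via a dense SVD, as a sanity check.
    top_dense = np.linalg.svd(J, compute_uv=False)[0]
    print(top_sparse, top_dense)
    # The demo scales this quantity by sqrt(18) to form its curl bound.
    print(np.sqrt(18) * top_sparse)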
Example #7
def Demo():

    #__SERVICE_ORIENTED_INTERNET__##############################################
    N = 10  # number of possible maps
    T = 1000  # number of time steps
    eta = 1e-3  # learning rate

    print('Creating Domains')

    # Define Domains and Compute Equilibria
    Domains = []
    X_Stars = []
    CurlBounds = []
    n = 0
    while len(Domains) < N:
        # Create Domain
        Network = CreateRandomNetwork(m=3, n=2, o=2, seed=None)
        Domain = SOI(Network=Network, alpha=2)

        # Initialize Starting Point
        Start = np.zeros(Domain.Dim)

        # Assert PD
        J = approx_jacobian(Domain.F, Start)
        eigs = np.linalg.eigvals(J + J.T)
        eigs_i = np.abs(np.linalg.eigvals(J - J.T))
        if not np.all(eigs > 0):
            continue
        print(eigs.min(), eigs.max())
        print(eigs_i.min(), eigs_i.max())
        _J = approx_jacobian(Domain.F, Start + 0.5)
        assert np.allclose(J, _J,
                           atol=1e-5)  # assert J is constant (unique for SOI)

        # Record Domain
        Domains += [Domain]

        # Calculate Initial Gap
        gap_0 = Domain.gap_rplus(Start)

        # Calculate Curl Bound
        CurlBounds += [
            np.sqrt(18) *
            svds(J, k=1, which='LM', return_singular_vectors=False).item()
        ]

        # Set Method
        Method = HeunEuler(Domain=Domain, P=BoxProjection(lo=0), Delta0=1e-3)

        # Set Options
        Init = Initialization(Step=-1e-10)
        Term = Termination(MaxIter=25000,
                           Tols=[(Domain.gap_rplus, 1e-6 * gap_0)])
        Repo = Reporting(Requests=[Domain.gap_rplus])
        Misc = Miscellaneous()
        Options = DescentOptions(Init, Term, Repo, Misc)

        # Print Stats
        PrintSimStats(Domain, Method, Options)

        # Start Solver
        tic = time.time()
        SOI_Results = Solve(Start, Method, Domain, Options)
        toc = time.time() - tic

        # Print Results
        PrintSimResults(Options, SOI_Results, Method, toc)

        # Record X_Star
        X_Star = SOI_Results.TempStorage['Data'][-1]
        X_Stars += [X_Star]
        n += 1
    X_Stars = np.asarray(X_Stars)

    print('Starting Online Learning')

    # Set First Prediction
    X = np.zeros(X_Stars.shape[1])
    # X = np.mean(X_Stars,axis=0)
    # X += np.random.rand(*X.shape)*np.linalg.norm(X)

    # Select First Domain
    # idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))
    idx = np.random.choice(len(Domains))

    # Domain Sequence
    idx_seq = []
    X_seq = []
    F_seq = []

    ts = range(T)
    for t in ts:
        print('t = ' + str(t), end='\r')
        # record prediction
        X_seq += [X]
        # record domain
        idx_seq += [idx]
        # retrieve domain
        Domain = Domains[idx]
        # record F
        FX = Domain.F(X)
        F_seq += [FX]
        # update prediction
        X = BoxProjection(lo=0).P(X, -eta, FX)
        # update domain
        # idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))
        idx = np.random.choice(len(Domains))

    L = np.sqrt(np.mean(np.linalg.norm(F_seq, axis=1)**2.))

    print('Computing Optimal Strategy')

    weights = np.bincount(idx_seq, minlength=len(Domains)) / len(idx_seq)
    print(weights)

    # Compute Equilibrium of Average Domain
    Domain = AverageDomains(Domains, weights=weights)

    # Set Method
    Method = HeunEuler_PhaseSpace(Domain=Domain,
                                  P=BoxProjection(lo=0),
                                  Delta0=1e-5)

    # Initialize Starting Point
    Start = np.zeros(Domain.Dim)

    # Assert PSD - a sum of PSD matrices is PSD, but it doesn't hurt to check
    J = approx_jacobian(Domain.F, Start)
    eigs = np.linalg.eigvals(J + J.T)
    eigs_i = np.abs(np.linalg.eigvals(J - J.T))
    assert np.all(eigs > 0)
    print(eigs.min(), eigs.max())
    print(eigs_i.min(), eigs_i.max())

    # Calculate Initial Gap
    gap_0 = Domain.gap_rplus(Start)

    # Set Options
    Init = Initialization(Step=-1e-10)
    Term = Termination(MaxIter=25000, Tols=[(Domain.gap_rplus, 1e-10 * gap_0)])
    Repo = Reporting(Requests=[Domain.gap_rplus])
    Misc = Miscellaneous()
    Options = DescentOptions(Init, Term, Repo, Misc)

    # Print Stats
    PrintSimStats(Domain, Method, Options)

    # Start Solver
    tic = time.time()
    SOI_Results = Solve(Start, Method, Domain, Options)
    toc = time.time() - tic

    # Print Results
    PrintSimResults(Options, SOI_Results, Method, toc)

    print('Computing Regrets')

    # Record X_Opt
    X_Opt = SOI_Results.TempStorage['Data'][-1]
    # X_Opt = X_Stars[0]
    B = np.linalg.norm(X_Opt)

    eta_opt = B / (L * np.sqrt(2 * T))
    bound_opt = B * L * np.sqrt(2 * T)
    reg_bound = (B**2) / (2 * eta) + eta * T * L**2

    distances = []
    loss_infs = []
    regret_standards = []
    regret_news = []
    stokes = []
    ts = range(T)
    for t in ts:
        print('t = ' + str(t), end='\r')
        idx = idx_seq[t]
        X = X_seq[t]
        # retrieve domain
        Domain = Domains[idx]
        # retrieve equilibrium / reference vector
        equi = X_Stars[idx]
        # equi = np.zeros_like(X_Stars[idx])
        # calculate distance
        distances += [np.linalg.norm(X_Opt - X)]
        # calculate infinity loss
        # loss_infs += [infinity_loss(Domain,X)]
        # calculate standard regret
        ci_predict = ContourIntegral(Domain, LineContour(equi, X))
        predict_loss = integral(ci_predict)
        ci_opt = ContourIntegral(Domain, LineContour(equi, X_Opt))
        predict_opt = integral(ci_opt)
        regret_standards += [predict_loss - predict_opt]
        # calculate new regret
        ci_new = ContourIntegral(Domain, LineContour(X_Opt, X))
        regret_news += [integral(ci_new)]
        # calculate bound
        # area = 0.5*np.prod(np.sort([np.linalg.norm(X_Opt-equi),np.linalg.norm(X-X_Opt),np.linalg.norm(equi-X)])[:2])  # area upper bound
        area = herons(X_Opt, X, equi)  # exact area
        stokes += [CurlBounds[idx] * area]
        # # update prediction
        # X = BoxProjection(lo=0).P(X,-eta,Domain.F(X))
        # # update domain
        # idx = np.argmax(np.linalg.norm(X_Stars - X,axis=1))
    # embed()
    ts_p1 = range(1, T + 1)
    distances_avg = np.divide(np.cumsum(distances), ts_p1)
    # loss_infs_avg = np.divide(np.cumsum(loss_infs),ts_p1)
    regret_standards_avg = np.divide(np.cumsum(regret_standards), ts_p1)
    regret_news_avg = np.divide(np.cumsum(regret_news), ts_p1)
    stokes = np.divide(np.cumsum(stokes), ts_p1)

    # np.savez_compressed('NoRegret_MLN2c.npz',d_avg=distances_avg,
    #                     linf_avg=loss_infs_avg,rs_avg=regret_standards_avg,
    #                     rn_avg=regret_news_avg,stokes=stokes)

    plt.subplot(2, 1, 2)
    plt.plot(ts, distances_avg, 'k', label='Average Distance')
    # plt.title('Demonstration of No-Regret on MLN')
    plt.xlabel('Time Step')
    plt.ylabel('Euclidean Distance')
    plt.legend()

    plt.subplot(2, 1, 1)
    # plt.plot(ts, loss_infs_avg, 'k--', label=r'loss$_{\infty}$')
    plt.plot(ts,
             regret_standards_avg,
             'r--o',
             markevery=T // 20,
             label=r'regret$_{s}$')
    plt.plot(ts, regret_news_avg, 'b-', label=r'regret$_{n}$')
    # plt.fill_between(ts, regret_news_avg-stokes, regret_news_avg+stokes,
    #                  facecolor='c', alpha=0.2, zorder=0, label='Stokes Bound')
    plt.plot(ts, np.zeros_like(ts), 'w-', lw=1)
    # plt.xlabel('Time Step')
    plt.ylabel('Aggregate System-Wide Loss')
    plt.xlim([0, T])
    # plt.ylim([-200,200])
    plt.legend(loc='lower right')
    plt.title('Demonstration of No-Regret on MLN')

    plt.savefig('NoRegret_MLN2c')

    # data = np.load('NoRegret2.npz')
    # distances_avg = data['d_avg']
    # loss_infs_avg = data['linf_avg']
    # regret_standards_avg = data['rs_avg']
    # regret_news_avg = data['rn_avg']
    # stokes = data['stokes']
    # ts = range(len(distances_avg))

    embed()
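
The constants eta_opt, bound_opt, and reg_bound above restate the online-gradient-descent step-size and regret-bound expressions in terms of the measured B and L. A standalone sketch of those same formulas, with purely illustrative numbers, is:

import numpy as np

def ogd_regret_constants(B, L, T, eta):
    # Regret-bound expressions as computed in the demo above:
    # reg_bound = B^2 / (2*eta) + eta*T*L^2, minimized (up to constants)
    # at eta_opt = B / (L*sqrt(2*T)), giving bound_opt = B*L*sqrt(2*T).
    reg_bound = (B**2) / (2 * eta) + eta * T * L**2
    eta_opt = B / (L * np.sqrt(2 * T))
    bound_opt = B * L * np.sqrt(2 * T)
    return eta_opt, bound_opt, reg_bound

if __name__ == '__main__':
    # In the demo, B and L are measured from the run; these are placeholders.
    print(ogd_regret_constants(B=10., L=5., T=1000, eta=1e-3))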