Example #1
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
# PdfSolver, PdfGrid, PDElearn, and makesavename come from this project's own
# modules; IC (list of initial-condition dicts) and FIGFILE (figure output
# directory) are assumed to be defined at module level.


def runML(setting):

    version = 1
    loadname = [makesavename(i, version) for i in IC]

    S1 = PdfSolver()
    fuk = []
    fu = []
    kmean = []

    for i in range(len(IC)):
        fuki, fui, kmeani, gridvarsi, ICparamsi = S1.loadSolution(loadname[i])
        fuk.append(fuki)
        fu.append(fui)
        kmean.append(kmeani)

    # gridvarsi / ICparamsi come from the last loaded solution; the grid and
    # IC parameters are assumed identical across all solutions in IC
    uu, kk, xx, tt = gridvarsi
    muk, sigk, mink, maxk, sigu, a, b = ICparamsi

    grid = PdfGrid()
    grid.setGrid(xx, tt, uu, kk)
    grid.printDetails()

    # sweep of regularization coefficients
    lmnum = 40
    lmmin = 0.0000001
    lmmax = 0.00005
    lm = np.linspace(lmmin, lmmax, lmnum)
    options = ['linear', '2ndorder']

    if setting == 'sepIC':

        for opt in options:

            # Get the maximum number of feature coefficients: maxncoef
            difflearn = PDElearn(fuk[0],
                                 grid,
                                 kmean[0],
                                 fu=fu[0],
                                 trainratio=0.8,
                                 debug=False)
            featurelist, labels, featurenames = difflearn.makeFeatures(
                option=opt)
            maxncoef = len(featurenames) - 1

            print('#################### %s ########################' % (opt))
            regopts = 2  # two regularizers per sweep: L1 and L2
            er = np.zeros((len(IC), regopts, len(lm)))
            coef = np.zeros((len(IC), regopts, len(lm), maxncoef))
            numcoefl1 = np.zeros((len(IC), len(lm)))
            for i in range(len(IC)):
                print('---- Initial Condition ----')
                print('u0: ' + IC[i]['u0'])
                print('fu0: ' + IC[i]['fu0'])
                print('fk: ' + IC[i]['fk'])
                print('---- ----- ----')

                difflearn = PDElearn(fuk[i],
                                     grid,
                                     kmean[i],
                                     fu=fu[i],
                                     trainratio=0.8,
                                     debug=False)
                featurelist, labels, featurenames = difflearn.makeFeatures(
                    option=opt)
                Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                    featurelist, labels, shuffle=False)

                for j in range(len(lm)):
                    lin1 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L1',
                                           RegCoef=lm[j],
                                           maxiter=5000,
                                           tolerance=0.00001)
                    lin2 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L2',
                                           RegCoef=lm[j],
                                           maxiter=5000)
                    DL = [lin1, lin2]

                    for k in range(len(DL)):
                        er[i, k, j] = mean_squared_error(
                            ytest, DL[k].predict(Xtest))
                        coef[i, k, j, :] = DL[k].coef_[:maxncoef]

                    numcoefl1[i, j] = DL[0].sparse_coef_.getnnz()  # nonzeros kept by L1

            ## Plotting

            # Error as a function of lm
            fig = plt.figure()
            leg = []
            for i in range(len(IC)):
                plt.plot(lm, np.reshape(er[i, 0, :], (len(lm), )))
                leg.append(makesavename(IC[i], 1))

            figname = setting + ' Error vs regularization, L%d reg, %s' % (1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Error')
            plt.title(figname)
            plt.legend(leg)
            plt.savefig(FIGFILE + figname + '.pdf')

            # Sparsity as a function of lm
            fig = plt.figure()
            leg = []
            for i in range(len(IC)):
                plt.plot(lm, numcoefl1[i])
                leg.append(makesavename(IC[i], 1))

            figname = setting + ' LinearIC Sparsity in L%d reg, %s' % (1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Sparsity: Number of Nonzero Coefficients')
            plt.title(figname)
            plt.legend(leg)
            plt.savefig(FIGFILE + figname + '.pdf')

            # All coefficient values as a function of lm (one figure per IC)
            for j in range(len(IC)):
                fig = plt.figure()
                leg = []
                for i in range(len(featurenames) - 1):
                    plt.plot(lm, np.reshape(coef[j, 0, :, i], (len(lm), )))
                    leg.append(featurenames[i + 1])

                figname = setting + ' Linear features L%d reg, %s, IC%d' % (
                    1, opt, j)
                plt.xlabel('Regularization Coefficient')
                plt.ylabel('Coefficient Values')
                plt.title(figname)
                plt.legend(leg)
                plt.savefig(FIGFILE + figname + '.pdf')

        plt.show()

    elif setting == 'lumpIC':

        for opt in options:

            # Get the maximum number of feature coefficients: maxncoef
            difflearn = PDElearn(fuk[0],
                                 grid,
                                 kmean[0],
                                 fu=fu[0],
                                 trainratio=0.8,
                                 debug=False)
            featurelist, labels, featurenames = difflearn.makeFeatures(
                option=opt)
            maxncoef = len(featurenames) - 1

            print('#################### %s ########################' % (opt))
            regopts = 2  # two regularizers per sweep: L1 and L2
            er = np.zeros((regopts, len(lm)))
            coef = np.zeros((regopts, len(lm), maxncoef))
            numcoefl1 = np.zeros((len(lm), ))

            for i in range(len(IC)):
                difflearn = PDElearn(fuk[i],
                                     grid,
                                     kmean[i],
                                     fu=fu[i],
                                     trainratio=0.8,
                                     debug=False)
                featurelist, labels, featurenames = difflearn.makeFeatures(
                    option=opt)
                Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                    featurelist, labels, shuffle=False)

                if i == 0:
                    X_train, y_train = Xtrain, ytrain
                    X_test, y_test = Xtest, ytest
                else:
                    # np.append returns a new array; reassign to actually grow the sets
                    X_train = np.append(X_train, Xtrain, axis=0)
                    y_train = np.append(y_train, ytrain, axis=0)
                    X_test = np.append(X_test, Xtest, axis=0)
                    y_test = np.append(y_test, ytest, axis=0)

            for j in range(len(lm)):
                lin1 = difflearn.train(X_train,
                                       y_train,
                                       RegType='L1',
                                       RegCoef=lm[j],
                                       maxiter=5000,
                                       tolerance=0.00001)
                lin2 = difflearn.train(X_train,
                                       y_train,
                                       RegType='L2',
                                       RegCoef=lm[j],
                                       maxiter=5000)
                DL = [lin1, lin2]

                for k in range(len(DL)):
                    er[k, j] = mean_squared_error(y_test,
                                                  DL[k].predict(X_test))
                    coef[k, j, :] = DL[k].coef_[:maxncoef]

                numcoefl1[j] = DL[0].sparse_coef_.getnnz()

            ## Plotting

            # Error as a function of lm
            fig = plt.figure()
            plt.plot(lm, er[0, :])

            figname = setting + ' Error vs regularization, L%d reg, %s' % (1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Error')
            plt.title(figname)
            plt.savefig(FIGFILE + figname + '.pdf')

            # Sparsity as a function of lm
            fig = plt.figure()
            plt.plot(lm, numcoefl1)

            figname = setting + ' LinearIC Sparsity in L%d reg, %s' % (1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Sparsity: Number of Nonzero Coefficients')
            plt.title(figname)
            plt.savefig(FIGFILE + figname + '.pdf')

            # All coefficient values as a function of lm
            fig = plt.figure()
            leg = []
            for i in range(len(featurenames) - 1):
                plt.plot(lm, np.reshape(coef[0, :, i], (len(lm), )))
                leg.append(featurenames[i + 1])

            figname = setting + ' LinearIC Linear features L%d reg, %s' % (1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Coefficient Values')
            plt.title(figname)
            plt.legend(leg)
            plt.savefig(FIGFILE + figname + '.pdf')

        plt.show()
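
A minimal invocation sketch, assuming the module-level IC list and FIGFILE
path are configured and the solutions named by makesavename were saved
beforehand:

if __name__ == '__main__':
    runML('sepIC')   # one regularization sweep per initial condition
    runML('lumpIC')  # one sweep over the concatenated training sets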
Example #2
from sklearn.metrics import mean_squared_error
# PdfSolver, PdfGrid, and PDElearn come from this project's own modules.


def runML():
    loadname1 = 'u0exp_fu0gauss_fkgauss_1'
    loadname2 = 'u0lin_fu0gauss_fkgauss_1'
    loadname3 = 'u0lin_fu0gauss_fkuni_1'
    loadname4 = 'u0exp_fu0gauss_fkuni_1'
    loadname = [loadname1, loadname2, loadname3, loadname4]

    #loadname1 = 'u0exp_fu0gauss_fkgauss_0'
    #loadname2 = 'u0lin_fu0gauss_fkgauss_0'
    #loadname3 = 'u0lin_fu0gauss_fkuni_0'
    #loadname4 = 'u0exp_fu0gauss_fkuni_0'
    #loadname = [loadname1,loadname2,loadname3,loadname4]

    S1 = PdfSolver()

    fuk = []
    fu = []
    kmean = []

    for i in range(len(loadname)):
        fuki, fui, kmeani, gridvarsi, ICparamsi = S1.loadSolution(loadname[i])
        fuk.append(fuki)
        fu.append(fui)
        kmean.append(kmeani)

    uu, kk, xx, tt = gridvarsi
    muk, sigk, mink, maxk, sigu, a, b = ICparamsi

    grid = PdfGrid()
    grid.setGrid(xx, tt, uu, kk)
    grid.printDetails()

    # Indices of the datasets to build features and labels for
    p = (0, 1, 2, 3)

    options = ['all', 'linear', '2ndorder']
    for opt in options:
        print('#################### %s ########################' % (opt))
        DL = []
        X = []
        y = []
        for i in p:
            difflearn = PDElearn(fuk[i],
                                 grid,
                                 kmean[i],
                                 fu=fu[i],
                                 trainratio=1,
                                 debug=False)
            featurelist, labels, featurenames = difflearn.makeFeatures(
                option=opt)
            Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(featurelist,
                                                                labels,
                                                                shuffle=False)
            X.append(Xtrain)
            y.append(ytrain)
            DL.append(difflearn)

        for ti in range(4):
            print('\n ###### Training on i = %d ###### \n ' % (ti))
            lin1 = DL[ti].train(X[ti],
                                y[ti],
                                RegType='L1',
                                RegCoef=0.000001,
                                maxiter=5000,
                                tolerance=0.00001)
            lin2 = DL[ti].train(X[ti],
                                y[ti],
                                RegType='L2',
                                RegCoef=0.01,
                                maxiter=5000)
            lin0 = DL[ti].train(X[ti], y[ti], RegType='L0')

            for i in range(4):
                print('---- %d ----' % (i))
                print(loadname[i])
                print("L1 Reg Test Score = %5.3f | RMS = %7.5f" % (lin1.score(
                    X[i], y[i]), mean_squared_error(y[i], lin1.predict(X[i]))))
                print("L2 Reg Test Score = %5.3f | RMS = %7.5f" % (lin2.score(
                    X[i], y[i]), mean_squared_error(y[i], lin2.predict(X[i]))))
                print("L0 Reg Test Score = %5.3f | RMS = %7.5f" % (lin0.score(
                    X[i], y[i]), mean_squared_error(y[i], lin0.predict(X[i]))))
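
Since the load names are hard-coded, a direct call is enough:

if __name__ == '__main__':
    runML()

Note that trainratio=1, so each model is scored on the same data it was
fitted to; the informative numbers are the cross-dataset scores, where a
model trained on dataset ti is evaluated on the other three.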
Example #3
import numpy as np
# Fragment: assumes the same setup as Example #1, i.e. S1 = PdfSolver(),
# the IC list of initial-condition dicts, and
# loadname = [makesavename(i, version) for i in IC].
fuk = []
fu = []
kmean = []

for i in range(len(IC)):
    fuki, fui, kmeani, gridvarsi, ICparamsi = S1.loadSolution(loadname[i])
    fuk.append(fuki)
    fu.append(fui)
    kmean.append(kmeani)

uu, kk, xx, tt = gridvarsi
muk, sigk, mink, maxk, sigu, a, b = ICparamsi

grid = PdfGrid()
grid.setGrid(xx, tt, uu, kk)
grid.printDetails()

lmnum = 40
lmmin = 0.0000001
lmmax = 0.00003
lm = np.linspace(lmmin, lmmax, lmnum)
options = ['linear', '2ndorder']
error = []
cf = []
fn = []

for opt in options:

    # Get the maximum number of feature coefficients: maxncoef
    difflearn = PDElearn(fuk[0],
                         grid,
                         kmean[0],
                         fu=fu[0],
                         trainratio=0.8,
                         debug=False)
Example #4
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
# PdfSolver, PdfGrid, PDElearn, and makesavename come from this project's own
# modules; IC is a module-level list of initial-condition dicts.


def runML(setting):
    # sweep of training ratios (fraction of the time span used for training)
    Tinc = 10
    Tmin = 0.3
    Tmax = 0.9
    T = np.linspace(Tmin, Tmax, Tinc)
    version = 1
    loadname = [makesavename(i, version) for i in IC]

    S1 = PdfSolver()
    fuk = []
    fu = []
    kmean = []

    for i in range(len(IC)):
        fuki, fui, kmeani, gridvarsi, ICparamsi = S1.loadSolution(loadname[i])
        fuk.append(fuki)
        fu.append(fui)
        kmean.append(kmeani)

    uu, kk, xx, tt = gridvarsi
    muk, sigk, mink, maxk, sigu, a, b = ICparamsi

    grid = PdfGrid()
    grid.setGrid(xx, tt, uu, kk)
    grid.printDetails()

    if setting == 'sepIC':

        options = ['linear', '2ndorder']
        for opt in options:
            print('#################### %s ########################' % (opt))
            er = np.zeros((len(IC), 3, len(T)))  # 3 regularizers: L0, L1, L2
            for i in range(len(fuk)):
                print('---- Initial Condition ----')
                print('u0: ' + IC[i]['u0'])
                print('fu0: ' + IC[i]['fu0'])
                print('fk: ' + IC[i]['fk'])
                print('---- ----- ----')

                for j in range(len(T)):
                    print('\n ###### Training ratio %3.2f ###### \n ' % (T[j]))
                    difflearn = PDElearn(fuk[i],
                                         grid,
                                         kmean[i],
                                         fu=fu[i],
                                         trainratio=T[j],
                                         debug=False)
                    featurelist, labels, featurenames = difflearn.makeFeatures(
                        option=opt)
                    Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                        featurelist, labels, shuffle=False)

                    lin0 = difflearn.train(Xtrain, ytrain, RegType='L0')
                    lin1 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L1',
                                           RegCoef=0.000001,
                                           maxiter=5000,
                                           tolerance=0.00001)
                    lin2 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L2',
                                           RegCoef=0.01,
                                           maxiter=5000)
                    DL = [lin0, lin1, lin2]

                    for k in range(len(DL)):
                        # record the test error for each regularizer
                        er[i, k, j] = mean_squared_error(
                            ytest, DL[k].predict(Xtest))

            ## Plotting
            for l in range(len(DL)):
                fig = plt.figure()
                leg = []
                for i in range(len(IC)):
                    plt.plot(T, np.reshape(er[i, l, :], (len(T), )))
                    leg.append(makesavename(IC[i], 1))

                plt.xlabel('Training Time Span (fraction)')
                plt.ylabel('Error')
                plt.title('Time Generalization for L%d reg, %s' % (l, opt))
                plt.legend(leg)

        plt.show()

    elif setting == 'lumpIC':
        #### Lump initial conditions ####
        opt = 'linear'
        DL = []
        er = np.zeros((3, len(T)))

        for j in range(len(T)):
            print('\n ###### Training ratio %3.2f ###### \n ' % (T[j]))

            for i in range(len(IC)):

                difflearn = PDElearn(fuk[i],
                                     grid,
                                     kmean[i],
                                     fu=fu[i],
                                     trainratio=T[j],
                                     debug=False)
                featurelist, labels, featurenames = difflearn.makeFeatures(
                    option=opt)
                Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                    featurelist, labels, shuffle=False)

                if i == 0:
                    X_train, y_train = Xtrain, ytrain
                    X_test, y_test = Xtest, ytest
                else:
                    # np.append returns a new array; reassign to actually grow the sets
                    X_train = np.append(X_train, Xtrain, axis=0)
                    y_train = np.append(y_train, ytrain, axis=0)
                    X_test = np.append(X_test, Xtest, axis=0)
                    y_test = np.append(y_test, ytest, axis=0)

            lin0 = difflearn.train(X_train, y_train, RegType='L0')
            lin1 = difflearn.train(X_train,
                                   y_train,
                                   RegType='L1',
                                   RegCoef=0.00001,
                                   maxiter=5000,
                                   tolerance=0.00001)
            lin2 = difflearn.train(X_train,
                                   y_train,
                                   RegType='L2',
                                   RegCoef=0.01,
                                   maxiter=5000)
            DL = [lin0, lin1, lin2]

            for k in range(len(DL)):
                # record the test error for each regularizer
                er[k, j] = mean_squared_error(y_test, DL[k].predict(X_test))

        ## Plotting
        for l in range(len(DL)):
            fig = plt.figure()
            figname = 'Time Generalization L%d reg - linear lumped IC' % (l)

            plt.plot(T, er[l, :])

            plt.xlabel('Training Time Span (fraction)')
            plt.ylabel('Error')
            plt.title(figname)
            fig.savefig(figname + '.pdf')

        plt.show()
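
The lumped-IC branches in Examples #1 and #4 grow the train/test sets inside
a loop. Since np.append copies the entire array on every call, a common
alternative (a sketch reusing the names above, not the project's own code)
is to collect the per-IC arrays in lists and concatenate once:

Xtr_parts, ytr_parts, Xte_parts, yte_parts = [], [], [], []
for i in range(len(IC)):
    # ... build Xtrain, ytrain, Xtest, ytest for IC[i] as above ...
    Xtr_parts.append(Xtrain)
    ytr_parts.append(ytrain)
    Xte_parts.append(Xtest)
    yte_parts.append(ytest)
X_train = np.concatenate(Xtr_parts, axis=0)
y_train = np.concatenate(ytr_parts, axis=0)
X_test = np.concatenate(Xte_parts, axis=0)
y_test = np.concatenate(yte_parts, axis=0)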