Example 1
    def multi_burgers(self, loadnamenpy):

        # Load simulation results
        fu, gridvars, ICparams = self.datamanager.loadSolution(loadnamenpy)
        difflearn = PDElearn(grid=PdfGrid(gridvars),
                             fu=fu,
                             ICparams=ICparams,
                             trainratio=self.trainratio,
                             debug=False,
                             verbose=False)

        F = Features(scase=self.case,
                     option=self.feature_opt,
                     variableCoef=self.variableCoef,
                     variableCoefOrder=self.variableCoefOrder,
                     variableCoefBasis=self.variableCoefBasis)
        featurelist, labels, featurenames = F.makeFeatures(
            PdfGrid(gridvars), fu, ICparams)

        Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(featurelist,
                                                            labels,
                                                            shuffle=False)

        # Fit data
        lin, rem_feature_idx = difflearn.train_sindy(
            Xtrain,
            ytrain,
            RegCoef=self.RegCoef,
            maxiter=self.maxiter,
            tolerance=self.tolerance,
            sindy_iter=self.sindy_iter,
            sindy_alpha=self.sindy_alpha)
        difflearn.print_full_report(lin, Xtrain, ytrain, Xtest, ytest,
                                    rem_feature_idx, featurenames)
        return difflearn, featurenames, Xtrain, ytrain, Xtest, ytest
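train_sindy itself is not shown in these snippets; at its core, SINDy-style
training is sequential thresholded least squares: fit, zero out small
coefficients, refit on the surviving features, and repeat. A minimal
standalone sketch of that iteration (the threshold and iteration count here
are illustrative stand-ins, not the source's RegCoef or sindy_alpha):

import numpy as np

def stlsq(X, y, threshold=0.1, iters=10):
    """Sequential thresholded least squares: the core SINDy iteration."""
    coef, *_ = np.linalg.lstsq(X, y, rcond=None)
    for _ in range(iters):
        small = np.abs(coef) < threshold    # terms to prune this round
        coef[small] = 0.0
        big = ~small
        if not big.any():
            break
        # Refit using only the surviving features
        coef[big], *_ = np.linalg.lstsq(X[:, big], y, rcond=None)
    return coef

# Tiny usage example with a known sparse model
rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))
y = 2.0 * X[:, 0] - 0.5 * X[:, 3]
print(stlsq(X, y))   # recovers approximately [2, 0, 0, -0.5, 0]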
Example 2
    def multiIC(self):

        # Load simulation results
        for i in range(len(self.loadname_list)):

            if i >= self.numexamples:
                break

            fuk, fu, gridvars, ICparams = self.datamanager.loadSolution(
                self.loadname_list[i] + '.npy')
            difflearn = PDElearn(fuk=fuk,
                                 grid=PdfGrid(gridvars),
                                 fu=fu,
                                 ICparams=ICparams,
                                 trainratio=self.trainratio,
                                 debug=False,
                                 verbose=False)

            F = Features(scase=self.case,
                         option=self.feature_opt,
                         variableCoef=self.variableCoef,
                         variableCoefOrder=self.variableCoefOrder,
                         variableCoefBasis=self.variableCoefBasis)
            featurelist, labels, featurenames = F.makeFeatures(
                PdfGrid(gridvars), fu, ICparams)

            Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(featurelist,
                                                                labels,
                                                                shuffle=False)

            if i == 0:
                Xall_train = Xtrain
                Xall_test = Xtest
                yall_train = ytrain
                yall_test = ytest
            else:
                Xall_train = np.concatenate((Xall_train, Xtrain), axis=0)
                yall_train = np.concatenate((yall_train, ytrain), axis=0)
                Xall_test = np.concatenate((Xall_test, Xtest), axis=0)
                yall_test = np.concatenate((yall_test, ytest), axis=0)

        # Fit data
        lin, rem_feature_idx = difflearn.train_sindy(
            Xall_train,
            yall_train,
            RegCoef=self.RegCoef,
            maxiter=self.maxiter,
            tolerance=self.tolerance,
            sindy_iter=self.sindy_iter,
            sindy_alpha=self.sindy_alpha)
        difflearn.print_full_report(lin, Xall_train, yall_train, Xall_test,
                                    yall_test, rem_feature_idx, featurenames)
        return difflearn, featurenames, Xall_train, yall_train, Xall_test, yall_test
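The if/else accumulation above grows the arrays one dataset at a time; an
equivalent and more idiomatic pattern is to collect each split in a list and
concatenate once at the end. A self-contained sketch with stand-in data (all
names and shapes here are illustrative):

import numpy as np

Xtrain_parts, ytrain_parts = [], []
for seed in range(3):
    rng = np.random.default_rng(seed)
    Xtrain_parts.append(rng.normal(size=(10, 4)))  # one dataset's features
    ytrain_parts.append(rng.normal(size=10))       # and its labels

Xall_train = np.concatenate(Xtrain_parts, axis=0)  # shape (30, 4)
yall_train = np.concatenate(ytrain_parts, axis=0)  # shape (30,)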
Example 3
def runML(setting):

    version = 1
    loadname = [makesavename(i, version) for i in IC]

    S1 = PdfSolver()
    fuk = []
    fu = []
    kmean = []

    for i in range(len(IC)):
        fuki, fui, kmeani, gridvarsi, ICparamsi = S1.loadSolution(loadname[i])
        fuk.append(fuki)
        fu.append(fui)
        kmean.append(kmeani)

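    # Grid variables and IC parameters come from the last dataset loaded;
    # they are assumed identical across all initial conditions.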
    uu, kk, xx, tt = gridvarsi
    muk, sigk, mink, maxk, sigu, a, b = ICparamsi

    grid = PdfGrid()
    grid.setGrid(xx, tt, uu, kk)
    grid.printDetails()

    lmnum = 40
    lmmin = 0.0000001
    lmmax = 0.00005
    lm = np.linspace(lmmin, lmmax, lmnum)
    options = ['linear', '2ndorder']

    if setting == 'sepIC':

        for opt in options:

            # Get the maximum number of coefficients: maxncoef
            difflearn = PDElearn(fuk[0],
                                 grid,
                                 kmean[0],
                                 fu=fu[0],
                                 trainratio=0.8,
                                 debug=False)
            featurelist, labels, featurenames = difflearn.makeFeatures(
                option=opt)
            maxncoef = len(featurenames) - 1

            print('#################### %s ########################' % (opt))
            DL = []
            regopts = 2
            er = np.zeros((len(IC), regopts, len(lm)))
            coef = np.zeros((len(IC), regopts, len(lm), maxncoef))
            numcoefl1 = np.zeros((len(IC), len(lm)))
            for i in range(len(IC)):
                print('---- Initial Condition ----')
                print('u0: ' + IC[i]['u0'])
                print('fu0: ' + IC[i]['fu0'])
                print('fk: ' + IC[i]['fk'])
                print('---- ----- ----')

                difflearn = PDElearn(fuk[i],
                                     grid,
                                     kmean[i],
                                     fu=fu[i],
                                     trainratio=0.8,
                                     debug=False)
                featurelist, labels, featurenames = difflearn.makeFeatures(
                    option=opt)
                Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                    featurelist, labels, shuffle=False)

                for j in range(len(lm)):
                    lin1 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L1',
                                           RegCoef=lm[j],
                                           maxiter=5000,
                                           tolerance=0.00001)
                    lin2 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L2',
                                           RegCoef=lm[j],
                                           maxiter=5000)
                    DL = [lin1, lin2]

                    for k in range(len(DL)):
                        er[i, k, j] = mean_squared_error(
                            ytest, DL[k].predict(Xtest))
                        for l in range(maxncoef):
                            coef[i, k, j, l] = DL[k].coef_[l]

                    numcoefl1[i, j] = DL[0].sparse_coef_.getnnz()

            ## Plotting

            # Error as a function of lm
            fig = plt.figure()
            leg = []
            for i in range(len(IC)):
                plt.plot(lm, np.reshape(er[i, 0, :], (len(lm), )))
                leg.append(makesavename(IC[i], 1))

            figname = setting + ' Error vs regularization, L%d reg, %s' % (
                1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Error')
            plt.title(figname)
            plt.legend(leg)
            plt.savefig(FIGFILE + figname + '.pdf')

            # Sparsity as a function of lm
            fig = plt.figure()
            leg = []
            for i in range(len(IC)):
                plt.plot(lm, numcoefl1[i])
                leg.append(makesavename(IC[i], 1))

            figname = setting + ' LinearIC Sparsity in L%d reg, %s' % (1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Sparsity: Number of Coefficients')
            plt.title(figname)
            plt.legend(leg)
            plt.savefig(FIGFILE + figname + '.pdf')

            # All coefficient values as a function of lm (one figure per IC)
            for j in range(len(IC)):
                fig = plt.figure()
                leg = []
                for i in range(len(featurenames) - 1):
                    plt.plot(lm, np.reshape(coef[j, 0, :, i], (len(lm), )))
                    leg.append(featurenames[i + 1])

                # Include the IC index so successive figures don't overwrite
                figname = setting + ' Linear features L%d reg, %s, IC%d' % (
                    1, opt, j)
                plt.xlabel('Regularization Coefficient')
                plt.ylabel('Coefficient Values')
                plt.title(figname)
                plt.legend(leg)
                plt.savefig(FIGFILE + figname + '.pdf')

        plt.show()

    if setting == 'lumpIC':

        for opt in options:

            # Get the maximum number of coefficients: maxncoef
            difflearn = PDElearn(fuk[0],
                                 grid,
                                 kmean[0],
                                 fu=fu[0],
                                 trainratio=0.8,
                                 debug=False)
            featurelist, labels, featurenames = difflearn.makeFeatures(
                option=opt)
            maxncoef = len(featurenames) - 1

            print('#################### %s ########################' % (opt))
            DL = []
            regopts = 2
            er = np.zeros((regopts, len(lm)))
            coef = np.zeros((regopts, len(lm), maxncoef))
            numcoefl1 = np.zeros((len(lm), ))

            for i in range(len(IC)):
                difflearn = PDElearn(fuk[i],
                                     grid,
                                     kmean[i],
                                     fu=fu[i],
                                     trainratio=0.8,
                                     debug=False)
                featurelist, labels, featurenames = difflearn.makeFeatures(
                    option=opt)
                Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                    featurelist, labels, shuffle=False)

                if i == 0:
                    X_train = Xtrain
                    y_train = ytrain
                    X_test = Xtest
                    y_test = ytest
                else:
                    # np.append returns a new array; rebind the result instead
                    X_train = np.concatenate((X_train, Xtrain), axis=0)
                    y_train = np.concatenate((y_train, ytrain), axis=0)
                    X_test = np.concatenate((X_test, Xtest), axis=0)
                    y_test = np.concatenate((y_test, ytest), axis=0)

            for j in range(len(lm)):
                lin1 = difflearn.train(X_train,
                                       y_train,
                                       RegType='L1',
                                       RegCoef=lm[j],
                                       maxiter=5000,
                                       tolerance=0.00001)
                lin2 = difflearn.train(X_train,
                                       y_train,
                                       RegType='L2',
                                       RegCoef=lm[j],
                                       maxiter=5000)
                DL = [lin1, lin2]

                for k in range(len(DL)):
                    er[k, j] = mean_squared_error(y_test,
                                                  DL[k].predict(X_test))
                    for l in range(maxncoef):
                        coef[k, j, l] = DL[k].coef_[l]

                numcoefl1[j] = DL[0].sparse_coef_.getnnz()

            ## Plotting

            # Error as a function of lm
            fig = plt.figure()
            leg = []
            plt.plot(lm, er[0, :])

            figname = setting + ' Error vs regularization, L%d reg, %s' % (
                1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Error')
            plt.title(figname)
            plt.savefig(FIGFILE + figname + '.pdf')

            # Sparsity as a function of lm
            fig = plt.figure()
            leg = []
            plt.plot(lm, numcoefl1)

            figname = setting + ' LinearIC Sparsity in L%d reg, %s' % (1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Sparsity: Number of Coefficients')
            plt.title(figname)
            plt.savefig(FIGFILE + figname + '.pdf')

            # All Coefficients values as a function of lm
            fig = plt.figure()
            leg = []
            for i in range(len(featurenames) - 1):
                plt.plot(lm, np.reshape(coef[0, :, i], (len(lm), )))
                leg.append(featurenames[i + 1])

            figname = setting + ' LinearIC Linear features L%d reg, %s' % (
                1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Coefficient Values')
            plt.title(figname)
            plt.legend(leg)
            plt.savefig(FIGFILE + figname + '.pdf')

        plt.show()
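difflearn.train is not shown in these snippets, but the use of coef_ and
sparse_coef_.getnnz() strongly suggests it wraps scikit-learn's Lasso (L1)
and Ridge (L2). A self-contained sketch of the same regularization sweep on
synthetic data (dimensions and coefficients are illustrative assumptions):

import numpy as np
from sklearn.linear_model import Lasso, Ridge
from sklearn.metrics import mean_squared_error

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 8))
true_coef = np.array([1.0, -2.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0])
y = X @ true_coef + 0.01 * rng.normal(size=200)

lm = np.linspace(1e-7, 5e-5, 40)   # same sweep range as above
mse, nnz = [], []
for a in lm:
    lasso = Lasso(alpha=a, max_iter=5000, tol=1e-5).fit(X, y)
    ridge = Ridge(alpha=a).fit(X, y)           # dense L2 counterpart
    mse.append((mean_squared_error(y, lasso.predict(X)),
                mean_squared_error(y, ridge.predict(X))))
    nnz.append(lasso.sparse_coef_.getnnz())    # surviving L1 terms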
Example 5
def runML():
    loadname1 = 'u0exp_fu0gauss_fkgauss_1'
    loadname2 = 'u0lin_fu0gauss_fkgauss_1'
    loadname3 = 'u0lin_fu0gauss_fkuni_1'
    loadname4 = 'u0exp_fu0gauss_fkuni_1'
    loadname = [loadname1, loadname2, loadname3, loadname4]


    S1 = PdfSolver()

    fuk = []
    fu = []
    kmean = []

    for i in range(len(loadname)):
        fuki, fui, kmeani, gridvarsi, ICparamsi = S1.loadSolution(loadname[i])
        fuk.append(fuki)
        fu.append(fui)
        kmean.append(kmeani)

    uu, kk, xx, tt = gridvarsi
    muk, sigk, mink, maxk, sigu, a, b = ICparamsi

    grid = PdfGrid()
    grid.setGrid(xx, tt, uu, kk)
    grid.printDetails()

    # Datasets to use; each is trained on in turn and scored against all four
    p = (0, 1, 2, 3)

    options = ['all', 'linear', '2ndorder']
    for opt in options:
        print('#################### %s ########################' % (opt))
        DL = []
        X = []
        y = []
        for i in p:
            difflearn = PDElearn(fuk[i],
                                 grid,
                                 kmean[i],
                                 fu=fu[i],
                                 trainratio=1,
                                 debug=False)
            featurelist, labels, featurenames = difflearn.makeFeatures(
                option=opt)
            Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(featurelist,
                                                                labels,
                                                                shuffle=False)
            X.append(Xtrain)
            y.append(ytrain)
            DL.append(difflearn)

        for ti in range(4):
            print('\n ###### Training on i = %d ###### \n ' % (ti))
            lin1 = DL[ti].train(X[ti],
                                y[ti],
                                RegType='L1',
                                RegCoef=0.000001,
                                maxiter=5000,
                                tolerance=0.00001)
            lin2 = DL[ti].train(X[ti],
                                y[ti],
                                RegType='L2',
                                RegCoef=0.01,
                                maxiter=5000)
            lin0 = DL[ti].train(X[ti], y[ti], RegType='L0')

            for i in range(4):
                print('---- %d ----' % (i))
                print(loadname[i])
                print("L1 Reg Test Score = %5.3f | MSE = %7.5f" % (lin1.score(
                    X[i], y[i]), mean_squared_error(y[i], lin1.predict(X[i]))))
                print("L2 Reg Test Score = %5.3f | MSE = %7.5f" % (lin2.score(
                    X[i], y[i]), mean_squared_error(y[i], lin2.predict(X[i]))))
                print("L0 Reg Test Score = %5.3f | MSE = %7.5f" % (lin0.score(
                    X[i], y[i]), mean_squared_error(y[i], lin0.predict(X[i]))))
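A note on the two numbers printed above: on a scikit-learn regressor, score
is the R^2 coefficient of determination, while mean_squared_error returns the
MSE. A minimal standalone illustration, assuming the same scikit-learn-style
estimator interface that the snippets appear to rely on:

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error

rng = np.random.default_rng(1)
X = rng.normal(size=(50, 3))
y = X @ np.array([1.0, 0.0, -1.0]) + 0.1 * rng.normal(size=50)

model = Ridge(alpha=0.01).fit(X, y)
print("R^2:", model.score(X, y))                        # 1.0 is a perfect fit
print("MSE:", mean_squared_error(y, model.predict(X)))  # 0.0 is a perfect fit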
Example 6
def runML(setting):
    Tinc = 10
    Tmin = 0.3
    Tmax = 0.9
    T = np.linspace(Tmin, Tmax, Tinc)
    version = 1
    loadname = [makesavename(i, version) for i in IC]

    S1 = PdfSolver()
    fuk = []
    fu = []
    kmean = []

    for i in range(len(IC)):
        fuki, fui, kmeani, gridvarsi, ICparamsi = S1.loadSolution(loadname[i])
        fuk.append(fuki)
        fu.append(fui)
        kmean.append(kmeani)

    uu, kk, xx, tt = gridvarsi
    muk, sigk, mink, maxk, sigu, a, b = ICparamsi

    grid = PdfGrid()
    grid.setGrid(xx, tt, uu, kk)
    grid.printDetails()

    if setting == 'sepIC':

        options = ['linear', '2ndorder']
        for opt in options:
            print('#################### %s ########################' % (opt))
            DL = []
            er = np.zeros((len(IC), 3, len(T)))
            for i in range(len(fuk)):
                print('---- Initial Condition ----')
                print('u0: ' + IC[i]['u0'])
                print('fu0: ' + IC[i]['fu0'])
                print('fk: ' + IC[i]['fk'])
                print('---- ----- ----')

                for j in range(len(T)):
                    print('\n ###### Training fraction %3.2f ###### \n ' %
                          (T[j]))
                    difflearn = PDElearn(fuk[i],
                                         grid,
                                         kmean[i],
                                         fu=fu[i],
                                         trainratio=T[j],
                                         debug=False)
                    featurelist, labels, featurenames = difflearn.makeFeatures(
                        option=opt)
                    Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                        featurelist, labels, shuffle=False)

                    lin0 = difflearn.train(Xtrain, ytrain, RegType='L0')
                    lin1 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L1',
                                           RegCoef=0.000001,
                                           maxiter=5000,
                                           tolerance=0.00001)
                    lin2 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L2',
                                           RegCoef=0.01,
                                           maxiter=5000)
                    DL = [lin0, lin1, lin2]

                    for k in range(len(DL)):
                        # Record test error for each regression type
                        er[i, k, j] = mean_squared_error(
                            ytest, DL[k].predict(Xtest))

            ## Plotting
            for l in range(len(DL)):
                fig = plt.figure()
                leg = []
                for i in range(len(IC)):
                    plt.plot(T, np.reshape(er[i, l, :], (len(T), )))
                    leg.append(makesavename(IC[i], 1))

                plt.xlabel('Training Time Span (fraction)')
                plt.ylabel('Error')
                plt.title('Time Generalization for L%d reg, %s' % (l, opt))
                plt.legend(leg)

        plt.show()

    if setting == 'lumpIC':
        #### Lump initial conditions ####
        opt = 'linear'
        DL = []
        er = np.zeros((3, len(T)))

        for j in range(len(T)):
            print('\n ###### Training fraction %3.2f ###### \n ' % (T[j]))

            for i in range(len(IC)):

                difflearn = PDElearn(fuk[i],
                                     grid,
                                     kmean[i],
                                     fu=fu[i],
                                     trainratio=T[j],
                                     debug=False)
                featurelist, labels, featurenames = difflearn.makeFeatures(
                    option=opt)
                Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                    featurelist, labels, shuffle=False)

                if i == 0:
                    X_train = Xtrain
                    y_train = ytrain
                    X_test = Xtest
                    y_test = ytest
                else:
                    # np.append returns a new array; rebind the result instead
                    X_train = np.concatenate((X_train, Xtrain), axis=0)
                    y_train = np.concatenate((y_train, ytrain), axis=0)
                    X_test = np.concatenate((X_test, Xtest), axis=0)
                    y_test = np.concatenate((y_test, ytest), axis=0)

            lin0 = difflearn.train(X_train, y_train, RegType='L0')
            lin1 = difflearn.train(X_train,
                                   y_train,
                                   RegType='L1',
                                   RegCoef=0.00001,
                                   maxiter=5000,
                                   tolerance=0.00001)
            lin2 = difflearn.train(X_train,
                                   y_train,
                                   RegType='L2',
                                   RegCoef=0.01,
                                   maxiter=5000)
            DL = [lin0, lin1, lin2]

            for k in range(len(DL)):
                # Record test error for each regression type (L0, L1, L2)
                er[k, j] = mean_squared_error(y_test, DL[k].predict(X_test))

        ## Plotting
        for l in range(len(DL)):
            fig = plt.figure()
            figname = 'Time Generalization L%d reg - linear lumped IC' % (l)

            plt.plot(T, er[l, :])

            plt.xlabel('Training Time Span (fraction)')
            plt.ylabel('Error')
            plt.title(figname)
            fig.savefig(figname + '.pdf')

        plt.show()
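The lumped accumulation above relies on rebinding the result of
np.concatenate. A common pitfall, worth noting here, is calling np.append
without assignment: np.append returns a new array rather than modifying its
argument, so an unassigned call is a no-op and only the i == 0 split would
ever be used. A minimal demonstration:

import numpy as np

a = np.zeros((2, 3))
b = np.ones((1, 3))

np.append(a, b, axis=0)             # returns a new array; a is unchanged
print(a.shape)                      # (2, 3)

a = np.concatenate((a, b), axis=0)  # rebind the result to grow the array
print(a.shape)                      # (3, 3)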