Example 1
    def makeFeatures_Conservative(self, grid, fu, ICparams):
        nt = len(grid.tt)
        nx = len(grid.xx)
        nu = len(grid.uu)
        dx = grid.xx[1] - grid.xx[0]
        dt = grid.tt[1] - grid.tt[0]
        du = grid.uu[1] - grid.uu[0]

        ddict = {'', 't', 'x', 'xx', 'xxx', 'U', 'UU', 'xU', 'xUU', 'xxU'}
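        # ddict is a set despite the name; each string encodes a mixed derivative of fu, one np.diff per character (e.g. 'xU' -> d^2 fu / dx dU)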

        # Derivative terms dictionary
        # Computationally inefficient (fix: use previous derivatives)
        dimaxis = {'U': 0, 'x': 1, 't': 2}
        diminc = {'U': du, 'x': dx, 't': dt}
        maxder = {'U': 0, 'x': 0, 't': 0}
        fudict = dict.fromkeys(ddict, None)  # fu dictionary of derivatives
        dcount = dict.fromkeys(ddict,
                               None)  # Counts of derivatives for each term

        for term in ddict:
            dfu = fu.copy()  # defensive copy (np.diff returns new arrays)
            md = {'U': 0, 'x': 0, 't': 0}
            if len(term) > 0:
                for dim in term:
                    dfu = np.diff(dfu, axis=dimaxis[dim]) / diminc[dim]
                    md[dim] += 1
            dcount[term] = md
            fudict[term] = dfu
            for dim in term:
                maxder[dim] = max(maxder[dim], md[dim])

        # Adjust dimensions to match
        mu = maxder['U']
        mx = maxder['x']
        mt = maxder['t']

        for term in fudict:
            uc = mu - dcount[term]['U']
            xc = mx - dcount[term]['x']
            tc = mt - dcount[term]['t']
            nu = fudict[term].shape[0]
            nx = fudict[term].shape[1]
            nt = fudict[term].shape[2]
            fudict[term] = fudict[term][uc // 2:nu - uc // 2 - uc % 2,
                                        xc // 2:nx - xc // 2 - xc % 2,
                                        tc // 2:nt - tc // 2 - tc % 2]

        xx_adj = grid.xx[mx // 2:len(grid.xx) - mx // 2 - mx % 2]
        uu_adj = grid.uu[mu // 2:len(grid.uu) - mu // 2 - mu % 2]

        # make labels and feature lists
        featurenames = []
        featurelist = []

        # Add feature of ones
        fudict['1'] = np.ones_like(fudict['t'])
        ddict.add('1')

        # Add variable coefficients
        deg = self.variableCoefOrder + 1

        if self.variableCoef:

            print("Variable coefficient type: " + self.variableCoefBasis)
            uu_grid, xx_grid = np.meshgrid(uu_adj, xx_adj, indexing='ij')
            fudict_var = dict.fromkeys([(term, j, k) for term in ddict
                                        for j in range(deg)
                                        for k in range(deg)])

            for term in ddict:
                for i in range(deg):
                    for j in range(deg):

                        fux = np.zeros_like(uu_grid)
                        for k, u in enumerate(uu_adj):
                            for l, x in enumerate(xx_adj):

                                if self.variableCoefBasis == 'chebyshev':
                                    # too inefficient (find a way to get individual terms)
                                    ivec = np.zeros(i + 1)
                                    ivec[-1] = 1
                                    jvec = np.zeros(j + 1)
                                    jvec[-1] = 1
                                    fux[k, l] = chebval(u, ivec) * chebval(
                                        x, jvec)

                                elif self.variableCoefBasis == 'simple_polynomial':
                                    fux[k, l] = u**i * x**j

                                else:
                                    raise Exception(
                                        "variableCoefBasis {} doesn't exist".
                                        format(self.variableCoefBasis))

                        fudict_var[(term, i, j)] = fux  # shape (nu, nx)

            for feat, coefarr in fudict_var.items():
                # feat = (term, i, j)
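                # coefarr is a 2-D (U, x) coefficient field; the tile + double transpose replicates it along t to match the cropped derivative arrays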
                fux_t = np.tile(coefarr.transpose(),
                                (nt - mt, 1, 1)).transpose()
                fudict_var[feat] = np.multiply(fudict[feat[0]], fux_t)

            # Too redundant - fix
            if self.option == '2ndorder':
                labels = fudict_var[('tt', 0, 0)]
                for key, val in fudict_var.items():
                    if key[0] != 'tt' and key[0] != 't':
                        featurenames.append('fu_' + key[0] + '^{' +
                                            str(key[1]) + str(key[2]) + '}')
                        featurelist.append(val)

            elif self.option == '1storder' or self.option == 'conservative':
                labels = fudict_var[('t', 0, 0)]
                for key, val in fudict_var.items():
                    if key[0] != 't':
                        featurenames.append('fu_' + key[0] + '^{' +
                                            str(key[1]) + str(key[2]) + '}')
                        featurelist.append(val)

            elif self.option == '1storder_close':
                S = PdfSolver(grid, ICparams=ICparams)
                print(S.int_kmean())
                labels = fudict_var[('t', 0, 0)] + S.int_kmean() * fudict_var[
                    ('x', 0, 0)]
                for key, val in fudict_var.items():
                    if key[0] != 't' and key != ('x', 0, 0):
                        featurenames.append('fu_' + key[0] + '^{' +
                                            str(key[1]) + str(key[2]) + '}')
                        featurelist.append(val)
            else:
                raise Exception("wrong option")

        else:  # Not variable coefficient

            if self.option == '2ndorder':
                labels = fudict['tt']
                for term, val in fudict.items():
                    if term != 'tt' and term != 't':
                        featurenames.append('fu_' + term)
                        featurelist.append(val)

            elif self.option == '1storder':
                labels = fudict['t']
                for term, val in fudict.items():
                    if term != 't':
                        featurenames.append('fu_' + term)
                        featurelist.append(val)

            elif self.option == '1storder_close':
                S = PdfSolver(grid, ICparams=ICparams)
                labels = fudict['t'] + S.int_kmean() * fudict['x']
                for term, val in fudict.items():
                    if term != 't':
                        featurenames.append('fu_' + term)
                        featurelist.append(val)

            else:
                raise Exception("wrong option")

        return featurelist, labels, featurenames
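A minimal sketch (not part of the original source) of the differencing scheme the method relies on: each character of a term string triggers one np.diff along the matching axis, so every derivative loses one sample on that axis, which is why the arrays are center-cropped to a common shape afterwards. Only NumPy is assumed.

import numpy as np

# Toy fu(U, x, t); axis order matches dimaxis = {'U': 0, 'x': 1, 't': 2}
du, dx, dt = 0.1, 0.1, 0.05
fu = np.random.rand(6, 8, 10)

dimaxis = {'U': 0, 'x': 1, 't': 2}
diminc = {'U': du, 'x': dx, 't': dt}

def derivative(f, term):
    # One forward difference per character, e.g. 'xU' ~ d^2 f / (dx dU)
    for dim in term:
        f = np.diff(f, axis=dimaxis[dim]) / diminc[dim]
    return f

print(derivative(fu, 't').shape)   # (6, 8, 9): one sample lost along t
print(derivative(fu, 'xU').shape)  # (5, 7, 10): one lost along x and one along U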
Example 2
def runsimulation():
    dt = 0.03
    t0 = 0.0
    tend = 4
    nt = int((tend - t0) / dt)

    dx = 0.03
    x0 = -1.5
    xend = 1.5
    nx = int((xend - x0) / dx)

    dk = 0.05
    k0 = -1.0
    kend = 1.0
    nk = int((kend - k0) / dk)

    du = 0.04
    u0 = -2.5
    uend = 2.5
    nu = int((uend - u0) / du)
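    # Each n* counts grid steps rather than points, so the right endpoint is excluded (int() truncates)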

    muk = 0.0
    sigk = 0.5
    sigu = 1.0
    mink = -0.5
    maxk = 0.5
    a = 1.0
    b = 0.0

    # Second set of data
    muk_2 = 0.2
    sigk_2 = 1
    sigu_2 = 1.1
    mink_2 = 0.0
    maxk_2 = 1.0
    a_2 = 1.0
    b_2 = 0.0

    runsimulation = 1
    IC_opt = 1

    solvopt = 'RandomKadvection'

    for i in range(1, 5):
        print(i)
        grid = PdfGrid(x0=x0,
                       xend=xend,
                       k0=k0,
                       kend=kend,
                       t0=t0,
                       tend=tend,
                       u0=u0,
                       uend=uend,
                       nx=nx,
                       nt=nt,
                       nk=nk,
                       nu=nu)
        grid.printDetails()
        S = PdfSolver(grid, save=True)
        S.setIC(option=i,
                a=a,
                b=b,
                mink=mink,
                maxk=maxk,
                muk=muk,
                sigk=sigk,
                sigu=sigu)

        time0 = time.time()
        fuk, fu, kmean, uu, kk, xx, tt = S.solve(solver_opt=solvopt)
        print('Compute time = ', time.time() - time0)
Example 3
    def makeFeatures_uxt(self, grid, fu, ICparams):
        nt = len(grid.tt)
        nx = len(grid.xx)
        nu = len(grid.uu)
        dx = grid.xx[1] - grid.xx[0]
        dt = grid.tt[1] - grid.tt[0]
        du = grid.uu[1] - grid.uu[0]

        if self.option == '2ndorder':
            ddict = {
                '', 't', 'tt', 'xt', 'x', 'xx', 'xxx', 'xxxx', 'U', 'UU',
                'UUU', 'xU', 'xUU', 'xxU', 'xxUU'
            }
        elif self.option == '1storder' or self.option == '1storder_close':
            ddict = {'', 't', 'x', 'xx', 'xxx', 'U', 'UU', 'xU', 'xUU', 'xxU'}
        elif self.option == 'conservative':
            ddict = {
                '', 't', 'U', 'Ux', 'Uxx', 'Uxxx', 'UU', 'UUx', 'UUxx', 'UUU',
                'UUUx'
            }
        else:
            raise Exception('option not valid')
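        # As in the conservative variant, each string in ddict (a set) encodes a mixed derivative: one np.diff per character, e.g. 'UUx' -> d^3 fu / dU^2 dx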

        # Derivative terms dictionary
        # Computationally inefficient (fix: use previous derivatives)
        dimaxis = {'U': 0, 'x': 1, 't': 2}
        diminc = {'U': du, 'x': dx, 't': dt}
        maxder = {'U': 0, 'x': 0, 't': 0}
        fudict = dict.fromkeys(ddict, None)  # fu dictionary of derivatives
        dcount = dict.fromkeys(ddict,
                               None)  # Counts of derivatives for each term

        for term in ddict:
            dfu = fu.copy()  # defensive copy (np.diff returns new arrays)
            md = {'U': 0, 'x': 0, 't': 0}
            if len(term) > 0:
                for dim in term:
                    dfu = np.diff(dfu, axis=dimaxis[dim]) / diminc[dim]
                    md[dim] += 1
            dcount[term] = md
            fudict[term] = dfu
            for dim in term:
                maxder[dim] = max(maxder[dim], md[dim])

        # Adjust dimensions to match
        mu = maxder['U']
        mx = maxder['x']
        mt = maxder['t']

        for term in fudict:
            uc = mu - dcount[term]['U']
            xc = mx - dcount[term]['x']
            tc = mt - dcount[term]['t']
            nu = fudict[term].shape[0]
            nx = fudict[term].shape[1]
            nt = fudict[term].shape[2]
            fudict[term] = fudict[term][uc // 2:nu - uc // 2 - uc % 2,
                                        xc // 2:nx - xc // 2 - xc % 2,
                                        tc // 2:nt - tc // 2 - tc % 2]

        xx_adj = grid.xx[mx // 2:len(grid.xx) - mx // 2 - mx % 2]
        uu_adj = grid.uu[mu // 2:len(grid.uu) - mu // 2 - mu % 2]

        # make labels and feature lists
        featurenames = []
        featurelist = []

        # Add feature of ones
        fudict['1'] = np.ones_like(fudict['t'])
        ddict.add('1')

        # Add variable coefficients
        deg = self.variableCoefOrder + 1

        if self.variableCoef:

            print("Variable coefficient type: " + self.variableCoefBasis)
            uu_grid, xx_grid = np.meshgrid(uu_adj, xx_adj, indexing='ij')
            fudict_var = dict.fromkeys([(term, j, k) for term in ddict
                                        for j in range(deg)
                                        for k in range(deg)])

            for term in ddict:
                for i in range(deg):
                    for j in range(deg):

                        fux = np.zeros_like(uu_grid)
                        for k, u in enumerate(uu_adj):
                            for l, x in enumerate(xx_adj):

                                if self.variableCoefBasis == 'chebyshev':
                                    # too inefficient (find a way to get individual terms)
                                    ivec = np.zeros(i + 1)
                                    ivec[-1] = 1
                                    jvec = np.zeros(j + 1)
                                    jvec[-1] = 1
                                    fux[k, l] = chebval(u, ivec) * chebval(
                                        x, jvec)

                                elif self.variableCoefBasis == 'simple_polynomial':
                                    fux[k, l] = u**i * x**j

                                else:
                                    raise Exception(
                                        "variableCoefBasis {} doesn't exist".
                                        format(self.variableCoefBasis))

                        fudict_var[(term, i, j)] = fux  # shape (nu, nx)

            for feat, coefarr in fudict_var.items():
                # feat = (term, i, j)
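                # coefarr is a 2-D (U, x) coefficient field; the tile + double transpose replicates it along t to match the cropped derivative arrays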
                fux_t = np.tile(coefarr.transpose(),
                                (nt - mt, 1, 1)).transpose()
                fudict_var[feat] = np.multiply(fudict[feat[0]], fux_t)

            # Too redundant - fix
            if self.option == '2ndorder':
                labels = fudict_var[('tt', 0, 0)]
                for key, val in fudict_var.items():
                    if key[0] != 'tt' and key[0] != 't':
                        featurenames.append('fu_' + key[0] + '^{' +
                                            str(key[1]) + str(key[2]) + '}')
                        featurelist.append(val)

            elif self.option == '1storder' or self.option == 'conservative':
                labels = fudict_var[('t', 0, 0)]
                for key, val in fudict_var.items():
                    if key[0] != 't':
                        featurenames.append('fu_' + key[0] + '^{' +
                                            str(key[1]) + str(key[2]) + '}')
                        featurelist.append(val)

            elif self.option == '1storder_close':
                # TODO: Make loadMetadata(filename, directory) into function

                mcD = DataIO(self.scase, directory=MCDIR)
                with open(mcD.casedir + 'metadata.txt', 'r') as jsonfile:
                    allmc_metadata = json.load(jsonfile)
                    mc_metadata = allmc_metadata[ICparams['MCfile'].split('.')
                                                 [0]]

                if self.scase == 'advection_reaction_randadv_analytical':
                    k_coeffs = mc_metadata['ICparams']['distparams'][0]
                    # TODO: add 'distk' for ICparams and find mean based on it instead
                    if mc_metadata['ICparams']['fu0'] == 'gaussians_k':
                        kmean = k_coeffs[0]
                    print('kmean = ', kmean)

                    if mc_metadata['ICparams']['source'] == 'quadratic':
                        labels = fudict_var[('t', 0, 0)] + kmean * fudict_var[
                            ('x', 0, 0)] + fudict_var[
                                ('U', 2, 0)] + 2 * fudict_var[('', 1, 0)]
                        removekeys = {('t', 0, 0), ('x', 0, 0), ('U', 2, 0),
                                      ('', 1, 0)}

                    elif mc_metadata['ICparams']['source'] == 'linear':
                        labels = fudict_var[('t', 0, 0)] + kmean * fudict_var[
                            ('x', 0, 0)] + fudict_var[
                                ('U', 1, 0)] + fudict_var[('', 0, 0)]
                        removekeys = {('t', 0, 0), ('x', 0, 0), ('U', 1, 0),
                                      ('', 0, 0)}

                    elif mc_metadata['ICparams']['source'] == 'logistic':
                        ## TODO: Assumes kr = K = 1.0
                        labels = (fudict_var[('t', 0, 0)]
                                  + kmean * fudict_var[('x', 0, 0)]
                                  + fudict_var[('U', 1, 0)]
                                  - fudict_var[('U', 2, 0)]
                                  + fudict_var[('', 0, 0)]
                                  - 2 * fudict_var[('', 1, 0)])
                        removekeys = {('t', 0, 0), ('x', 0, 0), ('U', 2, 0),
                                      ('U', 1, 0), ('', 1, 0), ('', 0, 0)}

                ## TODO: Try removing terms that appear in closure
                for key, val in fudict_var.items():
                    if key[0] != 't' and key not in removekeys:
                        featurenames.append('fu_' + key[0] + '^{' +
                                            str(key[1]) + str(key[2]) + '}')
                        featurelist.append(val)
            else:
                raise Exception("wrong option")

        else:  # Not variable coefficient

            if self.option == '2ndorder':
                labels = fudict['tt']
                for term, val in fudict.items():
                    if term != 'tt' and term != 't':
                        featurenames.append('fu_' + term)
                        featurelist.append(val)

            elif self.option == '1storder':
                labels = fudict['t']
                for term, val in fudict.items():
                    if term != 't':
                        featurenames.append('fu_' + term)
                        featurelist.append(val)

            elif self.option == '1storder_close':
                S = PdfSolver(grid, ICparams=ICparams)
                labels = fudict['t'] + S.int_kmean() * fudict['x']

                for term, val in fudict.items():
                    if term != 't':
                        featurenames.append('fu_' + term)
                        featurelist.append(val)

            else:
                raise Exception("wrong option")

        return featurelist, labels, featurenames
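The TODO in the '1storder_close' branch above asks for a loadMetadata helper; a minimal sketch, assuming DataIO and MCDIR behave as they are used in the method (both come from the surrounding project, not from this snippet):

import json

def loadMetadata(casename, directory):
    # Hypothetical helper for the TODO above: return the full metadata dict of a case
    d = DataIO(casename, directory=directory)
    with open(d.casedir + 'metadata.txt', 'r') as jsonfile:
        return json.load(jsonfile)

# Usage mirroring the method:
#   allmc_metadata = loadMetadata(self.scase, MCDIR)
#   mc_metadata = allmc_metadata[ICparams['MCfile'].split('.')[0]]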
Example 4
for k0, kend in k_range:
    for u0, uend in u_range: 
        for x0, xend in x_range:

            for u0typei in u0type:
                for sigu0i in sigu0:
                    for ai, bi in zip(a, b):

                        for k_disti in k_dist:
                            if k_disti == 'gaussian':
                                kparam1, kparam2 = muk, sigk
                            elif k_disti == 'uniform':
                                kparam1, kparam2 = mink, maxk 
                            else:
                                raise Exception("wrong k distribution option")

                            for kparam1i, kparam2i in zip(kparam1, kparam2):

                                gridvars = {
                                    'u': [u0, uend, du],
                                    'k': [k0, kend, dk],
                                    't': [t_range[0], t_range[1], dt],
                                    'x': [x0, xend, dx]
                                }
                                ICparams = {'u0': u0typei, 
                                            'u0param': [ai, bi], 
                                            'fu0': 'gaussian',
                                            'fu0param': sigu0i, 
                                            'fk': k_disti, 
                                            'fkparam': [kparam1i, kparam2i]}

                                grid = PdfGrid(gridvars)
                                S = PdfSolver(grid, ICparams, save=True, case=case)
                                S.solve() 
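The seven-level nest above can be flattened with itertools.product; a sketch assuming the same parameter lists are in scope (the inner kparam sweep would stay nested, since it depends on k_disti):

from itertools import product

for (k0, kend), (u0, uend), (x0, xend), u0typei, sigu0i, (ai, bi), k_disti in product(
        k_range, u_range, x_range, u0type, sigu0, list(zip(a, b)), k_dist):
    pass  # pick kparam1/kparam2 from k_disti, then build gridvars/ICparams and solve as above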
Example 5
def runML(setting):

    version = 1
    loadname = [makesavename(i, version) for i in IC]

    S1 = PdfSolver()
    fuk = []
    fu = []
    kmean = []
    gridvars = []
    ICparams = []

    for i in range(len(IC)):
        fuki, fui, kmeani, gridvarsi, ICparamsi = S1.loadSolution(loadname[i])
        fuk.append(fuki)
        fu.append(fui)
        kmean.append(kmeani)

    uu, kk, xx, tt = gridvarsi
    muk, sigk, mink, maxk, sigu, a, b = ICparamsi

    grid = PdfGrid()
    grid.setGrid(xx, tt, uu, kk)
    grid.printDetails()

    lmnum = 40
    lmmin = 0.0000001
    lmmax = 0.00005
    lm = np.linspace(lmmin, lmmax, lmnum)
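    # lm sweeps the regularization strength passed as RegCoef to the L1/L2 fits below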
    options = ['linear', '2ndorder']

    if setting == 'sepIC':

        for opt in options:

            # Get the maximum number of coefficients: maxncoef
            difflearn = PDElearn(fuk[0],
                                 grid,
                                 kmean[0],
                                 fu=fu[0],
                                 trainratio=0.8,
                                 debug=False)
            featurelist, labels, featurenames = difflearn.makeFeatures(
                option=opt)
            #pdb.set_trace()
            maxncoef = len(featurenames) - 1

            print('#################### %s ########################' % (opt))
            DL = []
            X = []
            y = []
            error = []
            regopts = 2
            er = np.zeros((len(IC), regopts, len(lm)))
            coef = np.zeros((len(IC), regopts, len(lm), maxncoef))
            numcoefl1 = np.zeros((len(IC), len(lm)))
            for i in range(len(IC)):
                print('---- Initial Condition ----')
                print('u0: ' + IC[i]['u0'])
                print('fu0: ' + IC[i]['fu0'])
                print('fk: ' + IC[i]['fk'])
                print('---- ----- ----')

                difflearn = PDElearn(fuk[i],
                                     grid,
                                     kmean[i],
                                     fu=fu[i],
                                     trainratio=0.8,
                                     debug=False)
                featurelist, labels, featurenames = difflearn.makeFeatures(
                    option=opt)
                Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                    featurelist, labels, shuffle=False)

                for j in range(len(lm)):
                    lin1 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L1',
                                           RegCoef=lm[j],
                                           maxiter=5000,
                                           tolerance=0.00001)
                    lin2 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L2',
                                           RegCoef=lm[j],
                                           maxiter=5000)
                    DL = [lin1, lin2]

                    for k in range(len(DL)):
                        er[i, k,
                           j] = mean_squared_error(ytest, DL[k].predict(Xtest))
                        #pdb.set_trace()
                        for l in range(maxncoef):
                            coef[i, k, j, l] = DL[k].coef_[l]

                    numcoefl1[i, j] = DL[0].sparse_coef_.getnnz()

            ## Plotting

            # Error as a function of lm
            fig = plt.figure()
            leg = []
            for i in range(len(IC)):
                plt.plot(lm, np.reshape(er[i, 0, :], (len(lm), )))
                leg.append(makesavename(IC[i], 1))

            figname = setting + ' reg coefficients L%d reg, %s' % (1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Error')
            plt.title(figname)
            plt.legend(leg)
            plt.savefig(FIGFILE + figname + '.pdf')

            # Sparsity as a function of lm
            fig = plt.figure()
            leg = []
            for i in range(len(IC)):
                plt.plot(lm, numcoefl1[i])
                leg.append(makesavename(IC[i], 1))

            figname = setting + ' LinearIC Sparsity in L%d reg, %s' % (1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Sparsity: Number of Coefficients')
            plt.title(figname)
            plt.legend(leg)
            plt.savefig(FIGFILE + figname + '.pdf')

            # All Coefficients values as a function of lm
            for j in range(len(IC)):
                fig = plt.figure()
                leg = []
                for i in range(len(featurenames) - 1):
                    plt.plot(lm, np.reshape(coef[j, 0, :, i], (len(lm), )))
                    leg.append(featurenames[i + 1])

                figname = setting + ' Linear features L%d reg, %s' % (1, opt)
                plt.xlabel('Regularization Coefficient')
                plt.ylabel('Coefficient Values')
                plt.title(figname)
                plt.legend(leg)
                plt.savefig(FIGFILE + figname + '.pdf')

        plt.show()

    if setting == 'lumpIC':

        for opt in options:

            # Get the maximum number of coefficients: maxncoef
            difflearn = PDElearn(fuk[0],
                                 grid,
                                 kmean[0],
                                 fu=fu[0],
                                 trainratio=0.8,
                                 debug=False)
            featurelist, labels, featurenames = difflearn.makeFeatures(
                option=opt)
            #pdb.set_trace()
            maxncoef = len(featurenames) - 1

            print('#################### %s ########################' % (opt))
            DL = []
            X = []
            y = []
            error = []
            regopts = 2
            er = np.zeros((regopts, len(lm)))
            coef = np.zeros((regopts, len(lm), maxncoef))
            numcoefl1 = np.zeros((len(lm), ))

            for i in range(len(IC)):
                difflearn = PDElearn(fuk[i],
                                     grid,
                                     kmean[i],
                                     fu=fu[i],
                                     trainratio=0.8,
                                     debug=False)
                featurelist, labels, featurenames = difflearn.makeFeatures(
                    option=opt)
                Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                    featurelist, labels, shuffle=False)

                if i == 0:
                    X_train = Xtrain
                    y_train = ytrain
                    X_test = Xtest
                    y_test = ytest
                else:
                    # np.append returns a new array; assign the result back
                    X_train = np.append(X_train, Xtrain, axis=0)
                    y_train = np.append(y_train, ytrain, axis=0)
                    X_test = np.append(X_test, Xtest, axis=0)
                    y_test = np.append(y_test, ytest, axis=0)

            for j in range(len(lm)):
                lin1 = difflearn.train(X_train,
                                       y_train,
                                       RegType='L1',
                                       RegCoef=lm[j],
                                       maxiter=5000,
                                       tolerance=0.00001)
                lin2 = difflearn.train(X_train,
                                       y_train,
                                       RegType='L2',
                                       RegCoef=lm[j],
                                       maxiter=5000)
                DL = [lin1, lin2]

                for k in range(len(DL)):
                    er[k, j] = mean_squared_error(y_test,
                                                  DL[k].predict(X_test))
                    for l in range(maxncoef):
                        coef[k, j, l] = DL[k].coef_[l]

                numcoefl1[j] = DL[0].sparse_coef_.getnnz()

            ## Plotting

            # Error as a function of lm
            fig = plt.figure()
            leg = []
            plt.plot(lm, er[0, :])

            figname = setting + ' reg coefficients L%d reg, %s' % (1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Error')
            plt.title(figname)
            plt.savefig(FIGFILE + figname + '.pdf')

            # Sparsity as a function of lm
            fig = plt.figure()
            leg = []
            plt.plot(lm, numcoefl1)

            figname = setting + ' LinearIC Sparsity in L%d reg, %s' % (1, opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Sparsity: Number of Coefficients')
            plt.title(figname)
            plt.savefig(FIGFILE + figname + '.pdf')

            # All Coefficients values as a function of lm
            fig = plt.figure()
            leg = []
            for i in range(len(featurenames) - 1):
                plt.plot(lm, np.reshape(coef[0, :, i], (len(lm), )))
                leg.append(featurenames[i + 1])

            figname = setting + ' LinearIC Linear features L%d reg, %s' % (1,
                                                                           opt)
            plt.xlabel('Regularization Coefficient')
            plt.ylabel('Coefficient Values')
            plt.title(figname)
            plt.legend(leg)
            plt.savefig(FIGFILE + figname + '.pdf')

        plt.show()
Example 6
import time
import pdb
from __init__ import *

IC1 = {'u0': 'exp', 'fu0': 'gauss', 'fk': 'uni'}
IC2 = {'u0': 'lin', 'fu0': 'gauss', 'fk': 'uni'}
IC3 = {'u0': 'lin', 'fu0': 'gauss', 'fk': 'gauss'}
IC4 = {'u0': 'exp', 'fu0': 'gauss', 'fk': 'gauss'}
#IC = [IC1, IC2, IC3, IC4]
IC = [IC2, IC3]  # Line IC
#IC = [IC1, IC4] # Exponential IC

version = 1
loadname = [makesavename(i, version) for i in IC]

S1 = PdfSolver()
fuk = []
fu = []
kmean = []
gridvars = []
ICparams = []

for i in range(len(IC)):
    fuki, fui, kmeani, gridvarsi, ICparamsi = S1.loadSolution(loadname[i])
    fuk.append(fuki)
    fu.append(fui)
    kmean.append(kmeani)

uu, kk, xx, tt = gridvarsi
muk, sigk, mink, maxk, sigu, a, b = ICparamsi
Example 7
def runML():
    loadname1 = 'u0exp_fu0gauss_fkgauss_1'
    loadname2 = 'u0lin_fu0gauss_fkgauss_1'
    loadname3 = 'u0lin_fu0gauss_fkuni_1'
    loadname4 = 'u0exp_fu0gauss_fkuni_1'
    loadname = [loadname1, loadname2, loadname3, loadname4]

    #loadname1 = 'u0exp_fu0gauss_fkgauss_0'
    #loadname2 = 'u0lin_fu0gauss_fkgauss_0'
    #loadname3 = 'u0lin_fu0gauss_fkuni_0'
    #loadname4 = 'u0exp_fu0gauss_fkuni_0'
    #loadname = [loadname1,loadname2,loadname3,loadname4]

    S1 = PdfSolver()

    fuk = []
    fu = []
    kmean = []
    gridvars = []
    ICparams = []

    for i in range(4):
        fuki, fui, kmeani, gridvarsi, ICparamsi = S1.loadSolution(loadname[i])
        fuk.append(fuki)
        fu.append(fui)
        kmean.append(kmeani)

    uu, kk, xx, tt = gridvarsi
    muk, sigk, mink, maxk, sigu, a, b = ICparamsi

    grid = PdfGrid()
    grid.setGrid(xx, tt, uu, kk)
    grid.printDetails()

    # Train on dataset 1
    p = (0, 1, 2, 3)

    options = ['all', 'linear', '2ndorder']
    for opt in options:
        print('#################### %s ########################' % (opt))
        DL = []
        X = []
        y = []
        for i in p:
            difflearn = PDElearn(fuk[i],
                                 grid,
                                 kmean[i],
                                 fu=fu[i],
                                 trainratio=1,
                                 debug=False)
            featurelist, labels, featurenames = difflearn.makeFeatures(
                option=opt)
            Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(featurelist,
                                                                labels,
                                                                shuffle=False)
            X.append(Xtrain)
            y.append(ytrain)
            DL.append(difflearn)

        for ti in range(4):
            print('\n ###### Training on i = %d ###### \n ' % (ti))
            lin1 = DL[ti].train(X[ti],
                                y[ti],
                                RegType='L1',
                                RegCoef=0.000001,
                                maxiter=5000,
                                tolerance=0.00001)
            lin2 = DL[ti].train(X[ti],
                                y[ti],
                                RegType='L2',
                                RegCoef=0.01,
                                maxiter=5000)
            lin0 = DL[ti].train(X[ti], y[ti], RegType='L0')

            for i in range(4):
                print('---- %d ----' % (i))
                print(loadname[i])
                print("L1 Reg Test Score = %5.3f | MSE = %7.5f" % (lin1.score(
                    X[i], y[i]), mean_squared_error(y[i], lin1.predict(X[i]))))
                print("L2 Reg Test Score = %5.3f | MSE = %7.5f" % (lin2.score(
                    X[i], y[i]), mean_squared_error(y[i], lin2.predict(X[i]))))
                print("L0 Reg Test Score = %5.3f | MSE = %7.5f" % (lin0.score(
                    X[i], y[i]), mean_squared_error(y[i], lin0.predict(X[i]))))
Example 8
    gridvars = {
        'u': [u0, uend, du],
        'k': [k0, kend, dk],
        't': [t0, tend, dt],
        'x': [x0, xend, dx]
    }
    ICparams = {
        'fu0': 'compact_gaussian',
        'fu0param': [mux, sigx, muU, sigU, rho],
        'fk': 'uniform',
        'fkparam': [mink, maxk]
    }
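    # Convention: every gridvars entry is [start, end, step] for its variable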

    grid = PdfGrid(gridvars)
    S = PdfSolver(grid, ICparams, save=True, case=case)
    S.solve()  # no need to return anything


def reaction():
    case = 'reaction_linear'

    dt = 0.01
    t0 = 0
    tend = 5

    du = 0.005
    u0 = -7
    uend = 7

    umean = 0.2
Example 9
def runML(setting):
    Tinc = 10
    Tmin = 0.3
    Tmax = 0.9
    T = np.linspace(Tmin, Tmax, Tinc)
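    # T sweeps the training fraction (trainratio below) from 30% to 90% of the time span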
    version = 1
    loadname = [makesavename(i, version) for i in IC]

    S1 = PdfSolver()
    fuk = []
    fu = []
    kmean = []
    gridvars = []
    ICparams = []

    for i in range(len(IC)):
        fuki, fui, kmeani, gridvarsi, ICparamsi = S1.loadSolution(loadname[i])
        fuk.append(fuki)
        fu.append(fui)
        kmean.append(kmeani)

    uu, kk, xx, tt = gridvarsi
    muk, sigk, mink, maxk, sigu, a, b = ICparamsi

    grid = PdfGrid()
    grid.setGrid(xx, tt, uu, kk)
    grid.printDetails()

    if setting == 'sepIC':

        options = ['linear', '2ndorder']
        for opt in options:
            print('#################### %s ########################' % (opt))
            DL = []
            X = []
            y = []
            error = []
            er = np.zeros((len(IC), 3, len(T)))
            for i in range(len(fuk)):
                print('---- Initial Condition ----')
                print('u0: ' + IC[i]['u0'])
                print('fu0: ' + IC[i]['fu0'])
                print('fk: ' + IC[i]['fk'])
                print('---- ----- ----')

                for j in range(len(T)):
                    print('\n ###### Training on %3.2f of the time span ###### \n '
                          % (T[j]))
                    difflearn = PDElearn(fuk[i],
                                         grid,
                                         kmean[i],
                                         fu=fu[i],
                                         trainratio=T[j],
                                         debug=False)
                    featurelist, labels, featurenames = difflearn.makeFeatures(
                        option=opt)
                    Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                        featurelist, labels, shuffle=False)

                    lin0 = difflearn.train(Xtrain, ytrain, RegType='L0')
                    lin1 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L1',
                                           RegCoef=0.000001,
                                           maxiter=5000,
                                           tolerance=0.00001)
                    lin2 = difflearn.train(Xtrain,
                                           ytrain,
                                           RegType='L2',
                                           RegCoef=0.01,
                                           maxiter=5000)
                    DL = [lin0, lin1, lin2]

                    for k in range(len(DL)):
                        # Do it for each initial condition
                        er[i, k,
                           j] = mean_squared_error(ytest, DL[k].predict(Xtest))

            ## Plotting
            for l in range(len(DL)):
                fig = plt.figure()
                leg = []
                for i in range(len(IC)):
                    plt.plot(T, np.reshape(er[i, l, :], (len(T), )))
                    leg.append(makesavename(IC[i], 1))

                plt.xlabel('Training Time Span (%)')
                plt.ylabel('Error')
                plt.title('Time Generalization for L%d reg, %s' % (l, opt))
                plt.legend(leg)

        plt.show()

    if setting == 'lumpIC':
        #### Lump initial conditions ####
        opt = 'linear'
        DL = []
        er = np.zeros((3, len(T)))

        for j in range(len(T)):
            print('\n ###### Training on %3.2f of the time span ###### \n ' % (T[j]))

            for i in range(len(IC)):

                difflearn = PDElearn(fuk[i],
                                     grid,
                                     kmean[i],
                                     fu=fu[i],
                                     trainratio=T[j],
                                     debug=False)
                featurelist, labels, featurenames = difflearn.makeFeatures(
                    option=opt)
                Xtrain, ytrain, Xtest, ytest = difflearn.makeTTsets(
                    featurelist, labels, shuffle=False)

                if i == 0:
                    X_train = Xtrain
                    y_train = ytrain
                    X_test = Xtest
                    y_test = ytest
                else:
                    # np.append returns a new array; assign the result back
                    X_train = np.append(X_train, Xtrain, axis=0)
                    y_train = np.append(y_train, ytrain, axis=0)
                    X_test = np.append(X_test, Xtest, axis=0)
                    y_test = np.append(y_test, ytest, axis=0)

            lin0 = difflearn.train(X_train, y_train, RegType='L0')
            lin1 = difflearn.train(X_train,
                                   y_train,
                                   RegType='L1',
                                   RegCoef=0.00001,
                                   maxiter=5000,
                                   tolerance=0.00001)
            lin2 = difflearn.train(X_train,
                                   y_train,
                                   RegType='L2',
                                   RegCoef=0.01,
                                   maxiter=5000)
            DL = [lin0, lin1, lin2]

            for k in range(len(DL)):
                # Do it for each initial condition
                er[k, j] = mean_squared_error(y_test, DL[k].predict(X_test))

        ## Plotting
        for l in range(len(DL)):
            fig = plt.figure()
            figname = 'Time Generalization L%d reg - linear lumped IC' % (l)

            plt.plot(T, er[l, :])

            plt.xlabel('Training Time Span (%)')
            plt.ylabel('Error')
            plt.title(figname)
            fig.savefig(figname + '.pdf')

        plt.show()
Example 10
    def solve(self):
        dt = 0.05
        t0 = 0
        tend = 5

        dx = 0.05
        x0 = -8.0
        xend = 8.0

        dk = 0.01
        k0 = -6.3
        kend = 6.3

        du = 0.05
        u0 = -6.5
        uend = 6.5

        sigu0 = 1.1
        a = 1.0
        b = 0.0

        # IC fu(U; x)
        mux = 0.5
        sigx = 0.7
        muU = 0
        sigU = 1.2
        rho = 0

        ## Gaussian k
        muk = 0.5
        sigk = 1.0
        fkdist = 'gaussian'
        fkparam = [muk, sigk]

        ## Uniform k
        #mink=-0.5
        #maxk=0.5
        #fkdist = 'uniform'
        #fkparam = [mink, maxk]

        ########

        gridvars = {
            'u': [u0, uend, du],
            'k': [k0, kend, dk],
            't': [t0, tend, dt],
            'x': [x0, xend, dx]
        }
        ICparams = {
            'fu0': 'compact_gaussian',
            'fu0param': [mux, sigx, muU, sigU, rho],
            'fk': fkdist,
            'fkparam': fkparam
        }

        grid = PdfGrid(gridvars)
        S = PdfSolver(grid, ICparams, save=True, case=self.case)
        savename = S.solve_fu()  # the save name is returned for reuse below
        print(savename)

        return savename