Example #1
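This snippet is the driver side: it fixes the second target parameter, trains a ParamNN model, and plots the predicted parameters against the exact values with burgers2Dplot. The names rateTrain, lr, index, size, dataMode, nItr, and alpha are assumed to be defined earlier in the script; the import lines sketched below are not part of the original, and the module holding ParamNN is a hypothetical name.

    # Assumed imports (not shown in the original fragment):
    #   import numpy as np
    #   import burgers2Dplot
    #   from paramNN import ParamNN   # hypothetical module name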
    param2 = np.array([0.099])
    # ----

    # Training ----
    model = ParamNN(rateTrain=rateTrain,
                    lr=lr,
                    index=index,
                    size=size,
                    dataMode=dataMode)

    llosses, grads, preparam, exactparam = model.train(nItr=nItr,
                                                       alpha=alpha,
                                                       param2=param2)
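    # Per the usage below: llosses and grads hold per-parameter test histories,
    # preparam holds the two predicted-parameter traces, and exactparam is the
    # exact value of the second parameter.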

    # Plot ----
    myPlot = burgers2Dplot.Plot(dataMode=dataMode, trialID=index)

    #myPlot.Loss1Data(llosses[0], labels=['test'], savename=f'poNN2d_testloss1_d{size}_part7_{param2}')
    #myPlot.Loss1Data(llosses[1], labels=['test'], savename=f'poNN2d_testloss2_d{size}_part5')

    #myPlot.Loss1Data(grads[0], labels=['test'], savename=f'poNN2d_testgrad1_d{size}_part5')
    #myPlot.Loss1Data(grads[1], labels=['test'], savename=f'poNN2d_testgrad2_d{size}_part5')

    myPlot.paramCls(preparam[0],
                    np.array([1.0]),
                    savename=f'poNN2d_testparam1_d{size}_part7_{param2}')
    myPlot.paramCls(preparam[1],
                    exactparam,
                    savename=f'poNN2d_testparam2_d{size}_part7_{param2}')
    # ----
Example #2
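This example is the constructor of the parameter-estimation network itself, built as a TF1-compat graph: it picks the grid size from dataMode, loads the 2-D Burgers dataset, declares placeholders for the observed u/v fields and the two target parameters, builds a training branch and a weight-sharing test branch of lambdaNN, forms per-parameter squared-error losses, and opens a GPU-limited session.

    # Assumed imports (not shown): tensorflow as tf, burgers2Ddata, burgers2Dplot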
    def __init__(self,
                 rateTrain=0.0,
                 lr=1e-3,
                 nBatch=100,
                 trialID=0,
                 size=0,
                 dataMode='test'):

        # parameter ----
        if dataMode == 'large':
            self.xDim = 20
            self.yDim = 20
        elif dataMode == 'middle':
            self.xDim = 15
            self.yDim = 15
        elif dataMode == 'small':
            self.xDim = 10
            self.yDim = 10
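
        # note: xDim / yDim are only set for 'large', 'middle', and 'small';
        # any other dataMode (including the default 'test') leaves them undefined,
        # so the placeholder shapes below would fail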

        self.tDim = 71
        self.nBatch = nBatch
        self.trialID = trialID
        self.lambdaDim = 2
        self.size = size
        # ----

        # for plot
        self.myPlot = burgers2Dplot.Plot(dataMode=dataMode, trialID=trialID)

        # Dataset ----
        self.myData = burgers2Ddata.Data(pdeMode='burgers2d',
                                         dataMode=dataMode)
        # full grids: x, y [51, 1], t [201, 1]; sampled grids: x, y, t [xDim or yDim, 1];
        # fields: u, v [nData, xDim, yDim, tDim]; viscosity: nu [nData,]
        self.alltestX, self.alltestY, self.testT, self.testX, self.testY, self.testU, self.testV, self.testNU, self.idx, self.idy = self.myData.traintest(
            size=size)
        # ----

        # Placeholder ----
        # observed u, v fields: [None, xDim, yDim, tDim]
        self.inobsu = tf.compat.v1.placeholder(
            tf.float32, shape=[None, self.xDim, self.yDim, self.tDim])
        self.inobsv = tf.compat.v1.placeholder(
            tf.float32, shape=[None, self.xDim, self.yDim, self.tDim])
        self.inobs = tf.concat([self.inobsu, self.inobsv], -1)
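        # concatenating u and v along the last axis gives [None, xDim, yDim, 2 * tDim]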
        # target PDE parameters: [None, lambdaDim]
        self.y = tf.compat.v1.placeholder(tf.float32,
                                          shape=[None, self.lambdaDim])
        # ----

        # neural network ----
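        # predy is the training branch (built with rate=rateTrain); predy_test
        # shares the same weights via reuse=True for evaluation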
        self.predy = self.lambdaNN(self.inobs, rate=rateTrain)
        self.predy_test = self.lambdaNN(self.inobs, reuse=True)
        # ----

        # loss ----
        # param loss
        self.loss1 = tf.reduce_mean(tf.square(self.y[:, 0] - self.predy[:, 0]))
        self.loss2 = tf.reduce_mean(tf.square(self.y[:, 1] - self.predy[:, 1]))

        self.loss1_test = tf.reduce_mean(
            tf.square(self.y[:, 0] - self.predy_test[:, 0]))
        self.loss2_test = tf.reduce_mean(
            tf.square(self.y[:, 1] - self.predy_test[:, 1]))
        # ----

        self.loss = self.loss1 + self.loss2

        # Optimizer ----
        self.opt = tf.compat.v1.train.AdamOptimizer(lr).minimize(self.loss)
        # ----

        # Session ----
        # limit GPU memory to 10% of the device per process and allow growth
        config = tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(
            per_process_gpu_memory_fraction=0.1, allow_growth=True))
        self.sess = tf.compat.v1.Session(config=config)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        self.saver = tf.compat.v1.train.Saver()
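
lambdaNN itself is not part of this snippet. A minimal sketch of a compatible definition, assuming a small convolutional encoder followed by a dense head of width lambdaDim (the layer sizes are illustrative, not taken from the source):

    def lambdaNN(self, x, rate=0.0, reuse=False):
        # x: stacked u/v observations, [None, xDim, yDim, 2 * tDim]
        with tf.compat.v1.variable_scope('lambdaNN', reuse=reuse):
            h = tf.compat.v1.layers.conv2d(x, 32, 3, padding='same',
                                           activation=tf.nn.relu)
            h = tf.compat.v1.layers.conv2d(h, 32, 3, padding='same',
                                           activation=tf.nn.relu)
            h = tf.compat.v1.layers.flatten(h)
            # rate is 0.0 for the reused test branch, so dropout is a no-op there
            h = tf.compat.v1.layers.dropout(h, rate=rate, training=True)
            h = tf.compat.v1.layers.dense(h, 128, activation=tf.nn.relu)
            # one output per estimated PDE parameter
            return tf.compat.v1.layers.dense(h, self.lambdaDim)

The constructor only relies on lambdaNN taking the stacked observations and returning a [None, lambdaDim] tensor, so any architecture with that signature would slot in.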