def __init__(self, t='fig'):
        self.mydata = ECTdata('E:\deeplearning\ECT\数据生成\data', 5000)
        self.mydata.initsca(t=t)
        print("data init success!")

        config = tensorflow.ConfigProto()
        config.gpu_options.allow_growth = True  # allow GPU memory to grow on demand
        set_session(tensorflow.Session(config=config))

        optimizer = Adam()
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()
        self.generator.compile(loss='mean_squared_error', optimizer=optimizer)

        # The generator takes noise as input and generates imgs
        z = Input(shape=(self.mydata.capsize, ))
        img = self.generator(z)

        self.discriminator.trainable = False

        # The discriminator takes generated images as input and determines validity
        validity = self.discriminator(img)

        # The combined model  (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model(z, validity)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
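A minimal sketch of how a combined model wired this way is typically trained (this is an illustration, not part of the original example; `gan`, `caps`, and `real_imgs` are hypothetical names): the discriminator is updated on real and generated images, then the generator is updated through `combined`, whose discriminator copy was compiled with frozen weights.

import numpy as np

def gan_train_step(gan, caps, real_imgs, batch_size=100):
    # Adversarial labels for one batch.
    valid = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))
    # 1) Update the discriminator on real and generated images.
    gen_imgs = gan.generator.predict(caps)
    d_loss_real = gan.discriminator.train_on_batch(real_imgs, valid)
    d_loss_fake = gan.discriminator.train_on_batch(gen_imgs, fake)
    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
    # 2) Update the generator through the stacked model; the discriminator
    #    inside `combined` is frozen, so only generator weights change.
    g_loss = gan.combined.train_on_batch(caps, valid)
    return d_loss, g_loss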
Example #2
    def __init__(self):
        if 'session' in locals() and tensorflow.session is not None:
            print('Close interactive session')
            tensorflow.session.close()
        config = tensorflow.ConfigProto()
        config.gpu_options.allow_growth = True  # allow GPU memory to grow on demand
        tensorflow.keras.backend.set_session(tensorflow.Session(config=config))
        print('GPU memory is allowed to grow.')
        tensorflow.keras.backend.clear_session()

        self.path="E:\deeplearning\程序\ECT\ECT\真实实验";
        empty1=scipy.io.loadmat(self.path+'\empty.mat')
        efull1=scipy.io.loadmat(self.path+'\efull.mat')
        lmc1=scipy.io.loadmat(self.path+'\lmc.mat')
        self.fltempty=np.asarray(empty1['Cap'])    #空管标定数据
        self.fltfull=np.asarray(efull1['Cap'])    #满管标定数据
        self.lmc  =np.asarray(lmc1['S'])       #灵敏场
        with open(self.path+'\calibration.txt','r') as f:
            lines=f.readlines()
            strempty=lines[1].split()
            intempty=list(map(int,strempty))
            self.intempty=np.asarray(intempty)
            strfull=lines[3].split()
            intfull=list(map(int,strfull))
            self.intfull=np.asarray(intfull)
        self.intdelt=self.intfull-self.intempty
        self.fltdelt=self.fltfull-self.fltempty
        index=np.argmax(self.intdelt)
        self.k=self.intdelt[index]/self.fltdelt[0][index]
        print (self.k)
        self.draw=ECTdata('E:\deeplearning\ECT\数据生成\data',size=200)
        self.draw.initsca(t='tri')
        self.dn=DN()
        self.land=Land()
Example #3
 def __init__(self,t='fig'):      
     self.mydata=ECTdata('E:\deeplearning\ECT\数据生成\data',5000)
     self.mydata.initsca(t=t)
     print("data init success!")
     # close any session left open from a previous run
     if 'session' in locals() and tensorflow.session is not None:
         print('Close interactive session')
         tensorflow.session.close()
     config = tensorflow.ConfigProto()
     config.gpu_options.allow_growth = True  # allow GPU memory to grow on demand
     set_session(tensorflow.Session(config=config))
     print('GPU memory is allowed to grow.')
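For reference, recent TensorFlow 2.x versions request the same memory-growth behaviour without a Session object; a minimal sketch, not part of the original example:

import tensorflow as tf

# TF 2.x equivalent of the allow_growth ConfigProto setting above.
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)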
Example #4
    def __init__(self):
        # if 'session' in locals() and tensorflow.session is not None:
        #     print('Close interactive session')
        #     tensorflow.session.close()
        # config = tensorflow.ConfigProto()
        # config.gpu_options.allow_growth = True  # allow GPU memory to grow on demand
        # tensorflow.keras.backend.set_session(tensorflow.Session(config=config))
        # print('GPU memory is allowed to grow.')
        # tensorflow.keras.backend.clear_session()

        self.path = "E:\deeplearning\程序\ECT\ECT\真实实验"
        empty1 = scipy.io.loadmat(self.path + '\empty.mat')
        efull1 = scipy.io.loadmat(self.path + '\efull.mat')
        lmc1 = scipy.io.loadmat(self.path + '\lmc.mat')
        self.fltempty = np.asarray(empty1['Cap'])  # empty-pipe calibration data
        self.fltfull = np.asarray(efull1['Cap'])  # full-pipe calibration data
        self.lmc = np.asarray(lmc1['S'])  # sensitivity matrix
        self.path = "E:\会议项目\第九届流态化会议\处理数据\加料高度\\v15l168\\2016-12-13_13-04-28.203"
        with open(self.path + '\calibration.txt', 'r') as f:
            lines = f.readlines()
            strempty = lines[1].split()
            intempty = list(map(int, strempty))
            self.intempty = np.asarray(intempty)
            strfull = lines[3].split()
            intfull = list(map(int, strfull))
            self.intfull = np.asarray(intfull)
        with open("E:\会议项目\第九届流态化会议\处理数据\\app\\option\\calibration\\efull.txt",
                  'r') as f:
            l = f.readline()
            strfull = l.split()
            fltfull = list(map(float, strfull))
            self.oldfltfull = np.asarray(fltfull)
        with open("E:\会议项目\第九届流态化会议\处理数据\\app\\option\\calibration\\empty.txt",
                  'r') as f:
            l = f.readline()
            strempty = l.split()
            fltempty = list(map(float, strempty))
            self.oldfltempty = np.asarray(fltempty)

        self.intdelt = self.intfull - self.intempty
        self.fltdelt = self.fltfull - self.fltempty
        self.oldfltdelt = self.oldfltfull - self.oldfltempty
        index = np.argmax(self.intdelt)
        self.k = self.intdelt[index] / self.fltdelt[0][index]
        self.k2 = self.intdelt[index] / self.oldfltdelt[index]
        print(self.k)
        print(self.k2)
        self.draw = ECTdata('E:\deeplearning\ECT\数据生成\data', size=200)
        self.draw.initsca(t='tri')
        self.dn = DN()
        self.land = Land()
Example #5
 def __init__(self, t='tri', sample=20000):
     self.mydata = ECTdata('E:\deeplearning\ECT\数据生成\data', sample)
     self.mydata.initsca(t=t)
     self.sample = sample
     print("data init success!")
     #print(self.mydata.lmc.shape)   28,702
     self.Srow = np.sum(self.mydata.lmc, axis=0)  #702
     self.Scol = np.sum(self.mydata.lmc, axis=1)  #28
     self.SLBP = np.zeros([self.mydata.capsize, self.mydata.imgsize])
     self.SLAND = np.zeros([self.mydata.capsize, self.mydata.imgsize])
     for i in range(self.mydata.imgsize):
         for j in range(self.mydata.capsize):
             self.SLBP[j][i] = self.mydata.lmc[j][i] / self.Srow[i]
     self.Srow2 = np.sum(self.SLBP, axis=1)
     for i in range(self.mydata.capsize):
         for j in range(self.mydata.imgsize):
             self.SLAND[i][j] = self.SLBP[i][j] / self.Srow2[i]
     self.yLBP = np.zeros([4, self.mydata.imgsize])
     self.yLAND = np.zeros([4, self.mydata.imgsize])
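The nested loops above can also be written with numpy broadcasting; this is a sketch under the assumption that `lmc` has shape `(capsize, imgsize)`, as the commented shape note suggests:

import numpy as np

def normalise_sensitivity(lmc):
    # SLBP: divide each pixel column by its sum over the electrode pairs.
    SLBP = lmc / lmc.sum(axis=0)
    # SLAND: then divide each row of SLBP by its sum over the pixels.
    SLAND = SLBP / SLBP.sum(axis=1, keepdims=True)
    return SLBP, SLAND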
Example #6
class BP:
    def __init__(self, t='fig'):
        self.mydata = ECTdata('E:\deeplearning\ECT\数据生成\data', 20000)
        self.mydata.initsca(t=t)
        print("data init success!")

    def train(self, times):
        config = tensorflow.ConfigProto()
        config.gpu_options.allow_growth = True  # allow GPU memory to grow on demand
        set_session(tensorflow.Session(config=config))
        if 'session' in locals() and tensorflow.session is not None:
            print('Close interactive session')
            tensorflow.session.close()

        input = Input(shape=(self.mydata.capsize, ))
        decoded = Dense(1024, activation='relu')(input)
        decoded1 = Dense(1024, activation='relu')(decoded)
        decoded2 = Dense(1024, activation='relu')(decoded1)
        decoded3 = Dense(1024, activation='relu')(decoded2)
        output = Dense(self.mydata.imgsize, activation='relu')(decoded3)
        #decoded2 = Dense(3072, activation='relu')(input)
        #output=Dense(self.mydata.imgsize, activation='tanh')(decoded2)

        self.model = Model(inputs=input, outputs=output)
        self.model.compile(optimizer='adam', loss='mean_squared_error')
        self.model.fit(self.mydata.captrain,
                       self.mydata.imgtrain,
                       epochs=times,
                       shuffle=True,
                       validation_data=(self.mydata.captest,
                                        self.mydata.imgtest))
        p = self.model.evaluate(self.mydata.captest, self.mydata.imgtest)
        self.model.save("BP4layer.h5")
        print(p)

    def printpic(self, index, t='fig'):
        self.mydata.drawsca(self.mydata.imgtest[index], t=t)
        y = self.model.predict(self.mydata.captest)
        y[y > 1] = 1
        y[y < 0] = 0
        self.mydata.drawsca(y[index], t=t)
        mp.show()
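A hypothetical way to drive the BP class above (the epoch count and test index are placeholders):

bp = BP(t='fig')
bp.train(100)      # fit the four-layer fully connected network
bp.printpic(0)     # compare one test image with its reconstruction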
Example #7
class realplay():
    def __init__(self):
        # if 'session' in locals() and tensorflow.session is not None:
        #     print('Close interactive session')
        #     tensorflow.session.close()
        # config = tensorflow.ConfigProto()
        # config.gpu_options.allow_growth = True  # allow GPU memory to grow on demand
        # tensorflow.keras.backend.set_session(tensorflow.Session(config=config))
        # print('GPU memory is allowed to grow.')
        # tensorflow.keras.backend.clear_session()

        self.path = "E:\deeplearning\程序\ECT\ECT\真实实验"
        empty1 = scipy.io.loadmat(self.path + '\empty.mat')
        efull1 = scipy.io.loadmat(self.path + '\efull.mat')
        lmc1 = scipy.io.loadmat(self.path + '\lmc.mat')
        self.fltempty = np.asarray(empty1['Cap'])  # empty-pipe calibration data
        self.fltfull = np.asarray(efull1['Cap'])  # full-pipe calibration data
        self.lmc = np.asarray(lmc1['S'])  # sensitivity matrix
        self.path = "E:\会议项目\第九届流态化会议\处理数据\加料高度\\v15l168\\2016-12-13_13-04-28.203"
        with open(self.path + '\calibration.txt', 'r') as f:
            lines = f.readlines()
            strempty = lines[1].split()
            intempty = list(map(int, strempty))
            self.intempty = np.asarray(intempty)
            strfull = lines[3].split()
            intfull = list(map(int, strfull))
            self.intfull = np.asarray(intfull)
        with open("E:\会议项目\第九届流态化会议\处理数据\\app\\option\\calibration\\efull.txt",
                  'r') as f:
            l = f.readline()
            strfull = l.split()
            fltfull = list(map(float, strfull))
            self.oldfltfull = np.asarray(fltfull)
        with open("E:\会议项目\第九届流态化会议\处理数据\\app\\option\\calibration\\empty.txt",
                  'r') as f:
            l = f.readline()
            strempty = l.split()
            fltempty = list(map(float, strempty))
            self.oldfltempty = np.asarray(fltempty)

        self.intdelt = self.intfull - self.intempty
        self.fltdelt = self.fltfull - self.fltempty
        self.oldfltdelt = self.oldfltfull - self.oldfltempty
        index = np.argmax(self.intdelt)
        self.k = self.intdelt[index] / self.fltdelt[0][index]
        self.k2 = self.intdelt[index] / self.oldfltdelt[index]
        print(self.k)
        print(self.k2)
        self.draw = ECTdata('E:\deeplearning\ECT\数据生成\data', size=200)
        self.draw.initsca(t='tri')
        self.dn = DN()
        self.land = Land()

    def play(self):
        with open(self.path + '\cdata1.txt', 'r') as f:
            self.lines = f.readlines()
            self.lineindex = 0

        fig, ax = plt.subplots(1, 2)
        self.ax1 = ax[0]
        self.ax2 = ax[1]

        initarr = np.ones([200, 200])
        line1 = self.ax1.imshow(initarr,
                                cmap=plt.get_cmap('jet'),
                                vmin=0,
                                vmax=1)
        line2 = self.ax2.imshow(initarr,
                                cmap=plt.get_cmap('jet'),
                                vmin=0,
                                vmax=1)

        self.index = 1
        self.lineindex = 300
        # frames: length of the animation (number of frames in one loop); at run time its value is passed to the update function's parameter "n"
        ani = animation.FuncAnimation(fig,
                                      self.calculate,
                                      frames=self.index,
                                      init_func=self.initpic,
                                      blit=True)
        #plt.colorbar()
        ax[0].axis('off')
        ax[1].axis('off')
        #fig.colorbar(line2,ax=[ax[0],ax[1]])
        cax = plt.axes([0.92, 0.25, 0.025, 0.48])
        fig.colorbar(line2, cax=cax)
        plt.show()

    def play2(self):
        with open(self.path + '\cdata1.txt', 'r') as f:
            self.lines = f.readlines()
            self.lineindex = 0

        a = self.lines[3].split(' ', 1)
        strmeasure = a[1].split()
        fltmeasure = list(map(float, strmeasure))
        measure = np.asarray(fltmeasure)
        print(a[0])

        de = (fltmeasure - self.oldfltempty) * self.k2

        fltmeasure = de / self.k + self.fltempty

        d = np.divide(np.subtract(fltmeasure, self.fltempty),
                      self.fltdelt)  # capacitance normalization

        # ydn=self.dn.caul(d).T.reshape(-1)
        yland = self.land.caul(d).reshape(-1)
        t = 'tri'
        # drawdn=self.draw.drawdata(ydn,t=t)
        drawland = self.draw.drawdata(yland, t=t)

        fig, ax = plt.subplots(1, 2)
        self.ax1 = ax[0]
        self.ax2 = ax[1]

        # line1=self.ax1.imshow(drawdn,cmap=plt.get_cmap('jet'),vmin=0,vmax=1)
        line1 = self.ax1.imshow(drawland,
                                cmap=plt.get_cmap('jet'),
                                vmin=0,
                                vmax=1)
        line2 = self.ax2.imshow(drawland,
                                cmap=plt.get_cmap('jet'),
                                vmin=0,
                                vmax=1)
        plt.show()

    def initpic(self):
        initarr = np.ones([200, 200])
        line1 = self.ax1.imshow(initarr,
                                cmap=plt.get_cmap('jet'),
                                vmin=0,
                                vmax=1)
        line2 = self.ax2.imshow(initarr,
                                cmap=plt.get_cmap('jet'),
                                vmin=0,
                                vmax=1)
        return line1, line2

    def calculate(self, index):

        a = self.lines[self.lineindex].split(' ', 1)
        strmeasure = a[1].split()
        fltmeasure = list(map(float, strmeasure))
        measure = np.asarray(fltmeasure)
        self.lineindex = self.lineindex + 10
        print(a[0])

        de = (fltmeasure - self.oldfltempty) * self.k2
        fltmeasure = de / self.k + self.fltempty

        d = np.divide(np.subtract(fltmeasure, self.fltempty),
                      self.fltdelt)  # capacitance normalization

        ydn = self.dn.caul(d).T.reshape(-1)
        yland = self.land.caul(d).reshape(-1)
        t = 'tri'
        drawdn = self.draw.drawdata(ydn, t=t)
        drawland = self.draw.drawdata(yland, t=t)

        line1 = self.ax1.imshow(drawdn,
                                cmap=plt.get_cmap('jet'),
                                vmin=0,
                                vmax=1)
        line2 = self.ax2.imshow(drawland,
                                cmap=plt.get_cmap('jet'),
                                vmin=0,
                                vmax=1)
        #line1=self.ax1.imshow(drawdn)
        #line1=self.ax1.imshow(drawland)

        return line1, line2
Example #8
class DN():
    def __init__(self, t='fig', sample=0):
        self.mydata = ECTdata('E:\deeplearning\ECT\数据生成\data2', size=sample)
        self.mydata.initsca(t=t)
        print("data init success!")
        # close any session left open from a previous run
        if 'session' in locals() and tensorflow.session is not None:
            print('Close interactive session')
            tensorflow.session.close()
        config = tensorflow.ConfigProto()
        config.gpu_options.allow_growth = True  # allow GPU memory to grow on demand
        set_session(tensorflow.Session(config=config))
        print('GPU memory is allowed to grow.')
        tensorflow.keras.backend.clear_session()

    def buildmodel(self):
        model = Sequential()

        #model.add(MaxPooling2D((1,1))
        model.add(UpSampling2D((2, 2), input_shape=(8, 8, 1)))
        model.add(Conv2D(8, 4, 1, activation='relu', padding='same'))
        model.add(Dropout(0.2))
        #model.add(LeakyReLU())
        #model.add(MaxPooling2D((1,1)))
        model.add(UpSampling2D((2, 2)))
        model.add(Conv2D(8, 4, 1, activation='relu', padding='same'))
        model.add(Dropout(0.2))
        #model.add(LeakyReLU())
        #model.add(MaxPooling2D((1,1)))
        model.add(UpSampling2D((2, 2)))
        model.add(Conv2D(4, 4, 1, activation='relu', padding='same'))
        model.add(Dropout(0.2))
        #model.add(LeakyReLU())
        model.add(Flatten())
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(2048, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(self.mydata.imgsize, activation='relu'))

        model.summary()
        self.model = model

    def train(self, times):
        self.buildmodel()
        self.model.compile(optimizer='Adam', loss='mse')

        TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
        os.mkdir('./logs/' + TIMESTAMP)
        # log_dir: directory where the log files are saved
        # histogram_freq: how often (in epochs) to compute activation histograms for each layer; 0 disables them
        # tensorboard=TensorBoard(log_dir='./logs/'+TIMESTAMP,histogram_freq=1)

        # filepath: string, path where the model is saved
        # monitor: quantity to monitor
        # verbose: verbosity mode, 0 or 1
        # save_best_only: if True, only the model with the best validation performance is kept
        #checkpointer = ModelCheckpoint(filepath="./logs/"+TIMESTAMP+"/weights.hdf5", verbose=1, save_best_only=True)

        # monitor: quantity to monitor
        # patience: number of epochs with no improvement (e.g. loss not lower than the previous epoch) before training stops
        # verbose: verbosity mode
        # mode: one of 'auto', 'min', 'max'; in 'min' mode training stops when the monitored value stops decreasing, in 'max' mode when it stops increasing
        earlystop = EarlyStopping(monitor='val_loss',
                                  patience=10,
                                  verbose=0,
                                  mode='min')

        shutil.copy('./ECTDN.py', './logs/' + TIMESTAMP + '/ECTDN.py')

        self.model.fit(self.trainX,
                       self.mydata.imgtrain,
                       epochs=times,
                       shuffle=True,
                       validation_data=(self.testX, self.mydata.imgtest),
                       callbacks=[earlystop])
        self.model.save("./logs/" + TIMESTAMP + "/weights.hdf5")
        print("Model has been saved.")

        start = time.clock()
        e = self.model.evaluate(self.testX, self.mydata.imgtest)
        end = time.clock()
        print("测试集损失为%f" % e)
        print("重建使用时间为:%f" % (end - start))

    def printpic(self, index, t='fig'):
        self.mydata.drawsca(self.mydata.imgtest[index], t=t)
        y = self.model.predict(self.testX)
        y[y > 1] = 1
        y[y < 0] = 0
        self.mydata.drawsca(y[index], t=t)
        mp.show()
        s = np.std(y[index] - self.mydata.imgtest[index])
        print("标准差为%f" % s)

    def printLastTest(self, index, t='fig'):

        self.mydata.drawsca(self.mydata.lastTestimages[index], t=t)
        y = self.model.predict(self.lastTestX)
        y[y > 0.5] = y[y > 0.5] * 2
        y[y > 1] = 1
        y[y < 0] = 0
        self.mydata.drawsca(y[index], t=t)
        mp.show()
        s = np.std(y[index] - self.mydata.lastTestimages[index])
        print("标准差为%f" % s)

        np.savetxt("data\lastTestimages.txt",
                   self.mydata.lastTestimages,
                   fmt='%f',
                   delimiter=',',
                   newline='\n')
        np.savetxt("data\predictimgs.txt",
                   y,
                   fmt='%f',
                   delimiter=',',
                   newline='\n')

    def toMatrix(self):
        trainlen = self.mydata.captrain.shape[0]
        self.trainX = np.zeros([trainlen, 8, 8])
        for n in range(trainlen):
            k = 0
            for i in range(7):
                for j in range(i + 1, 8):
                    self.trainX[n][i][j] = self.mydata.captrain[n][k]
                    self.trainX[n][j][i] = self.trainX[n][i][j]
                    k = k + 1
        testlen = self.mydata.captest.shape[0]
        self.testX = np.zeros([testlen, 8, 8])
        for n in range(testlen):
            k = 0
            for i in range(7):
                for j in range(i + 1, 8):
                    self.testX[n][i][j] = self.mydata.captest[n][k]
                    self.testX[n][j][i] = self.testX[n][i][j]
                    k = k + 1
        self.trainX.shape = (-1, 8, 8, 1)
        self.testX.shape = (-1, 8, 8, 1)

        len = 4
        self.lastTestX = np.zeros([len, 8, 8])
        for n in range(len):
            k = 0
            for i in range(7):
                for j in range(i + 1, 8):
                    self.lastTestX[n][i][j] = self.mydata.lastTestcaps[n][k]
                    self.lastTestX[n][j][i] = self.lastTestX[n][i][j]
                    k = k + 1
        self.lastTestX.shape = (-1, 8, 8, 1)

    def loadmodel(self):
        model = load_model(
            'E:\\deeplearning\\程序\\ECT\\ECT\\logs\\DN190909-01\\weights.hdf5')
        model.summary()
        self.model = model
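A hypothetical usage of the DN class above; note that `toMatrix` has to be called before `train`, because `train` reads `self.trainX`/`self.testX` (the epoch count, sample size, and index are placeholders):

dn = DN(t='fig', sample=20000)
dn.toMatrix()      # pack the 28 capacitance values into symmetric 8x8 matrices
dn.train(200)      # train the upsampling CNN with early stopping
dn.printpic(0)     # draw ground truth and reconstruction for one test sample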
Example #9
class Traditional:
    def __init__(self, t='tri', sample=20000):
        self.mydata = ECTdata('E:\deeplearning\ECT\数据生成\data', sample)
        self.mydata.initsca(t=t)
        self.sample = sample
        print("data init success!")
        #print(self.mydata.lmc.shape)   28,702
        self.Srow = np.sum(self.mydata.lmc, axis=0)  #702
        self.Scol = np.sum(self.mydata.lmc, axis=1)  #28
        self.SLBP = np.zeros([self.mydata.capsize, self.mydata.imgsize])
        self.SLAND = np.zeros([self.mydata.capsize, self.mydata.imgsize])
        for i in range(self.mydata.imgsize):
            for j in range(self.mydata.capsize):
                self.SLBP[j][i] = self.mydata.lmc[j][i] / self.Srow[i]
        self.Srow2 = np.sum(self.SLBP, axis=1)
        for i in range(self.mydata.capsize):
            for j in range(self.mydata.imgsize):
                self.SLAND[i][j] = self.SLBP[i][j] / self.Srow2[i]
        self.yLBP = np.zeros([4, self.mydata.imgsize])
        self.yLAND = np.zeros([4, self.mydata.imgsize])

    def LBP(self):
        for i in range(4):
            self.yLBP[i] = np.matmul(
                self.SLAND.T, self.mydata.lastTestcaps[i].reshape(
                    -1, 1)).reshape(-1) * self.mydata.imgsize / 2

    def LAND(self):
        X = np.matrix(np.matmul(self.SLAND.T, self.SLAND))
        e, v = np.linalg.eig(X)
        ee = np.amax(e)
        a = 2 / ee
        start = time.clock()
        for i in range(4):
            g = np.matmul(self.SLAND.T, self.mydata.lastTestcaps[i].reshape(
                -1, 1)).reshape(-1) * self.mydata.imgsize / 28
            for k in range(1000):
                gk = g - a * np.dot(
                    self.SLAND.T,
                    np.dot(self.SLAND, g) - self.mydata.lastTestcaps[i] * 1.5)
                gk[gk > 1] = 1
                gk[gk < 0] = 0
                g = gk
            self.yLAND[i] = g
        end = time.clock()
        print("重建使用时间为:%f" % (end - start))

    def printpic(self, index, t='fig'):
        self.mydata.drawsca(self.mydata.lastTestimages[index], t=t)

        self.yLBP[self.yLBP > 1] = 1
        self.yLBP[self.yLBP < 0] = 0
        self.mydata.drawsca(self.yLBP[index], t=t)

        self.yLAND[self.yLAND > 1] = 1
        self.yLAND[self.yLAND < 0] = 0
        self.mydata.drawsca(self.yLAND[index], t=t)

        s = np.std(self.yLBP[index] - self.mydata.lastTestimages[index])
        print("LBP标准差为%f" % s)
        s = np.std(self.yLAND[index] - self.mydata.lastTestimages[index])
        print("LAND标准差为%f" % s)
        mp.show()

        np.savetxt("data\lbp.txt",
                   self.yLBP,
                   fmt='%f',
                   delimiter=',',
                   newline='\n')
        np.savetxt("data\land.txt",
                   self.yLAND,
                   fmt='%f',
                   delimiter=',',
                   newline='\n')
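A hypothetical usage of the Traditional class above: LBP gives the one-step back-projection, LAND runs 1000 projected Landweber iterations, and printpic draws and compares both (the index is a placeholder):

tr = Traditional(t='tri', sample=20000)
tr.LBP()           # linear back-projection reconstruction
tr.LAND()          # iterative Landweber reconstruction, clipped to [0, 1]
tr.printpic(0)     # show ground truth, LBP result and LAND result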
Example #10
class GAN():
    def __init__(self, t='fig'):
        self.mydata = ECTdata('E:\deeplearning\ECT\数据生成\data', 5000)
        self.mydata.initsca(t=t)
        print("data init success!")

        config = tensorflow.ConfigProto()
        config.gpu_options.allow_growth = True  # allow GPU memory to grow on demand
        set_session(tensorflow.Session(config=config))

        optimizer = Adam()
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()
        self.generator.compile(loss='mean_squared_error', optimizer=optimizer)

        # The generator takes noise as input and generates imgs
        z = Input(shape=(self.mydata.capsize, ))
        img = self.generator(z)

        self.discriminator.trainable = False

        # The discriminator takes generated images as input and determines validity
        validity = self.discriminator(img)

        # The combined model  (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model(z, validity)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    def build_generator(self):

        model = Sequential()
        model.add(Dense(256, input_dim=self.mydata.capsize))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(self.mydata.imgsize, activation='tanh'))
        model.summary()  # print the network structure

        noise = Input(shape=(self.mydata.capsize, ))
        img = model(noise)

        return Model(noise, img)

    def build_discriminator(self):

        model = Sequential()
        model.add(Dense(512, input_dim=self.mydata.imgsize))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(1, activation='tanh'))
        model.summary()
        img = Input(shape=(self.mydata.imgsize, ))
        validity = model(img)

        return Model(img, validity)

    def train(self, epochs, batch_size=100):

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))
        batchs = len(self.mydata.imgtrain) // batch_size
        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------
            for batch in range(batchs):
                #index=np.random.randint(0,4000,size=(batch_size,))
                noise = self.mydata.captrain[batch * batch_size:
                                             (batch + 1) * batch_size] * 2 - 1
                #noise=np.random.normal(0,1,size=(batch_size,28))  # supervised-learning variant
                gen_imgs = self.generator.predict(noise)
                imgs = self.mydata.imgtrain[batch * batch_size:(batch + 1) *
                                            batch_size]

                # Train the discriminator
                #for _ in range (1):
                #    d_loss_real = self.discriminator.train_on_batch(imgs, valid)
                #    d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
                #    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

                # ---------------------
                #  Train Generator
                # ---------------------

                # For the combined model we will only train the generator

                # Train the generator (to have the discriminator label samples as valid)
                gloss = self.generator.train_on_batch(noise, imgs)
                #gloss=1
                #g_loss = self.combined.train_on_batch(noise, valid)

            # Plot the progress
        # print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f,All loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], gloss,g_loss))
            print("%d [G loss: %f]" % (epoch, gloss))

    def printpic(self, index, t='fig'):
        self.mydata.drawsca(self.mydata.imgtest[index], t=t)
        y = self.generator.predict(self.mydata.captest[index:index + 1] * 2 -
                                   1)
        y[y > 1] = 1
        y[y < 0] = 0
        self.mydata.drawsca(y[0], t=t)
        mp.show()
Example #11
 def __init__(self, t='fig'):
     self.mydata = ECTdata('E:\deeplearning\ECT\数据生成\data', 20000)
     self.mydata.initsca(t=t)
     print("data init success!")
Example #12
from loaddata   import ECTdata
import tensorflow 
from tensorflow.keras.models import Sequential,Model
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras import Input
import matplotlib.pyplot as mp

#bidirectional training
mydata=ECTdata('E:\deeplearning\ECT\数据生成\data',10000)
mydata.initsca()
print("data init success!")

input=Input(shape=(mydata.imgsize,))
encoded = Dense(140, activation='relu')(input)
midnet=Dense(mydata.capsize, activation='relu')
mid = midnet(encoded)
midoutput=midnet.output
decoded = Dense(140, activation='relu')(mid)
output=Dense(mydata.imgsize, activation='relu')(decoded)


model=Model(inputs=input,outputs=[midoutput,output])
model1=Model(inputs=input,outputs=output)

model.compile(optimizer='adadelta', loss='mean_squared_error')
model.fit(mydata.imgtrain,[mydata.captrain,mydata.imgtrain],epochs=500,shuffle=True)
p=model.evaluate(mydata.imgtest,[mydata.captest,mydata.imgtest])
print("整个网络的损失为%f %f"%(p[0],p[1]))

mid2=Input(shape=(mydata.capsize,))
decoded1=model.layers[-2](mid2)
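The example breaks off after reusing the first decoder layer; a plausible continuation (purely a sketch, not from the original source) reuses the final layer as well and wraps them into a standalone capacitance-to-image model:

# Hypothetical continuation: build a decoder that maps capacitance
# values back to an image, sharing the already-trained layers.
decoded2 = model.layers[-1](decoded1)
decoder = Model(inputs=mid2, outputs=decoded2)
decoder.compile(optimizer='adadelta', loss='mean_squared_error')
p2 = decoder.evaluate(mydata.captest, mydata.imgtest)
print("Decoder-only loss: %f" % p2)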
Example #13
from loaddata import ECTdata
import tensorflow
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras import Input

mydata = ECTdata('E:\deeplearning\ECT\数据生成\data')

input = Input(shape=(mydata.imgsize, ))
encoded = Dense(140, activation='relu')(input)
mid = Dense(mydata.capsize, activation='relu')(encoded)
decoded = Dense(140, activation='relu')(mid)
output = Dense(mydata.imgsize, activation='relu')(decoded)

model = Model(inputs=input, outputs=output)
model.compile(optimizer='adadelta', loss='mean_squared_error')
model.fit(mydata.imgtrain,
          mydata.imgtrain,
          epochs=100,
          shuffle=True,
          validation_data=(mydata.imgtest, mydata.imgtest))
p = model.evaluate(mydata.imgtest, mydata.imgtest)
print(p)
Example #14
class GAN:
    def __init__(self,t='fig'):      
        self.mydata=ECTdata('E:\deeplearning\ECT\数据生成\data',5000)
        self.mydata.initsca(t=t)
        print("data init success!")
        # close any session left open from a previous run
        if 'session' in locals() and tensorflow.session is not None:
            print('Close interactive session')
            tensorflow.session.close()
        config = tensorflow.ConfigProto()
        config.gpu_options.allow_growth = True  # allow GPU memory to grow on demand
        set_session(tensorflow.Session(config=config))
        print('GPU memory is allowed to grow.')
        
    def model(self):
        '''Build the complete model.'''
        optimizer = Adam(0.0002, 0.5)
        # generator
        inputGen=Input(shape=(self.mydata.capsize,))
        g=Sequential()
        g.add(Dense(256,activation='sigmoid'))     
        g.add(Dense(512,activation='sigmoid'))
        g.add(Dense(1024,activation='sigmoid'))
        g.add(Dense(self.mydata.imgsize,activation='tanh'))
        g1=g(inputGen)
        self.Generator=Model(inputs=inputGen,outputs=g1)
        #self.Generator.compile(loss='binary_crossentropy',optimizer='adam')

        # discriminator
        inputDis=Input(shape=(self.mydata.imgsize,))
        d=Sequential()
        d.add(Dense(512,activation='sigmoid')) 
        d.add(Dense(256,activation='sigmoid'))             
        d.add(Dense(1,activation='sigmoid'))
        d1=d(inputDis)
        self.Discriminator=Model(inputs=inputDis,outputs=d1)
        self.Discriminator.trainable=True
        self.Discriminator.compile(loss='binary_crossentropy',optimizer=optimizer,metrics=['accuracy'])

        # combined model (stacked generator and discriminator)
        Generateimg=self.Generator(inputGen)
        self.Discriminator.trainable=False
        validity = self.Discriminator(Generateimg)
        self.combined = Model(inputGen, validity)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
 

    def train(self, epochs, batch_size=100, sample_interval=50):
        
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))
        batchs=len(self.mydata.imgtrain)//batch_size
        for epoch in range(epochs):

            for batch in range(batchs):
                '''  Train Discriminator   '''
                #Select a batch of data
                #realcap=self.mydata.captrain[batch*batch_size:(batch+1)*batch_size]
                realcap=np.random.uniform(0, 1, size=(batch_size, 28))
                realimg=self.mydata.imgtrain[batch*batch_size:(batch+1)*batch_size]
                # Generate a batch of new images
                gen_img = self.Generator.predict(realcap)
                #img=np.concatenate((realimg,gen_img),axis=0)
                #state = np.random.get_state()
                #np.random.shuffle(img)
                #label=np.append(valid,fake)
                #np.random.set_state(state)
                #np.random.shuffle(label)
                
                # Train the discriminator
                #d_loss = self.Discriminator.train_on_batch(img, label)
                d_loss_real = self.Discriminator.train_on_batch(realimg, valid)
                d_loss_fake = self.Discriminator.train_on_batch(gen_img, fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                '''  Train Generator   '''
                realcap=np.random.uniform(0, 1, size=(batch_size, 28))
                g_loss = self.combined.train_on_batch(realcap, valid)
                
            print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))

    def printpic(self,index,t='fig'):      
        self.mydata.drawsca(self.mydata.imgtest[index],t=t)
        y=self.Generator.predict(self.mydata.captest[index:index+1])
        y[y>1]=1
        y[y<0]=0
        self.mydata.drawsca(y[0],t=t)
        mp.show()
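A hypothetical usage of this GAN class; `model()` must be called before `train`, since it builds `Generator`, `Discriminator`, and `combined` (epoch count and index are placeholders):

gan = GAN(t='fig')
gan.model()             # build generator, discriminator and the stacked model
gan.train(epochs=200)   # alternate discriminator and generator updates
gan.printpic(0)         # compare a test image with the generated reconstruction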
Example #15
from loaddata import ECTdata
import matplotlib.pyplot as mp
import numpy as np

mydata = ECTdata('E:\deeplearning\ECT\数据生成\datatest')
mydata.initsca(t='tri')
for i in range(2):
    mydata.drawsca(mydata.images[i], t='tri')
mp.show()