Beispiel #1
0
 def train_model(self, train_X, train_Y, sess, summ):
     """Pre-train the DBM layer-wise, then fine-tune the whole net with BP.

     Args:
         train_X: training inputs, shape (n_samples, n_features).
         train_Y: training labels matching train_X.
         sess: active TensorFlow session.
         summ: summary helper exposing a `train_writer`.
     """
     # Pre-training stage.
     print("Start Pre-training...")
     self.dbm.train_model(train_X, sess, summ)
     # Fine-tuning stage.
     print("Start Fine-tuning...")
     batches = Batch(images=train_X,
                     labels=train_Y,
                     batch_size=self.batch_size)
     batches_per_epoch = int(train_X.shape[0] / self.batch_size)
     # Cap TensorBoard writes at roughly 1000 over the whole run.
     write_every = max(int(self.dbn_epochs * batches_per_epoch / 1000), 1)
     step = 0
     for epoch in range(self.dbn_epochs):
         for _ in range(batches_per_epoch):
             step += 1
             batch_x, batch_y = batches.next_batch()
             summary, loss, _ = sess.run(
                 [self.merge, self.loss, self.train_batch_bp],
                 feed_dict={self.input_data: batch_x,
                            self.label_data: batch_y})
             # Periodic summary write.
             if step % write_every == 0:
                 summ.train_writer.add_summary(summary, step)
         print('>>> epoch = {} , loss = {:.4}'.format(epoch + 1, loss))
Beispiel #2
0
 def train_model(self, train_X, train_Y, sess):
     """Initialise variables, pre-train the RBM stack, then fine-tune.

     Checkpoints are written every 10 epochs and once more at the end.
     """
     # Initialise all graph variables.
     sess.run(tf.global_variables_initializer())
     # Pre-training stage.
     print("[Start Pre-training...]")
     self.rbms.train_model(train_X, sess)
     # Fine-tuning stage.
     print("[Start Fine-tuning...]")
     batches = Batch(images=train_X,
                     labels=train_Y,
                     batch_size=self.batch_size)
     batches_per_epoch = int(train_X.shape[0] / self.batch_size)
     for epoch in range(self.dbn_epochs):
         for _ in range(batches_per_epoch):
             batch_x, batch_y = batches.next_batch()
             loss, _ = sess.run(
                 [self.loss, self.train_batch_bp],
                 feed_dict={self.input_data: batch_x,
                            self.label_data: batch_y})
         print('>>> epoch = {} , loss = {:.4}'.format(epoch + 1, loss))
         # Periodic checkpoint every 10 epochs.
         if epoch % 10 == 0:
             self.saver.save(sess, './checkpoint/model.ckpt', global_step=epoch)
     # Final checkpoint after the last epoch.
     self.saver.save(sess,
                     './checkpoint/model.ckpt',
                     global_step=self.dbn_epochs - 1)
Beispiel #3
0
    def unsupervised_train_model(self, train_X, sess, summ):
        """Unsupervised training: the input is fed as its own target.

        Args:
            train_X: training inputs, shape (n_samples, n_features).
            sess: active TensorFlow session.
            summ: summary helper exposing a `train_writer`.
        """
        _data = Batch(images=train_X, labels=None, batch_size=self.batch_size)

        # Number of mini-batches per epoch.
        b = int(train_X.shape[0] / self.batch_size)
        # Training epochs.
        for i in range(self.epochs):
            sum_loss = 0
            for j in range(b):
                # NOTE(review): the learning rate decays once per BATCH here,
                # while sibling code decays once per epoch; also the decayed
                # self.lr is never fed into sess.run, so it only takes effect
                # if the graph reads it elsewhere — confirm this is intended.
                if self.decay_lr:
                    self.lr = self.lr * 0.94
                batch_x = _data.next_batch()
                loss, _ = sess.run([self.loss, self.train_batch],
                                   feed_dict={
                                       self.input_data: batch_x,
                                       self.label_data: batch_x
                                   })
                sum_loss = sum_loss + loss

            #**************** write summary ******************
            # The summary is evaluated on the LAST batch of the epoch only.
            summary = sess.run(self.merge,
                               feed_dict={
                                   self.input_data: batch_x,
                                   self.label_data: batch_x
                               })
            summ.train_writer.add_summary(summary, i)
            #****************************************
            # Report the mean batch loss for the epoch.
            loss = sum_loss / b
            print('>>> epoch = {} , loss = {:.4}'.format(i + 1, loss))
Beispiel #4
0
    def train_model(self, train_X, sess, summ):
        """Train the autoencoder over mini-batches with summary logging.

        Args:
            train_X: training inputs, shape (n_samples, n_x).
            sess: active TensorFlow session.
            summ: summary helper exposing a `train_writer`.
        """
        batches = Batch(images=train_X, batch_size=self.batch_size)
        batches_per_epoch = int(train_X.shape[0] / self.batch_size)
        # Cap TensorBoard writes at roughly 1000 over the whole run.
        write_every = max(int(self.ae_epochs * batches_per_epoch / 1000), 1)
        step = 0
        # Mask fed to self.A; stays all-ones unless the denoising AE
        # overwrites it with the corruption mask.
        A = np.ones((self.batch_size, self.n_x), dtype=np.float32)
        for epoch in range(self.ae_epochs):
            # Ramp the momentum up linearly over the course of training.
            self.momentum = epoch / self.ae_epochs
            for _ in range(batches_per_epoch):
                step += 1
                batch = batches.next_batch()
                if self.ae_type == 'dae':
                    # Denoising AE: corrupt the batch, keep its noise mask.
                    batch, A = self.add_noise(batch)
                summary, loss, _ = sess.run(
                    [self.merge, self.loss, self.train_batch_bp],
                    feed_dict={self.input_data: batch,
                               self.A: A})
                # Periodic summary write.
                if step % write_every == 0:
                    summ.train_writer.add_summary(summary, step)
            print('>>> epoch = {} , loss = {:.4}'.format(epoch + 1, loss))
    def unsupervised_train_model(self, train_X, sess, summ):
        """Unsupervised training loop: reconstruct the input as its own target."""
        batches = Batch(images=train_X, labels=None, batch_size=self.batch_size)
        batches_per_epoch = int(train_X.shape[0] / self.batch_size)
        # Cap TensorBoard writes at roughly 1000 over the whole run.
        write_every = max(int(self.epochs * batches_per_epoch / 1000), 1)

        step = 0
        for epoch in range(self.epochs):
            total_loss = 0
            for _ in range(batches_per_epoch):
                step += 1
                batch_x = batches.next_batch()
                summary, loss, _ = sess.run(
                    [self.merge, self.loss, self.train_batch],
                    feed_dict={self.input_data: batch_x,
                               self.label_data: batch_x})
                # Periodic summary write.
                if step % write_every == 0:
                    summ.train_writer.add_summary(summary, step)
                total_loss = total_loss + loss
            # Report the mean batch loss for the epoch.
            loss = total_loss / batches_per_epoch
            print('>>> epoch = {} , loss = {:.4}'.format(epoch + 1, loss))
Beispiel #6
0
 def train_model(self, train_X, sess):
     """Train the RBM with CD-k over mini-batches."""
     # Initialise all graph variables.
     sess.run(tf.global_variables_initializer())
     batches = Batch(images=train_X, batch_size=self.batch_size)
     batches_per_epoch = int(train_X.shape[0] / self.batch_size)
     for epoch in range(self.rbm_epochs):
         # One pass over the data in mini-batches.
         for _ in range(batches_per_epoch):
             batch = batches.next_batch()
             loss, _ = sess.run([self.loss, self.train_batch_cdk],
                                feed_dict={self.input_data: batch})
         print('>>> epoch = {} , loss = {:.4}'.format(epoch + 1, loss))
Beispiel #7
0
    def unsupervised_train_model(self, train_X, train_Y, sess, summ):
        """Layer-wise unsupervised training for an RBM/AE layer.

        The input is fed as its own reconstruction target; labels are only
        handed to the batcher when `self.use_label` is set.
        """
        batches = Batch(images=train_X,
                        labels=train_Y if self.use_label else None,
                        batch_size=self.batch_size)

        batches_per_epoch = int(train_X.shape[0] / self.batch_size)

        ########################################################
        #     Start training for the rbm/ae layer              #
        ########################################################

        for epoch in range(self.epochs):
            total_loss = 0
            # Exponential learning-rate decay, once per epoch.
            if self.decay_lr:
                self.lr = self.lr * 0.94
            for _ in range(batches_per_epoch):
                batch_x = batches.next_batch()
                loss, _ = sess.run([self.loss, self.train_batch],
                                   feed_dict={self.input_data: batch_x,
                                              self.recon_data: batch_x})
                total_loss = total_loss + loss

            #**************** write summary ******************
            # Summary is evaluated on the last batch of the epoch.
            if self.tbd:
                summary = sess.run(self.merge,
                                   feed_dict={self.input_data: batch_x,
                                              self.recon_data: batch_x})
                summ.train_writer.add_summary(summary, epoch)
            #****************************************
            loss = total_loss / batches_per_epoch
            string = '>>> epoch = {}/{}  | 「Train」: loss = {:.4}'.format(
                epoch + 1, self.epochs, loss)
            # Overwrite the current console line with the progress string.
            sys.stdout.write('\r' + string)
            sys.stdout.flush()

        print('')
Beispiel #8
0
 def train_model(self, train_X, train_Y, sess):
     """Supervised mini-batch training loop for the CNN."""
     # Initialise all graph variables.
     sess.run(tf.global_variables_initializer())
     print("[Start Training...]")
     batches = Batch(images=train_X,
                     labels=train_Y,
                     batch_size=self.batch_size)
     batches_per_epoch = int(train_X.shape[0] / self.batch_size)
     for epoch in range(self.cnn_epochs):
         for _ in range(batches_per_epoch):
             batch_x, batch_y = batches.next_batch()
             loss, _ = sess.run([self.loss, self.train_batch_bp],
                                feed_dict={self.input_data: batch_x,
                                           self.label_data: batch_y})
         print('>>> epoch = {} , loss = {:.4}'.format(epoch + 1, loss))
 def train_model(self, train_X, sess, summ):
     """CD-k training for the RBM with periodic TensorBoard summaries."""
     batches = Batch(images=train_X, batch_size=self.batch_size)
     batches_per_epoch = int(train_X.shape[0] / self.batch_size)
     # Cap TensorBoard writes at roughly 1000 over the whole run.
     write_every = max(int(self.rbm_epochs * batches_per_epoch / 1000), 1)
     step = 0
     for epoch in range(self.rbm_epochs):
         # One pass over the data in mini-batches.
         for _ in range(batches_per_epoch):
             step += 1
             batch = batches.next_batch()
             summary, loss, _ = sess.run(
                 [self.merge, self.loss, self.train_batch_cdk],
                 feed_dict={self.input_data: batch})
             # Periodic summary write.
             if step % write_every == 0:
                 summ.train_writer.add_summary(summary, step)
         print('>>> epoch = {} , loss = {:.4}'.format(epoch + 1, loss))
Beispiel #10
0
    def train_model(self,
                    train_X,
                    train_Y=None,
                    test_X=None,
                    test_Y=None,
                    sess=None,
                    summ=None,
                    load_saver=''):
        """Pre-train (or load a checkpoint), fine-tune, test and save.

        Args:
            train_X: training inputs.
            train_Y: training labels (may be None for pure prediction use).
            test_X / test_Y: optional test set; when `test_X` is given but
                `test_Y` is None, only predictions are produced at the end.
            sess: active TensorFlow session.
            summ: summary helper exposing a `train_writer` (used when
                `self.tbd` is set).
            load_saver: '' to train from scratch, 'p' to restore a
                pre-trained checkpoint, 'f' to restore a fine-tuned one
                (which skips fine-tuning entirely).
        """
        # Initialise both weight-CSV handles up front so the plotting branch
        # at the end never sees an undefined name (W_csv_ft previously
        # raised NameError when save_weight was False but plot_para True).
        W_csv_pt = None
        W_csv_ft = None
        saver = tf.train.Saver()

        # BUGFIX: these paths were previously defined only inside the
        # 'f'/'p' load branches, so saving after training from scratch
        # raised NameError. Define them unconditionally.
        pt_save_path = '../saver/' + self.name + '/pre-train'
        ft_save_path = '../saver/' + self.name + '/fine-tune'

        if load_saver == 'f':
            # Restore a fully fine-tuned model.
            print("Load Fine-tuned model...")
            if not os.path.exists(ft_save_path): os.makedirs(ft_save_path)
            saver.restore(sess, ft_save_path + '/fine-tune.ckpt')

        elif load_saver == 'p':
            # Restore pre-trained (not yet fine-tuned) weights.
            print("Load Pre-trained model...")
            if not os.path.exists(pt_save_path): os.makedirs(pt_save_path)
            saver.restore(sess, pt_save_path + '/pre-train.ckpt')

        elif self.pt_model is not None:

            #####################################################################
            #     Start pre-training, layer by layer                            #
            #####################################################################

            print("Start Pre-training...")
            pre_time_start = time.time()
            # >>> Pre-training -> unsupervised_train_model
            self.deep_feature = self.pt_model.train_model(train_X=train_X,
                                                          train_Y=train_Y,
                                                          sess=sess,
                                                          summ=summ)
            pre_time_end = time.time()
            self.pre_exp_time = pre_time_end - pre_time_start
            print('>>> Pre-training expend time = {:.4}'.format(
                self.pre_exp_time))

            if self.save_weight:
                W_csv_pt = self.save_modele_weight_csv('pt', sess)
            if self.save_model:
                print("Save Pre-trained model...")
                # Ensure the target directory exists before saving.
                if not os.path.exists(pt_save_path): os.makedirs(pt_save_path)
                saver.save(sess, pt_save_path + '/pre-train.ckpt')
            if self.use_for == 'classification' and self.do_tSNE:
                # 2-D t-SNE visualisation of the learned deep features.
                tSNE_2d(self.deep_feature, train_Y, 'train')
                if test_Y is not None:
                    test_deep_feature = sess.run(
                        self.pt_model.transform(test_X))
                    tSNE_2d(test_deep_feature, test_Y, 'test')

        self.test_Y = test_Y
        # Count the number of test samples per class.
        self.stat_label_total()

        #######################################################
        #     Start fine-tuning                               #
        #######################################################

        if load_saver != 'f':
            print("Start Fine-tuning...")
            _data = Batch(images=train_X,
                          labels=train_Y,
                          batch_size=self.batch_size)

            b = int(train_X.shape[0] / self.batch_size)
            # Columns: <0> train loss, <1> train acc, <2> test acc/mse,
            # <3> elapsed time.
            self.loss_and_acc = np.zeros((self.epochs, 4))
            time_start = time.time()
            for i in range(self.epochs):
                sum_loss = 0
                sum_acc = 0
                for j in range(b):
                    batch_x, batch_y = _data.next_batch()
                    loss, acc, _ = sess.run(
                        [self.loss, self.accuracy, self.train_batch],
                        feed_dict={
                            self.input_data: batch_x,
                            self.label_data: batch_y,
                            self.keep_prob: 1 - self.dropout
                        })
                    sum_loss = sum_loss + loss
                    sum_acc = sum_acc + acc

                #**************** write summary ******************
                # Summary evaluated on the last batch of the epoch.
                if self.tbd:
                    summary = sess.run(self.merge,
                                       feed_dict={
                                           self.input_data: batch_x,
                                           self.label_data: batch_y,
                                           self.keep_prob: 1 - self.dropout
                                       })
                    summ.train_writer.add_summary(summary, i)
                #****************************************
                loss = sum_loss / b
                acc = sum_acc / b

                self.loss_and_acc[i][0] = loss  # <0> train loss
                time_end = time.time()
                time_delta = time_end - time_start
                self.loss_and_acc[i][3] = time_delta  # <3> elapsed time

                # >>> for 'classification'
                if self.use_for == 'classification':
                    self.loss_and_acc[i][1] = acc  # <1> train accuracy
                    string = '>>> epoch = {}/{}  | 「Train」: loss = {:.4} , accuracy = {:.4}% , expend time = {:.4}'.format(
                        i + 1, self.epochs, loss, acc * 100, time_delta)

                    ###########################################################
                    #     Test <classification> with: test_X, test_Y          #
                    ###########################################################

                    if test_Y is not None:
                        acc = self.test_average_accuracy(test_X, test_Y, sess)
                        string = string + '  | 「Test」: accuracy = {:.4}%'.format(
                            acc * 100)
                        self.loss_and_acc[i][2] = acc  # <2> test accuracy

                    sys.stdout.write('\r' + string)
                    sys.stdout.flush()

                # >>> for 'prediction'
                else:
                    string = '>>> epoch = {}/{}  | 「Train」: loss = {:.4}'.format(
                        i + 1, self.epochs, loss)

                    ###########################################################
                    #     Test <prediction> with: test_X, test_Y              #
                    ###########################################################

                    if test_Y is not None:
                        mse, pred_Y = self.test_model(test_X, test_Y, sess)
                        string = string + '  | 「Test」: mse = {:.4}%'.format(
                            mse)
                        self.loss_and_acc[i][2] = mse  # <2> test mse
                        # Keep the predictions from the best (lowest-mse) epoch.
                        if mse < self.mse:
                            self.mse = mse
                            self.pred_Y = pred_Y

                    sys.stdout.write('\r' + string)
                    sys.stdout.flush()

            print('')
            np.savetxt("../saver/loss_and_acc.csv",
                       self.loss_and_acc,
                       fmt='%.4f',
                       delimiter=",")

            if self.save_model:
                print("Save model...")
                # Ensure the target directory exists before saving.
                if not os.path.exists(ft_save_path): os.makedirs(ft_save_path)
                saver.save(sess, ft_save_path + '/fine-tune.ckpt')

        #################################################################
        #     Test <classification, prediction> with test_X only        #
        #################################################################

        if test_X is not None and test_Y is None:
            if self.use_for == 'classification':
                _, pred = self.test_model(test_X, test_Y, sess)
                self.pred_class = np.argmax(pred, axis=1)
            else:
                _, self.pred_Y = self.test_model(test_X, test_Y, sess)

        if self.save_weight:
            W_csv_ft = self.save_modele_weight_csv('ft', sess)

        if self.plot_para:
            # NOTE(review): W_csv_pt/W_csv_ft may be None here when the
            # corresponding save step did not run — confirm plot_para_pic
            # handles None inputs.
            plot_para_pic(W_csv_pt, W_csv_ft, name=self.name)
Beispiel #11
0
    def train_model(self,
                    train_X,
                    train_Y=None,
                    val_X=None,
                    val_Y=None,
                    sess=None,
                    summ=None,
                    load_saver=''):
        """Pre-train (or restore a checkpoint), fine-tune, and save.

        load_saver: '' trains from scratch, 'p' restores a pre-trained
        checkpoint, 'f' restores a fine-tuned one and returns after
        reporting test accuracy.
        """
        pt_save_path = '../saver/' + self.name + '/pre-train'
        ft_save_path = '../saver/' + self.name + '/fine-tune'
        # Make sure both checkpoint directories exist.
        if not os.path.exists(pt_save_path): os.makedirs(pt_save_path)
        if not os.path.exists(ft_save_path): os.makedirs(ft_save_path)
        saver = tf.train.Saver()
        if load_saver == 'f':
            # Restore the fine-tuned model and just report test accuracy.
            print("Load Fine-tuned model...")
            saver.restore(sess, ft_save_path + '/fine-tune.ckpt')
            test_acc = self.validation_model(val_X, val_Y, sess)
            return print('>>> Test accuracy = {:.4}'.format(test_acc))
        elif load_saver == 'p':
            # Restore pre-trained weights, then fine-tune below.
            print("Load Pre-trained model...")
            saver.restore(sess, pt_save_path + '/pre-train.ckpt')
        elif self.pt_model is not None:
            # Pre-train from scratch and checkpoint the result.
            print("Start Pre-training...")
            self.pt_model.train_model(train_X=train_X, sess=sess, summ=summ)
            print("Save Pre-trained model...")
            saver.save(sess, pt_save_path + '/pre-train.ckpt')
        # Fine-tuning stage.
        print("Start Fine-tuning...")
        batches = Batch(images=train_X,
                        labels=train_Y,
                        batch_size=self.batch_size)

        batches_per_epoch = int(train_X.shape[0] / self.batch_size)
        # Columns: train loss, train accuracy, validation accuracy.
        self.record_array = np.zeros((self.epochs, 3))
        for epoch in range(self.epochs):
            total_loss = 0
            total_acc = 0
            for _ in range(batches_per_epoch):
                batch_x, batch_y = batches.next_batch()
                loss, acc, _ = sess.run(
                    [self.loss, self.accuracy, self.train_batch],
                    feed_dict={self.input_data: batch_x,
                               self.label_data: batch_y,
                               self.keep_prob: 1 - self.dropout})
                total_loss = total_loss + loss
                total_acc = total_acc + acc

            #**************** write summary ******************
            # Summary evaluated on the last batch of the epoch.
            summary = sess.run(self.merge,
                               feed_dict={self.input_data: batch_x,
                                          self.label_data: batch_y,
                                          self.keep_prob: 1 - self.dropout})
            summ.train_writer.add_summary(summary, epoch)
            #****************************************
            loss = total_loss / batches_per_epoch
            acc = total_acc / batches_per_epoch
            print('>>> epoch = {} , loss = {:.4} , accuracy = {:.4}'.format(
                epoch + 1, loss, acc))
            self.record_array[epoch][0] = loss
            self.record_array[epoch][1] = acc
            if val_X is not None:
                val_acc = self.validation_model(val_X, val_Y, sess)
                print('    >>> validation accuracy = {:.4}'.format(val_acc))
                self.record_array[epoch][2] = val_acc

        print("Save model...")
        saver.save(sess, ft_save_path + '/fine-tune.ckpt')
    def train_model(self,train_X,train_Y=None,val_X=None,val_Y=None,sess=None,summ=None,load_saver=''):
        """Pre-train (or restore), fine-tune, evaluate and save the model.

        load_saver: '' trains from scratch; 'p' restores a pre-trained
        checkpoint; 'f' restores a fine-tuned checkpoint, writes predicted
        labels for `val_X` to a CSV file and returns early.
        """
        pt_save_path='../saver/'+self.name+'/pre-train'
        ft_save_path='../saver/'+self.name+'/fine-tune'
        if not os.path.exists(pt_save_path): os.makedirs(pt_save_path)
        if not os.path.exists(ft_save_path): os.makedirs(ft_save_path)
        saver = tf.train.Saver()
        if load_saver=='f':
            # Load the fine-tuned model.
            print("Load Fine-tuned model...")
            saver.restore(sess,ft_save_path+'/fine-tune.ckpt')
            test_acc,pred_val=self.validation_classification_model(val_X,val_Y,sess)
            import pandas as pd
            # Map each argmax class index back to its original label value.
            # Only indices 0 and 1 are handled (mapped to 7 and 8); other
            # indices are skipped, so pred_label may be shorter than pred_val.
            pred_label = []
            for k in range(len(pred_val)):
                max_index = np.argmax(pred_val[k])
                if (max_index == 0):
                    pred_label.append(7)
                elif (max_index == 1):
                    pred_label.append(8)
                # elif (max_index == 2):
                #     pred_label.append(3)
                # elif (max_index == 3):
                #     pred_label.append(4)
                # elif (max_index == 4):
                #     pred_label.append(5)
                # elif (max_index == 5):
                #     pred_label.append(6)
                # elif (max_index == 6):
                #     pred_label.append(7)
                # elif (max_index == 7):
                #     pred_label.append(8)
                # elif (max_index == 8):
                #     pred_label.append(9)
            # NOTE(review): the output path is hard-coded to a local Windows
            # drive — consider making it configurable.
            biaoqian = pd.DataFrame(pred_label)
            biaoqian.to_csv(r"D:\gao\jinzhou\33_doc\predict_biaoqian_33_e.csv", sep=',', index=False)
            return print('>>> Test accuracy = {:.4}'.format(test_acc))
        elif load_saver=='p':
            # Load the pre-trained model.
            print("Load Pre-trained model...")
            saver.restore(sess,pt_save_path+'/pre-train.ckpt')
        elif self.pt_model is not None:
            # Start pre-training.
            print("Start Pre-training...")
            self.pt_model.train_model(train_X=train_X,train_Y=train_Y,sess=sess,summ=summ)
            if self.sav:
                print("Save Pre-trained model...")
                saver.save(sess,pt_save_path+'/pre-train.ckpt')
            if self.plot_para:
                # Snapshot the pre-trained parameters for later plotting.
                self.pt_img = sess.run(self.pt_model.parameter_list)
        # Start fine-tuning.
        print("Start Fine-tuning...")
        _data=Batch(images=train_X,
                    labels=train_Y,
                    batch_size=self.batch_size)

        b = int(train_X.shape[0]/self.batch_size)
        # Columns: <0> train loss, <1> train acc, <2> validation acc.
        self.train_curve=np.zeros((self.epochs,3))
        self.label_tag=val_Y

        # Training epochs.
        for i in range(self.epochs):
            sum_loss=0; sum_acc=0
            # train_data, _ = sess.run([self.next_data, self.train_batch], feed_dict={
            #     self.input_data: train_X,
            #     self.label_data:train_Y,
            #     self.keep_prob: 1 - self.dropout})
            # test_data, _ = sess.run([self.next_data, self.train_batch], feed_dict={
            #     self.input_data: val_X,
            #     self.label_data:val_Y,
            #     self.keep_prob: 1 - self.dropout})
            for j in range(b):
                batch_x, batch_y= _data.next_batch()
                loss,acc,_=sess.run([self.loss,self.accuracy,self.train_batch],feed_dict={
                        self.input_data: batch_x,
                        self.label_data: batch_y,
                        self.keep_prob: 1-self.dropout})
                sum_loss = sum_loss + loss; sum_acc= sum_acc +acc

            #**************** write summary ******************
            # Summary evaluated on the last batch of the epoch.
            if self.tbd:
                summary = sess.run(self.merge,feed_dict={self.input_data: batch_x,self.label_data: batch_y,self.keep_prob: 1-self.dropout})
                summ.train_writer.add_summary(summary, i)
            #****************************************
            loss = sum_loss/b; acc = sum_acc/b

            self.train_curve[i][0]=loss
            if self.use_for=='classification':
                self.train_curve[i][1]=acc
                print('>>> epoch = {} , loss = {:.4} , accuracy = {:.4}'.format(i+1,loss,acc))
                if val_X is not None:
                    val_acc,pred=self.validation_classification_model(val_X,val_Y,sess)
                    print('    >>> test accuracy = {:.4}'.format(val_acc))
                    self.train_curve[i][2]=val_acc
            else:
                print('>>> epoch = {} , loss = {:.4}'.format(i+1,loss))

        # import pandas as pd
        # deep_feature_train = pd.DataFrame(train_data)
        # deep_feature_test =pd.DataFrame(test_data)
        # print("保存深度特征")
        # deep_feature_train.to_csv(r"D:\gao\jiangxia\code1\doc\deep_feature_train.csv",sep=',')
        # deep_feature_test.to_csv(r"D:\gao\jiangxia\code1\doc\deep_feature_val.csv",sep=',')
        if self.use_for=='prediction':
            # For regression, evaluate once on the validation set at the end.
            mse,self.pred_Y = self.test_model(val_X,val_Y,sess)
            self.test_Y = val_Y
            self.mse=mse

        if self.sav:
            print("Save model...")
            saver.save(sess,ft_save_path+'/fine-tune.ckpt')
        if self.plot_para:
            # Snapshot the fine-tuned parameters and plot both snapshots.
            self.img = sess.run(self.parameter_list)
            plot_para_pic(self.pt_img,self.img,name=self.name)
    def train_model(self,
                    train_X,
                    train_Y=None,
                    val_X=None,
                    val_Y=None,
                    sess=None,
                    summ=None,
                    load_saver=''):
        """Pre-train (or restore), then fine-tune with per-step summaries.

        load_saver: '' trains from scratch, 'load_p' restores a pre-trained
        checkpoint, 'load_f' restores a fine-tuned one and returns.
        """
        pt_save_path = '../saver/' + self.name + '/pre-train.ckpt'
        ft_save_path = '../saver/' + self.name + '/fine-tune.ckpt'
        saver = tf.train.Saver()
        if load_saver == 'load_f':
            # Restore the fine-tuned model; nothing left to do.
            print("Load Fine-tuned model...")
            saver.restore(sess, ft_save_path)
            return
        elif load_saver == 'load_p':
            # Restore only the pre-trained weights, then fine-tune below.
            print("Load Pre-trained model...")
            saver.restore(sess, pt_save_path)
        elif self.pt_model is not None:
            # Pre-train from scratch and checkpoint the result.
            print("Start Pre-training...")
            self.pt_model.train_model(train_X=train_X, sess=sess, summ=summ)
            print("Save Pre-trained model...")
            saver.save(sess, pt_save_path)
        # Fine-tuning stage.
        print("Start Fine-tuning...")
        batches = Batch(images=train_X,
                        labels=train_Y,
                        batch_size=self.batch_size)
        batches_per_epoch = int(train_X.shape[0] / self.batch_size)
        # Cap TensorBoard writes at roughly 1000 over the whole run.
        write_every = max(int(self.epochs * batches_per_epoch / 1000), 1)

        step = 0
        for epoch in range(self.epochs):
            total_loss = 0
            total_acc = 0
            for _ in range(batches_per_epoch):
                step += 1
                batch_x, batch_y = batches.next_batch()
                summary, loss, acc, _ = sess.run(
                    [self.merge, self.loss, self.accuracy, self.train_batch],
                    feed_dict={self.input_data: batch_x,
                               self.label_data: batch_y,
                               self.keep_prob: 1 - self.dropout})
                # Periodic summary write.
                if step % write_every == 0:
                    summ.train_writer.add_summary(summary, step)
                total_loss = total_loss + loss
                total_acc = total_acc + acc
            loss = total_loss / batches_per_epoch
            acc = total_acc / batches_per_epoch
            print('>>> epoch = {} , loss = {:.4} , accuracy = {:.4}'.format(
                epoch + 1, loss, acc))
            if val_X is not None:
                self.validation_model(val_X, val_Y, sess)

        print("Save model...")
        saver.save(sess, ft_save_path)