Example 1
    def train(self):
        print('Start to run in mode [Domain Adaptation Across Source and Target Domain]')
        self.sess.run(tf.global_variables_initializer())

        self.train_itr = len(self.source_training_data[0]) // self.bs

        for e in range(1, self.eps + 1):
            _src_tr_img, _src_tr_lab = DA_init.shuffle_data(self.source_training_data[0],
                                                            self.source_training_data[1])
            _tar_tr_img = DA_init.shuffle_data_nolabel(self.target_training_data)

            g_loss = 0.0
            d_loss = 0.0

            for itr in range(self.train_itr):
                _src_tr_img_batch, _src_tr_lab_batch = DA_init.next_batch(_src_tr_img, _src_tr_lab, self.bs, itr)
                _tar_tr_img_batch = DA_init.next_batch_nolabel(_tar_tr_img, self.bs)

                feed_dict = {self.x_source: _src_tr_img_batch,
                             self.x_target: _tar_tr_img_batch,
                             self.is_training: True}
                feed_dict_eval = {self.x_source: _src_tr_img_batch,
                                  self.x_target: _tar_tr_img_batch,
                                  self.is_training: False}

                # Alternate one discriminator update and one generator update
                # on the same mini-batch.
                _ = self.sess.run(self.d_train_op, feed_dict=feed_dict)
                _ = self.sess.run(self.g_train_op, feed_dict=feed_dict)

                _g_loss, _d_loss = self.sess.run([self.g_loss, self.d_loss], feed_dict=feed_dict_eval)

                g_loss += _g_loss
                d_loss += _d_loss

            summary = self.sess.run(self.merged, feed_dict=feed_dict_eval)

            g_loss = float(g_loss / self.train_itr)
            d_loss = float(d_loss / self.train_itr)

            log1 = "Epoch: [%d], G Loss: [%g], D Loss: [%g], Time: [%s]" % (e, g_loss, d_loss, time.ctime(time.time()))

            self.plt_epoch.append(e)
            self.plt_d_loss.append(d_loss)
            self.plt_g_loss.append(g_loss)

            utils_model1.plotLoss(x=self.plt_epoch,
                                  y1=self.plt_d_loss,
                                  y2=self.plt_g_loss,
                                  figName=self.model + '_GD_Loss',
                                  line1Name='D_Loss',
                                  line2Name='G_Loss',
                                  savePath=self.ckptDir)

            utils_model1.save2file(log1, self.ckptDir, self.model)

            self.writer.add_summary(summary, e)

            self.saver.save(self.sess, self.ckptDir + self.model + '-' + str(e))
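
Both examples rely on batching helpers from a DA_init module that is not shown in this listing. Below is a minimal NumPy sketch of what those helpers are assumed to do, matching the call signatures used above; it assumes the data are NumPy arrays, and the real implementation may differ.

import numpy as np

def shuffle_data(images, labels):
    # Shuffle images and labels with the same random permutation.
    perm = np.random.permutation(len(images))
    return images[perm], labels[perm]

def shuffle_data_nolabel(images):
    # Shuffle an unlabelled image set.
    return images[np.random.permutation(len(images))]

def next_batch(images, labels, batch_size, itr):
    # Return the itr-th consecutive mini-batch.
    start = itr * batch_size
    return images[start:start + batch_size], labels[start:start + batch_size]

def next_batch_nolabel(images, batch_size):
    # Sample a random mini-batch from the unlabelled set.
    idx = np.random.choice(len(images), batch_size, replace=False)
    return images[idx]
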
Example 2
    def train(self):
        print(
            'Start to run in mode [Domain Adaptation Across Source and Target Domain]'
        )
        self.sess.run(tf.global_variables_initializer())
        self.preTrained_saver = tf.train.Saver(var_list=self.g2_preTrained_var)
        self.preTrained_saver.restore(self.sess, self.preTrained_path)
        print('Pre-trained model has been successfully restored !')

        self.train_itr = len(self.source_training_data[0]) // self.bs

        for e in range(1, self.eps + 1):
            _src_tr_img, _src_tr_lab = DA_init.shuffle_data(
                self.source_training_data[0], self.source_training_data[1])
            _tar_tr_img = DA_init.shuffle_data_nolabel(
                self.target_training_data)

            source_training_acc = 0.0
            source_training_loss = 0.0
            g_loss = 0.0
            d_loss = 0.0

            for itr in range(self.train_itr):
                _src_tr_img_batch, _src_tr_lab_batch = DA_init.next_batch(
                    _src_tr_img, _src_tr_lab, self.bs, itr)
                _tar_tr_img_batch = DA_init.next_batch_nolabel(
                    _tar_tr_img, self.bs)

                feed_dict = {
                    self.x_source: _src_tr_img_batch,
                    self.y_source: _src_tr_lab_batch,
                    self.x_target: _tar_tr_img_batch,
                    self.is_training: True,
                    self.keep_prob: self.kp
                }
                feed_dict_eval = {
                    self.x_source: _src_tr_img_batch,
                    self.y_source: _src_tr_lab_batch,
                    self.x_target: _tar_tr_img_batch,
                    self.is_training: False,
                    self.keep_prob: 1.0
                }

                # Stage 1 (epochs 1-99): train the generator/classifier on the
                # labelled source batch only.
                if e < 100:
                    _ = self.sess.run(self.g_train_op_step1,
                                      feed_dict=feed_dict)
                    _training_accuracy, _training_loss = self.sess.run(
                        [self.accuracy_source, self.loss_source],
                        feed_dict=feed_dict_eval)

                    source_training_acc += _training_accuracy
                    source_training_loss += _training_loss

                # Stage 2 (epochs 100-199): start adversarial training, updating
                # the generator and the discriminator on each batch.
                elif e < 200:
                    _, _ = self.sess.run(
                        [self.g_train_op_step2, self.d_train_op_step1],
                        feed_dict=feed_dict)
                    _training_accuracy, _training_loss, _g_loss, _d_loss = self.sess.run(
                        [
                            self.accuracy_source, self.loss_source,
                            self.g_loss_step2, self.d_loss_step1
                        ],
                        feed_dict=feed_dict_eval)

                    source_training_acc += _training_accuracy
                    source_training_loss += _training_loss
                    g_loss += _g_loss
                    d_loss += _d_loss

                # Stage 3 (epoch 200 onward): continue adversarial training with
                # the step-3 generator/discriminator ops.
                else:
                    _, _ = self.sess.run(
                        [self.g_train_op_step3, self.d_train_op_step2],
                        feed_dict=feed_dict)
                    _training_accuracy, _training_loss, _g_loss, _d_loss = self.sess.run(
                        [
                            self.accuracy_source, self.loss_source,
                            self.g_loss_step3, self.d_loss_step2
                        ],
                        feed_dict=feed_dict_eval)

                    source_training_acc += _training_accuracy
                    source_training_loss += _training_loss
                    g_loss += _g_loss
                    d_loss += _d_loss

            summary = self.sess.run(self.merged, feed_dict=feed_dict_eval)

            source_training_acc = float(source_training_acc / self.train_itr)
            source_training_loss = float(source_training_loss / self.train_itr)
            g_loss = float(g_loss / self.train_itr)
            d_loss = float(d_loss / self.train_itr)

            source_validation_acc, source_validation_loss = self.validation_procedure(
                validation_data=self.source_validation_data,
                distribution_op=self.distribution_source,
                loss_op=self.loss_source,
                inputX=self.x_source,
                inputY=self.y_source)

            log1 = "Epoch: [%d], Domain: Source, Training Accuracy: [%g], Validation Accuracy: [%g], " \
                   "Training Loss: [%g], Validation Loss: [%g], generator Loss: [%g], Discriminator Loss: [%g], " \
                   "Time: [%s]" % (
                       e, source_training_acc, source_validation_acc, source_training_loss, source_validation_loss,
                       g_loss, d_loss, time.ctime(time.time()))

            self.plt_epoch.append(e)
            self.plt_training_accuracy.append(source_training_acc)
            self.plt_training_loss.append(source_training_loss)
            self.plt_validation_accuracy.append(source_validation_acc)
            self.plt_validation_loss.append(source_validation_loss)
            self.plt_d_loss.append(d_loss)
            self.plt_g_loss.append(g_loss)

            da_utils.plotAccuracy(x=self.plt_epoch,
                                  y1=self.plt_training_accuracy,
                                  y2=self.plt_validation_accuracy,
                                  figName=self.model,
                                  line1Name='training',
                                  line2Name='validation',
                                  savePath=self.ckptDir)

            da_utils.plotLoss(x=self.plt_epoch,
                              y1=self.plt_training_loss,
                              y2=self.plt_validation_loss,
                              figName=self.model,
                              line1Name='training',
                              line2Name='validation',
                              savePath=self.ckptDir)

            da_utils.plotLoss(x=self.plt_epoch,
                              y1=self.plt_d_loss,
                              y2=self.plt_g_loss,
                              figName=self.model + '_GD_Loss',
                              line1Name='D_Loss',
                              line2Name='G_Loss',
                              savePath=self.ckptDir)

            da_utils.save2file(log1, self.ckptDir, self.model)

            self.writer.add_summary(summary, e)

            self.saver.save(self.sess,
                            self.ckptDir + self.model + '-' + str(e))

            self.test_procedure(self.source_test_data,
                                distribution_op=self.distribution_source,
                                inputX=self.x_source,
                                inputY=self.y_source,
                                mode='source')
            self.test_procedure(self.target_test_data,
                                distribution_op=self.distribution_target,
                                inputX=self.x_target,
                                inputY=self.y_target,
                                mode='target')
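    # --- Sketch (assumption, not part of the original example) ---
    # The staged ops used above (g_train_op_step1/2/3, d_train_op_step1/2) are
    # assumed to be built elsewhere with one variable list per network, e.g.:
    #
    #     g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
    #     d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
    #     self.g_train_op_step2 = tf.train.AdamOptimizer(self.lr).minimize(self.g_loss_step2, var_list=g_vars)
    #     self.d_train_op_step1 = tf.train.AdamOptimizer(self.lr).minimize(self.d_loss_step1, var_list=d_vars)
    #
    # The scope names and self.lr here are placeholders.

    # The train() below belongs to a separate, source-only supervised model
    # (note the different placeholders and utils module); it is presumably the
    # routine that produces the checkpoint restored via preTrained_saver above.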
    def train(self):
        print('Start to run in mode [Supervised Learning in Source Domain]')
        self.sess.run(tf.global_variables_initializer())
        self.train_itr = len(self.training_data[0]) // self.bs

        self.best_val_accuracy = []
        self.best_val_loss = []

        for e in range(1, self.eps + 1):
            _tr_img, _tr_lab = DA_init.shuffle_data(self.training_data[0],
                                                    self.training_data[1])

            training_acc = 0.0
            training_loss = 0.0

            for itr in range(self.train_itr):
                _tr_img_batch, _tr_lab_batch = DA_init.next_batch(
                    _tr_img, _tr_lab, self.bs, itr)
                _train_accuracy, _train_loss, _ = self.sess.run(
                    [self.accuracy, self.loss, self.train_op],
                    feed_dict={
                        self.x: _tr_img_batch,
                        self.y: _tr_lab_batch,
                        self.is_training: True
                    })
                training_acc += _train_accuracy
                training_loss += _train_loss

            summary = self.sess.run(self.merged,
                                    feed_dict={
                                        self.x: _tr_img_batch,
                                        self.y: _tr_lab_batch,
                                        self.is_training: False
                                    })

            training_acc = float(training_acc / self.train_itr)
            training_loss = float(training_loss / self.train_itr)

            validation_acc, validation_loss = self.validation_procedure()
            self.best_val_accuracy.append(validation_acc)
            self.best_val_loss.append(validation_loss)

            log1 = "Epoch: [%d], Training Accuracy: [%g], Validation Accuracy: [%g], Loss Training: [%g] " \
                   "Loss_validation: [%g], Time: [%s]" % \
                   (e, training_acc, validation_acc, training_loss, validation_loss, time.ctime(time.time()))

            self.plt_epoch.append(e)
            self.plt_training_accuracy.append(training_acc)
            self.plt_training_loss.append(training_loss)
            self.plt_validation_accuracy.append(validation_acc)
            self.plt_validation_loss.append(validation_loss)

            utils.plotAccuracy(x=self.plt_epoch,
                               y1=self.plt_training_accuracy,
                               y2=self.plt_validation_accuracy,
                               figName=self.model,
                               line1Name='training',
                               line2Name='validation',
                               savePath=self.ckptDir)

            utils.plotLoss(x=self.plt_epoch,
                           y1=self.plt_training_loss,
                           y2=self.plt_validation_loss,
                           figName=self.model,
                           line1Name='training',
                           line2Name='validation',
                           savePath=self.ckptDir)

            utils.save2file(log1, self.ckptDir, self.model)

            self.writer.add_summary(summary, e)

            self.saver.save(self.sess,
                            self.ckptDir + self.model + '-' + str(e))

            self.test_procedure()

        self.best_val_index = self.best_val_accuracy.index(
            max(self.best_val_accuracy))
        log2 = 'Highest Validation Accuracy : [%g], Epoch : [%g]' % (
            self.best_val_accuracy[self.best_val_index],
            self.best_val_index + 1)
        utils.save2file(log2, self.ckptDir, self.model)

        self.best_val_index_loss = self.best_val_loss.index(
            min(self.best_val_loss))
        log3 = 'Lowest Validation Loss : [%g], Epoch : [%g]' % (
            self.best_val_loss[self.best_val_index_loss],
            self.best_val_index_loss + 1)
        utils.save2file(log3, self.ckptDir, self.model)
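
The validation_procedure and test_procedure methods called in both examples are not part of this listing. As a rough sketch, a validation pass in the same style could look like the method below; self.validation_data is assumed here, while self.accuracy, self.loss, self.x, self.y, self.is_training and self.bs follow the supervised example above. This is an assumption, not the original implementation.

    def validation_procedure(self):
        # Average accuracy and loss over the validation set in mini-batches.
        val_img, val_lab = self.validation_data
        itrs = len(val_img) // self.bs
        total_acc, total_loss = 0.0, 0.0
        for itr in range(itrs):
            img_batch, lab_batch = DA_init.next_batch(val_img, val_lab, self.bs, itr)
            acc, loss = self.sess.run([self.accuracy, self.loss],
                                      feed_dict={self.x: img_batch,
                                                 self.y: lab_batch,
                                                 self.is_training: False})
            total_acc += acc
            total_loss += loss
        return total_acc / itrs, total_loss / itrs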