Example no. 1
    def train(self):
        print('Start to run in mode [Domain Adaptation Across Source and Target Domain]')
        self.sess.run(tf.global_variables_initializer())

        self.train_itr = len(self.source_training_data[0]) // self.bs

        for e in range(1, self.eps + 1):
            _src_tr_img, _src_tr_lab = DA_init.shuffle_data(self.source_training_data[0],
                                                            self.source_training_data[1])
            _tar_tr_img = DA_init.shuffle_data_nolabel(self.target_training_data)

            g_loss = 0.0
            d_loss = 0.0

            for itr in range(self.train_itr):
                _src_tr_img_batch, _src_tr_lab_batch = DA_init.next_batch(_src_tr_img, _src_tr_lab, self.bs, itr)
                _tar_tr_img_batch = DA_init.next_batch_nolabel(_tar_tr_img, self.bs)

                feed_dict = {self.x_source: _src_tr_img_batch,
                             self.x_target: _tar_tr_img_batch,
                             self.is_training: True}
                feed_dict_eval = {self.x_source: _src_tr_img_batch,
                                  self.x_target: _tar_tr_img_batch,
                                  self.is_training: False}

                # One discriminator update followed by one generator update per batch.
                _ = self.sess.run(self.d_train_op, feed_dict=feed_dict)
                _ = self.sess.run(self.g_train_op, feed_dict=feed_dict)

                _g_loss, _d_loss = self.sess.run([self.g_loss, self.d_loss], feed_dict=feed_dict_eval)

                g_loss += _g_loss
                d_loss += _d_loss

            summary = self.sess.run(self.merged, feed_dict=feed_dict_eval)

            g_loss = float(g_loss / self.train_itr)
            d_loss = float(d_loss / self.train_itr)

            log1 = "Epoch: [%d], G Loss: [%g], D Loss: [%g], Time: [%s]" % (e, g_loss, d_loss, time.ctime(time.time()))

            self.plt_epoch.append(e)
            self.plt_d_loss.append(d_loss)
            self.plt_g_loss.append(g_loss)

            utils_model1.plotLoss(x=self.plt_epoch,
                                  y1=self.plt_d_loss,
                                  y2=self.plt_g_loss,
                                  figName=self.model + '_GD_Loss',
                                  line1Name='D_Loss',
                                  line2Name='G_Loss',
                                  savePath=self.ckptDir)

            utils_model1.save2file(log1, self.ckptDir, self.model)

            self.writer.add_summary(summary, e)

            self.saver.save(self.sess, self.ckptDir + self.model + '-' + str(e))
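The DA_init helpers used in this loop are not defined anywhere in these snippets. A rough sketch of what they are assumed to do, based only on the calls above, follows; other snippets call next_batch with different arguments, so the real project presumably has several variants.

import numpy as np

def shuffle_data(image, label):
    # Assumed: shuffle images and labels with one shared permutation.
    idx = np.random.permutation(len(image))
    return image[idx], label[idx]

def shuffle_data_nolabel(image):
    # Assumed: shuffle an unlabeled image array.
    return image[np.random.permutation(len(image))]

def next_batch(image, label, batch_size, itr):
    # Assumed: return the itr-th consecutive slice of the shuffled arrays.
    start = itr * batch_size
    return image[start:start + batch_size], label[start:start + batch_size]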
Example no. 2
    def getBatchData(self):
        _src_tr_img_batch, _src_tr_lab_batch = DA_init.next_batch(self.source_training_data[0],
                                                                  self.source_training_data[1], self.bs)
        _tar_tr_img_batch = DA_init.next_batch_unpaired(self.target_training_data, self.bs)

        feed_dict = {self.x_source: _src_tr_img_batch,
                     self.y_source: _src_tr_lab_batch,
                     self.x_target: _tar_tr_img_batch,
                     self.is_training: True,
                     self.keep_rate: 0.5}
        feed_dict_eval = {self.x_source: _src_tr_img_batch,
                          self.y_source: _src_tr_lab_batch,
                          self.x_target: _tar_tr_img_batch,
                          self.is_training: False,
                          self.keep_rate: 1.0}  # disable dropout when evaluating

        return feed_dict, feed_dict_eval
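DA_init.next_batch_unpaired is also only visible at its call site; a minimal sketch, assuming it draws a random unlabeled target batch independently of the source batch:

import numpy as np

def next_batch_unpaired(image, batch_size):
    # Assumed: sample a random batch from the unlabeled target images.
    idx = np.random.choice(len(image), size=batch_size, replace=False)
    return image[idx]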
Example no. 3
    def getBatchData(self):
        _src_tr_img_batch, _src_tr_lab_batch = DA_init.next_batch(self.source_training_data[0],
                                                                  self.source_training_data[1], self.bs)

        feed_dict = {self.x_source: _src_tr_img_batch,
                     self.y_source: _src_tr_lab_batch,
                     self.is_training: True}
        feed_dict_eval = {self.x_source: _src_tr_img_batch,
                          self.y_source: _src_tr_lab_batch,
                          self.is_training: False}

        return feed_dict, feed_dict_eval
Example no. 4
def gatherImages_Labels_NooverlapImages(rootPath, mode):
    pklFilePath = rootPath + mode + '/'

    image_label_pairs = []
    Nooverlap_images = []

    fileNameList = DA_init.getFileNameList(pklFilePath)

    for f in fileNameList:
        _img, _lab, _nooverlap_img = DA_init.loadPickle(pklFilePath, f)

        for _i, _l in zip(_img, _lab):
            image_label_pairs.append([_i, _l])
        Nooverlap_images.append(_nooverlap_img)

    sorted_pairs = DA_init.sortVariousPairs(image_label_pairs)

    img_lib, lab_lib = [], []

    for i in range(len(sorted_pairs)):
        img_lib.append(sorted_pairs[i][0])
        lab_lib.append(sorted_pairs[i][1])

    img_lib = np.array(img_lib, dtype=np.float32)
    lab_lib = np.array(lab_lib, dtype=np.int32)

    img_lib = np.expand_dims(img_lib, axis=3)
    lab_lib = DA_init.onehotEncoder(lab_lib, num_class=6)

    Nooverlap_images = np.concatenate(Nooverlap_images, axis=0)
    Nooverlap_images_lib = np.expand_dims(Nooverlap_images, axis=3)

    print('-' * 20 + mode + ' dataset processing finished' + '-' * 20)
    print(
        'Mode %s image lib shape: %s label lib shape: %s nooverlap images shape: %s'
        % (mode, str(img_lib.shape), str(
            lab_lib.shape), str(Nooverlap_images_lib.shape)))

    return img_lib, lab_lib, Nooverlap_images_lib
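DA_init.onehotEncoder appears only at the call above; a minimal sketch, assuming it maps integer class labels to one-hot rows with num_class columns:

import numpy as np

def onehotEncoder(label, num_class):
    # Assumed: one row per label, with a single 1.0 in the column of its class.
    onehot = np.zeros((len(label), num_class), dtype=np.float32)
    onehot[np.arange(len(label)), label] = 1.0
    return onehot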
Example no. 5
    def getBatchData(self):
        _src_tr_img_batch, _src_tr_lab_batch = DA_init.next_batch(
            self.source_training_data[0],
            self.source_training_data[1],
            self.bs,
            data_aug=False)
        _tar_tr_img_batch = DA_init.next_batch_unpaired(
            self.target_training_data, self.bs, data_aug=False)

        feed_dict = {
            self.X: _src_tr_img_batch,
            self.Y: _tar_tr_img_batch,
            self.is_training: True
        }

        feed_dict_eval = {
            self.X: _src_tr_img_batch,
            self.Y: _tar_tr_img_batch,
            self.is_training: False
        }

        return feed_dict, feed_dict_eval
Example no. 6
    def train(self):
        print(
            'Start to run in mode [Domain Adaptation Across Source and Target Domain]'
        )
        self.sess.run(tf.global_variables_initializer())
        self.preTrained_saver = tf.train.Saver(var_list=self.g2_preTrained_var)
        self.preTrained_saver.restore(self.sess, self.preTrained_path)
        print('Pre-trained model has been successfully restored!')

        self.train_itr = len(self.source_training_data[0]) // self.bs

        for e in range(1, self.eps + 1):
            _src_tr_img, _src_tr_lab = DA_init.shuffle_data(
                self.source_training_data[0], self.source_training_data[1])
            _tar_tr_img = DA_init.shuffle_data_nolabel(
                self.target_training_data)

            source_training_acc = 0.0
            source_training_loss = 0.0
            g_loss = 0.0
            d_loss = 0.0

            for itr in range(self.train_itr):
                _src_tr_img_batch, _src_tr_lab_batch = DA_init.next_batch(
                    _src_tr_img, _src_tr_lab, self.bs, itr)
                _tar_tr_img_batch = DA_init.next_batch_nolabel(
                    _tar_tr_img, self.bs)

                feed_dict = {
                    self.x_source: _src_tr_img_batch,
                    self.y_source: _src_tr_lab_batch,
                    self.x_target: _tar_tr_img_batch,
                    self.is_training: True,
                    self.keep_prob: self.kp
                }
                feed_dict_eval = {
                    self.x_source: _src_tr_img_batch,
                    self.y_source: _src_tr_lab_batch,
                    self.x_target: _tar_tr_img_batch,
                    self.is_training: False,
                    self.keep_prob: 1.0
                }

                # Three-stage schedule: epochs below 100 update the generator with its
                # step-1 objective only; the later stages jointly run generator and
                # discriminator updates with the step-2 and step-3 objectives.
                if e < 100:
                    _ = self.sess.run(self.g_train_op_step1,
                                      feed_dict=feed_dict)
                    _training_accuracy, _training_loss = self.sess.run(
                        [self.accuracy_source, self.loss_source],
                        feed_dict=feed_dict_eval)

                    source_training_acc += _training_accuracy
                    source_training_loss += _training_loss

                elif e < 200:
                    _, _ = self.sess.run(
                        [self.g_train_op_step2, self.d_train_op_step1],
                        feed_dict=feed_dict)
                    _training_accuracy, _training_loss, _g_loss, _d_loss = self.sess.run(
                        [
                            self.accuracy_source, self.loss_source,
                            self.g_loss_step2, self.d_loss_step1
                        ],
                        feed_dict=feed_dict_eval)

                    source_training_acc += _training_accuracy
                    source_training_loss += _training_loss
                    g_loss += _g_loss
                    d_loss += _d_loss

                elif e < self.eps:
                    _, _ = self.sess.run(
                        [self.g_train_op_step3, self.d_train_op_step2],
                        feed_dict=feed_dict)
                    _training_accuracy, _training_loss, _g_loss, _d_loss = self.sess.run(
                        [
                            self.accuracy_source, self.loss_source,
                            self.g_loss_step3, self.d_loss_step2
                        ],
                        feed_dict=feed_dict_eval)

                    source_training_acc += _training_accuracy
                    source_training_loss += _training_loss
                    g_loss += _g_loss
                    d_loss += _d_loss

            summary = self.sess.run(self.merged, feed_dict=feed_dict_eval)

            source_training_acc = float(source_training_acc / self.train_itr)
            source_training_loss = float(source_training_loss / self.train_itr)
            g_loss = float(g_loss / self.train_itr)
            d_loss = float(d_loss / self.train_itr)

            source_validation_acc, source_validation_loss = self.validation_procedure(
                validation_data=self.source_validation_data,
                distribution_op=self.distribution_source,
                loss_op=self.loss_source,
                inputX=self.x_source,
                inputY=self.y_source)

            log1 = "Epoch: [%d], Domain: Source, Training Accuracy: [%g], Validation Accuracy: [%g], " \
                   "Training Loss: [%g], Validation Loss: [%g], generator Loss: [%g], Discriminator Loss: [%g], " \
                   "Time: [%s]" % (
                       e, source_training_acc, source_validation_acc, source_training_loss, source_validation_loss,
                       g_loss, d_loss, time.ctime(time.time()))

            self.plt_epoch.append(e)
            self.plt_training_accuracy.append(source_training_acc)
            self.plt_training_loss.append(source_training_loss)
            self.plt_validation_accuracy.append(source_validation_acc)
            self.plt_validation_loss.append(source_validation_loss)
            self.plt_d_loss.append(d_loss)
            self.plt_g_loss.append(g_loss)

            da_utils.plotAccuracy(x=self.plt_epoch,
                                  y1=self.plt_training_accuracy,
                                  y2=self.plt_validation_accuracy,
                                  figName=self.model,
                                  line1Name='training',
                                  line2Name='validation',
                                  savePath=self.ckptDir)

            da_utils.plotLoss(x=self.plt_epoch,
                              y1=self.plt_training_loss,
                              y2=self.plt_validation_loss,
                              figName=self.model,
                              line1Name='training',
                              line2Name='validation',
                              savePath=self.ckptDir)

            da_utils.plotLoss(x=self.plt_epoch,
                              y1=self.plt_d_loss,
                              y2=self.plt_g_loss,
                              figName=self.model + '_GD_Loss',
                              line1Name='D_Loss',
                              line2Name='G_Loss',
                              savePath=self.ckptDir)

            da_utils.save2file(log1, self.ckptDir, self.model)

            self.writer.add_summary(summary, e)

            self.saver.save(self.sess,
                            self.ckptDir + self.model + '-' + str(e))

            self.test_procedure(self.source_test_data,
                                distribution_op=self.distribution_source,
                                inputX=self.x_source,
                                inputY=self.y_source,
                                mode='source')
            self.test_procedure(self.target_test_data,
                                distribution_op=self.distribution_target,
                                inputX=self.x_target,
                                inputY=self.y_target,
                                mode='target')
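validation_procedure is called with keyword arguments above but is not included in these snippets. A minimal sketch of the assumed behaviour (batched forward passes with dropout disabled, averaging accuracy and loss over the validation set); the real method may differ:

    def validation_procedure(self, validation_data, distribution_op, loss_op, inputX, inputY):
        # Sketch only; assumes numpy is imported as np at module level.
        val_itr = len(validation_data[0]) // self.bs
        total_acc, total_loss = 0.0, 0.0
        for itr in range(val_itr):
            _img, _lab = DA_init.next_batch(validation_data[0], validation_data[1],
                                            self.bs, itr)
            _dist, _loss = self.sess.run([distribution_op, loss_op],
                                         feed_dict={inputX: _img,
                                                    inputY: _lab,
                                                    self.is_training: False,
                                                    self.keep_prob: 1.0})
            total_acc += np.mean(np.argmax(_dist, axis=1) == np.argmax(_lab, axis=1))
            total_loss += _loss
        return total_acc / val_itr, total_loss / val_itr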
Example no. 7
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

if args.data_domain == 'Source':
    src_name = 'source'
    tar_name = 'target'
    preTrained_path = '../checkpoint/bl_s2t_t1/bl_s2t_t1-51'
elif args.data_domain == 'Target':
    src_name = 'target'
    tar_name = 'source'
    preTrained_path = '../checkpoint/bl_t2s_t1/bl_t2s_t1-147'
else:
    src_name = ''
    tar_name = ''
    preTrained_path = ''

src_training = DA_init.loadPickle(da_utils.experimentalPath,
                                  src_name + '_training.pkl')
src_validation = DA_init.loadPickle(da_utils.experimentalPath,
                                    src_name + '_validation.pkl')
src_test = DA_init.loadPickle(da_utils.experimentalPath,
                              src_name + '_test.pkl')

tar_training = DA_init.loadPickle(da_utils.experimentalPath,
                                  tar_name + '_' + src_name + '.pkl')
tar_test = DA_init.loadPickle(da_utils.experimentalPath,
                              tar_name + '_test.pkl')

print('source training image shape', str(src_training[0].shape))
print('source training label shape', src_training[1].shape)
print('source training image mean/std', str(src_training[0].mean()),
      str(src_training[0].std()))
Example no. 8
parser.add_argument('-out_channel1', default=64, type=int)
parser.add_argument('-out_channel2', default=128, type=int)
parser.add_argument('-out_channel3', default=256, type=int)
parser.add_argument('-learning_rate', default=2e-4, type=float)
parser.add_argument('-weight_decay', default=1e-4, type=float)
parser.add_argument('-batch_size', default=128, type=int)
parser.add_argument('-img_height', default=32, type=int)
parser.add_argument('-img_width', default=32, type=int)
args = parser.parse_args()

os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

if args.data_domain == 'Source':
    print('Data From Source')
    training = DA_init.loadPickle(utils.experimentalPath,
                                  'source_training.pkl')
    validation = DA_init.loadPickle(utils.experimentalPath,
                                    'source_validation.pkl')
    test = DA_init.loadPickle(utils.experimentalPath, 'source_test.pkl')

    print('training image shape', str(training[0].shape))
    print('training label shape', str(training[1].shape))

    print('validation image shape', str(validation[0].shape))
    print('validation label shape', validation[1].shape)

    print('test image shape', test[0].shape)
    print('test label shape', test[1].shape)

if args.data_domain == 'Target':
    print('Data From Target')
Example no. 9
parser.add_argument('-restore_epoch', default=0, type=int)
parser.add_argument('-num_class', default=6, type=int)
parser.add_argument('-ksize', default=3, type=int)
parser.add_argument('-out_channel1', default=16, type=int)
parser.add_argument('-out_channel2', default=32, type=int)
parser.add_argument('-out_channel3', default=64, type=int)
parser.add_argument('-learning_rate', default=1e-4, type=float)
parser.add_argument('-batch_size', default=128, type=int)
parser.add_argument('-img_height', default=32, type=int)
parser.add_argument('-img_width', default=32, type=int)
args = parser.parse_args()

os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

src_training = DA_init.loadPickle(FineTuning_utils.experimentalPath,
                                  'source_training.pkl')
src_validation = DA_init.loadPickle(FineTuning_utils.experimentalPath,
                                    'source_validation.pkl')
src_test = DA_init.loadPickle(FineTuning_utils.experimentalPath,
                              'source_test.pkl')

tar_training = DA_init.loadPickle(FineTuning_utils.experimentalPath,
                                  'target_training.pkl')
tar_validation = DA_init.loadPickle(FineTuning_utils.experimentalPath,
                                    'target_validation.pkl')
tar_test = DA_init.loadPickle(FineTuning_utils.experimentalPath,
                              'target_test.pkl')

src_training = FineTuning_utils.normalizeInput(src_training, mode='Paired')
src_validation = FineTuning_utils.normalizeInput(src_validation, mode='Paired')
src_test = FineTuning_utils.normalizeInput(src_test, mode='Paired')
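FineTuning_utils.normalizeInput is not shown in these snippets either. A minimal sketch, assuming mode='Paired' means the argument is an [images, labels] pair and that the normalization is a plain standardization; the actual scheme and any other modes are guesses:

import numpy as np

def normalizeInput(data, mode):
    if mode == 'Paired':
        # Assumed: data is [images, labels]; standardize the images only.
        image, label = data
        image = (image - image.mean()) / image.std()
        return [image, label]
    # Hypothetical fallback for unlabeled arrays.
    return (data - data.mean()) / data.std()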
Example no. 10
    def train(self):
        print('Start to run in mode [Supervised Learning in Source Domain]')
        self.sess.run(tf.global_variables_initializer())
        self.train_itr = len(self.training_data[0]) // self.bs

        self.best_val_accuracy = []
        self.best_val_loss = []

        for e in range(1, self.eps + 1):
            _tr_img, _tr_lab = DA_init.shuffle_data(self.training_data[0],
                                                    self.training_data[1])

            training_acc = 0.0
            training_loss = 0.0

            for itr in range(self.train_itr):
                _tr_img_batch, _tr_lab_batch = DA_init.next_batch(
                    _tr_img, _tr_lab, self.bs, itr)
                _train_accuracy, _train_loss, _ = self.sess.run(
                    [self.accuracy, self.loss, self.train_op],
                    feed_dict={
                        self.x: _tr_img_batch,
                        self.y: _tr_lab_batch,
                        self.is_training: True
                    })
                training_acc += _train_accuracy
                training_loss += _train_loss

            summary = self.sess.run(self.merged,
                                    feed_dict={
                                        self.x: _tr_img_batch,
                                        self.y: _tr_lab_batch,
                                        self.is_training: False
                                    })

            training_acc = float(training_acc / self.train_itr)
            training_loss = float(training_loss / self.train_itr)

            validation_acc, validation_loss = self.validation_procedure()
            self.best_val_accuracy.append(validation_acc)
            self.best_val_loss.append(validation_loss)

            log1 = "Epoch: [%d], Training Accuracy: [%g], Validation Accuracy: [%g], Loss Training: [%g] " \
                   "Loss_validation: [%g], Time: [%s]" % \
                   (e, training_acc, validation_acc, training_loss, validation_loss, time.ctime(time.time()))

            self.plt_epoch.append(e)
            self.plt_training_accuracy.append(training_acc)
            self.plt_training_loss.append(training_loss)
            self.plt_validation_accuracy.append(validation_acc)
            self.plt_validation_loss.append(validation_loss)

            utils.plotAccuracy(x=self.plt_epoch,
                               y1=self.plt_training_accuracy,
                               y2=self.plt_validation_accuracy,
                               figName=self.model,
                               line1Name='training',
                               line2Name='validation',
                               savePath=self.ckptDir)

            utils.plotLoss(x=self.plt_epoch,
                           y1=self.plt_training_loss,
                           y2=self.plt_validation_loss,
                           figName=self.model,
                           line1Name='training',
                           line2Name='validation',
                           savePath=self.ckptDir)

            utils.save2file(log1, self.ckptDir, self.model)

            self.writer.add_summary(summary, e)

            self.saver.save(self.sess,
                            self.ckptDir + self.model + '-' + str(e))

            self.test_procedure()

        self.best_val_index = self.best_val_accuracy.index(
            max(self.best_val_accuracy))
        log2 = 'Highest Validation Accuracy : [%g], Epoch : [%g]' % (
            self.best_val_accuracy[self.best_val_index],
            self.best_val_index + 1)
        utils.save2file(log2, self.ckptDir, self.model)

        self.best_val_index_loss = self.best_val_loss.index(
            min(self.best_val_loss))
        log3 = 'Lowest Validation Loss : [%g], Epoch : [%g]' % (
            self.best_val_loss[self.best_val_index_loss],
            self.best_val_index_loss + 1)
        utils.save2file(log3, self.ckptDir, self.model)
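plotAccuracy and plotLoss are called with the same keyword arguments across utils, da_utils, utils_model1 and FineTuning_utils. A minimal matplotlib sketch of the assumed behaviour, writing a two-curve figure into savePath; the file name and axis labels are assumptions:

import matplotlib
matplotlib.use('Agg')  # assumed: figures are saved to disk, not displayed
import matplotlib.pyplot as plt

def plotLoss(x, y1, y2, figName, line1Name, line2Name, savePath):
    # Assumed: plot two curves against epoch and save as <savePath><figName>.png.
    plt.figure()
    plt.plot(x, y1, label=line1Name)
    plt.plot(x, y2, label=line2Name)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.savefig(savePath + figName + '.png')
    plt.close()

# plotAccuracy is assumed to be analogous, with accuracy on the y-axis.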
Example no. 11
    print(
        'Mode %s image lib shape: %s label lib shape: %s nooverlap images shape: %s'
        % (mode, str(img_lib.shape), str(
            lab_lib.shape), str(Nooverlap_images_lib.shape)))

    return img_lib, lab_lib, Nooverlap_images_lib


# Start processing data for the two domains
print('start processing source domain data')

training_img, training_lab, training_nooverlap = gatherImages_Labels_NooverlapImages(
    sourceDomainPath, mode='training')
validation_img, validation_lab, _ = gatherImages_Labels_NooverlapImages(
    sourceDomainPath, mode='validation')
test_img, test_lab, _ = gatherImages_Labels_NooverlapImages(sourceDomainPath,
                                                            mode='test')

DA_init.savePickle([training_img, training_lab], experimentalPath,
                   'source_training.pkl')
DA_init.savePickle([validation_img, validation_lab], experimentalPath,
                   'source_validation.pkl')
DA_init.savePickle([test_img, test_lab], experimentalPath, 'source_test.pkl')
DA_init.savePickle(training_nooverlap, experimentalPath, 'source_target.pkl')

print('training image shape', str(training_img.shape))
print('training label shape', str(training_lab.shape))
print('training image mean/std', str(training_img.mean()),
      str(training_img.std()))

print('validation image shape', str(validation_img.shape))
print('validation label shape', str(validation_lab.shape))
print('validation image mean/std', str(validation_img.mean()),
      str(validation_img.std()))
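DA_init.savePickle and DA_init.loadPickle are used throughout but never defined in these snippets; a minimal sketch, assuming plain pickle serialization keyed by directory and file name:

import os
import pickle

def savePickle(obj, path, fileName):
    # Assumed: serialize obj to <path>/<fileName>.
    with open(os.path.join(path, fileName), 'wb') as f:
        pickle.dump(obj, f)

def loadPickle(path, fileName):
    # Assumed: return whatever object savePickle stored.
    with open(os.path.join(path, fileName), 'rb') as f:
        return pickle.load(f)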
Example no. 12
    def train(self):
        print('Reloading parameters from pre-trained source domain')
        self.sess.run(tf.global_variables_initializer())
        self.saver.restore(self.sess, self.reloadPath)
        print('Reloading finished')

        self.itr_epoch = len(self.training_data[0]) // self.bs
        self.total_iteration = self.eps * self.itr_epoch

        self.best_val_accuracy = []
        self.best_val_loss = []

        training_acc = 0.0
        training_loss = 0.0

        for itr in range(1, self.total_iteration + 1):
            _tr_img_batch, _tr_lab_batch = DA_init.next_batch(
                image=self.training_data[0],
                label=self.training_data[1],
                batch_size=self.bs)

            _train_accuracy, _train_loss, _ = self.sess.run(
                [self.accuracy, self.loss, self.train_op],
                feed_dict={
                    self.x: _tr_img_batch,
                    self.y: _tr_lab_batch,
                    self.is_training: True
                })
            training_acc += _train_accuracy
            training_loss += _train_loss

            # End of an epoch: summarize, validate, plot and checkpoint.
            if itr % self.itr_epoch == 0:
                _current_eps = int(itr / self.itr_epoch)
                summary = self.sess.run(self.merged,
                                        feed_dict={
                                            self.x: _tr_img_batch,
                                            self.y: _tr_lab_batch,
                                            self.is_training: False
                                        })

                training_acc = float(training_acc / self.itr_epoch)
                training_loss = float(training_loss / self.itr_epoch)

                validation_acc, validation_loss = self.validation_procedure()
                self.best_val_accuracy.append(validation_acc)
                self.best_val_loss.append(validation_loss)

                log1 = "Epoch: [%d], Training Accuracy: [%g], Validation Accuracy: [%g], Loss Training: [%g] " \
                       "Loss_validation: [%g], Time: [%s]" % \
                       (_current_eps, training_acc, validation_acc, training_loss, validation_loss,
                        time.ctime(time.time()))

                self.plt_epoch.append(_current_eps)
                self.plt_training_accuracy.append(training_acc)
                self.plt_training_loss.append(training_loss)
                self.plt_validation_accuracy.append(validation_acc)
                self.plt_validation_loss.append(validation_loss)

                FineTuning_utils.plotAccuracy(x=self.plt_epoch,
                                              y1=self.plt_training_accuracy,
                                              y2=self.plt_validation_accuracy,
                                              figName=self.model,
                                              line1Name='training',
                                              line2Name='validation',
                                              savePath=self.ckptDir)

                FineTuning_utils.plotLoss(x=self.plt_epoch,
                                          y1=self.plt_training_loss,
                                          y2=self.plt_validation_loss,
                                          figName=self.model,
                                          line1Name='training',
                                          line2Name='validation',
                                          savePath=self.ckptDir)

                FineTuning_utils.save2file(log1, self.ckptDir, self.model)

                self.writer.add_summary(summary, _current_eps)

                self.saver.save(
                    self.sess,
                    self.ckptDir + self.model + '-' + str(_current_eps))

                self.test_procedure()

                training_acc = 0.0
                training_loss = 0.0

        self.best_val_index = self.best_val_accuracy.index(
            max(self.best_val_accuracy))
        log2 = 'Highest Validation Accuracy : [%g], Epoch : [%g]' % (
            self.best_val_accuracy[self.best_val_index],
            self.best_val_index + 1)
        FineTuning_utils.save2file(log2, self.ckptDir, self.model)

        self.best_val_index_loss = self.best_val_loss.index(
            min(self.best_val_loss))
        log3 = 'Lowest Validation Loss : [%g], Epoch : [%g]' % (
            self.best_val_loss[self.best_val_index_loss],
            self.best_val_index_loss + 1)
        FineTuning_utils.save2file(log3, self.ckptDir, self.model)
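Finally, save2file appears in every training loop with the same (message, ckptDir, model) arguments; a minimal sketch of the assumed behaviour (the log file name is a guess):

import os

def save2file(message, ckptDir, model):
    # Assumed: echo the log line and append it to a per-model text file.
    print(message)
    with open(os.path.join(ckptDir, model + '.txt'), 'a') as f:
        f.write(message + '\n')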