def main():
    """Train a small Keras CNN on CIFAR-style image folders.

    Command-line flags select the train/test image directories, the label
    file, mini-batch size, epoch count and output directory.
    """
    parser = argparse.ArgumentParser(
        description='Example of Keras Data Generator')
    parser.add_argument('--train_dir', default='data/cifar/train/')
    # Bug fix: the test set previously defaulted to the *training* directory.
    parser.add_argument('--test_dir', default='data/cifar/test/')
    parser.add_argument('--label_file', default='data/cifar/labels.txt')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=32,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    args = parser.parse_args()

    print('batchsize: {}'.format(args.batchsize))
    print('epoch: {}'.format(args.epoch))

    # Datasets
    train_dir = pathlib.Path(args.train_dir)
    train_datagen = ImageDataGenerator()
    test_dir = pathlib.Path(args.test_dir)
    test_datagen = ImageDataGenerator()
    # Bug fix: header=-1 was removed from pandas; header=None is the
    # supported way to say "the label file has no header row".
    classes = list(pd.read_csv(args.label_file, header=None)[0])
    input_shape = (32, 32, 3)

    # Model
    model = build_model(input_shape, len(classes))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])
    model.summary()

    # Train
    # NOTE(review): `classes` is passed positionally as the second argument of
    # flow_from_directory; in stock Keras that slot is `target_size` — this
    # presumably relies on a custom ImageDataGenerator, confirm.
    # NOTE(review): steps_per_epoch counts the entries of the directory
    # itself (i.e. its class sub-folders), not individual images — verify
    # against the generator's actual sample count.
    model.fit_generator(
        generator=train_datagen.flow_from_directory(train_dir, classes),
        steps_per_epoch=int(
            np.ceil(len(list(train_dir.iterdir())) / args.batchsize)),
        epochs=args.epoch,
        verbose=1,
        validation_data=test_datagen.flow_from_directory(test_dir, classes),
        validation_steps=int(
            np.ceil(len(list(test_dir.iterdir())) / args.batchsize)))
Exemple #2
0
# How often (in iterations) to run validation
VAL_INTERVAL = 5000

# How often to save a model checkpoint
SAVE_INTERVAL = 10000

# Number of discriminator / generator updates per training step
d_iter = 1
g_iter = 1

# Let TensorFlow grow GPU memory on demand instead of grabbing it all upfront.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

# Initialize the data generator separately for the training and validation set
train_generator = ImageDataGenerator(batch_size=FLAGS.batch_size,
                                     height=FLAGS.feature_size,
                                     width=FLAGS.feature_size,
                                     z_dim=FLAGS.noise_dim,
                                     scale_size=(FLAGS.image_size,
                                                 FLAGS.image_size),
                                     mode='train')


def my_train():
    with tf.Graph().as_default():
        sess = tf.Session(config=config)
        model = FaceAging(sess=sess,
                          lr=FLAGS.learning_rate,
                          keep_prob=1.,
                          model_num=FLAGS.model_index,
                          batch_size=FLAGS.batch_size,
                          age_loss_weight=FLAGS.age_loss_weight,
                          gan_loss_weight=FLAGS.gan_loss_weight,
Exemple #3
0
# Directory where trained model checkpoints are written (Windows-style path).
checkpoint_path = ".\\model\\checkpoints"
"""
Main Part of the finetuning Script.
"""

# Create parent path if it doesn't exist
# NOTE(review): os.mkdir creates only the leaf directory; if intermediate
# directories may be missing, os.makedirs would be needed — confirm layout.
if not os.path.exists(filewriter_path):
    os.mkdir(filewriter_path)
if not os.path.exists(checkpoint_path):
    os.mkdir(checkpoint_path)

# Place data loading and preprocessing on the cpu
with tf.device('/cpu:0'):
    # Training pipeline is shuffled; validation pipeline keeps a fixed order.
    tr_data = ImageDataGenerator(train_file,
                                 mode='training',
                                 batch_size=batch_size,
                                 num_classes=num_classes,
                                 shuffle=True)
    val_data = ImageDataGenerator(val_file,
                                  mode='inference',
                                  batch_size=batch_size,
                                  num_classes=num_classes,
                                  shuffle=False)

    # create a reinitializable iterator given the dataset structure;
    # the same iterator is re-pointed at either dataset via init ops.
    iterator = Iterator.from_structure(tr_data.data.output_types,
                                       tr_data.data.output_shapes)
    next_batch = iterator.get_next()

# Ops for initializing the two different iterators
training_init_op = iterator.make_initializer(tr_data.data)
def train(model, saver, sess, exp_string, train_file_list, test_file, resume_itr=0):
    """MAML-style training loop over multiple source-domain data files.

    Each iteration samples all but one domain as meta-train and the
    remaining domain as meta-test, feeds both batches to `model`, and runs
    either the pre-training or the meta-training op. Periodically logs
    summaries, prints running metrics, saves checkpoints and evaluates on
    `test_file`.

    Args:
        model: graph object exposing input placeholders and train/test ops.
        saver: tf.train.Saver used for checkpointing.
        sess: active tf.Session.
        exp_string: experiment name used for log/checkpoint paths.
        train_file_list: one data-file list per training (source) domain.
        test_file: data-file list of the held-out test domain.
        resume_itr: iteration to resume from (default 0).
    """
    SUMMARY_INTERVAL = 100
    SAVE_INTERVAL = 10000
    PRINT_INTERVAL = 100
    TEST_PRINT_INTERVAL = 100
    dropout_rate = 0.5

    if FLAGS.log:
        train_writer = tf.summary.FileWriter(FLAGS.logdir + '/' + exp_string, sess.graph)
    print('Done initializing, starting training.')
    prelosses, postlosses = [], []

    num_classes = FLAGS.num_classes # for classification, 1 otherwise

    # Defining data loaders
    with tf.device('/cpu:0'):
        tr_data_list = []
        train_iterator_list = []
        train_next_list = []

        # One data pipeline + iterator per training (source) domain.
        for i in range(len(train_file_list)):
            tr_data = ImageDataGenerator(train_file_list[i],
                                     dataroot=FLAGS.dataroot,
                                     mode='training',
                                     batch_size=FLAGS.meta_batch_size,
                                     num_classes=num_classes,
                                     shuffle=True)
            tr_data_list.append(tr_data)

            train_iterator_list.append(Iterator.from_structure(tr_data_list[i].data.output_types,
                                           tr_data_list[i].data.output_shapes))
            train_next_list.append(train_iterator_list[i].get_next())

        # Test pipeline: batch size 1, fixed order, no shuffling.
        test_data = ImageDataGenerator(test_file,
                                      dataroot=FLAGS.dataroot,
                                      mode='inference',
                                      batch_size=1,
                                      num_classes=num_classes,
                                      shuffle=False)

        test_iterator = Iterator.from_structure(test_data.data.output_types,
                                           test_data.data.output_shapes)
        test_next_batch = test_iterator.get_next()


        # create a reinitializable iterator given the dataset structure

    # Ops for initializing different iterators
    training_init_op = []
    train_batches_per_epoch = []
    for i in range(len(train_file_list)):
        training_init_op.append(train_iterator_list[i].make_initializer(tr_data_list[i].data))
        train_batches_per_epoch.append(int(np.floor(tr_data_list[i].data_size/FLAGS.meta_batch_size)))

    test_init_op = test_iterator.make_initializer(test_data.data)
    test_batches_per_epoch = int(np.floor(test_data.data_size / 1))


    # Training begins

    for itr in range(resume_itr, FLAGS.pretrain_iterations + FLAGS.metatrain_iterations):
        feed_dict = {}

        # Sampling training and test tasks: all domains but one are
        # meta-train, the remaining one is meta-test.
        num_training_tasks = len(train_file_list)
        num_meta_train = num_training_tasks-1
        num_meta_test = num_training_tasks-num_meta_train

        # Randomly choosing meta train and meta test domains
        task_list = np.random.permutation(num_training_tasks)
        meta_train_index_list = task_list[:num_meta_train]
        meta_test_index_list = task_list[num_meta_train:]

        # Re-initialize a domain's iterator each time it completes an epoch
        # (also initializes every iterator at itr == 0).
        for i in range(len(train_file_list)):
            if itr%train_batches_per_epoch[i] == 0:
                sess.run(training_init_op[i])

        # Populating input tensors

        # Sampling meta train data: concatenate one batch from each
        # meta-train domain along the batch axis.
        for i in range(num_meta_train):

            task_ind = meta_train_index_list[i]
            if i == 0:
                inputa, labela = sess.run(train_next_list[task_ind])
            else:
                inp_tmp, lab_tmp = sess.run(train_next_list[task_ind])
                inputa = np.concatenate((inputa, inp_tmp), axis=0)
                labela = np.concatenate((labela, lab_tmp), axis=0)

        # Shuffle the pooled meta-train samples while keeping each
        # image/label pair aligned.
        inputs_all = list(zip(inputa, labela))
        shuffle(inputs_all)
        inputa, labela = zip(*inputs_all)

        # Sampling meta test data
        for i in range(num_meta_test):

            task_ind = meta_test_index_list[i]
            if i == 0:
                inputb, labelb = sess.run(train_next_list[task_ind])
            else:
                inp_tmp, lab_tmp = sess.run(train_next_list[task_ind])
                inputb = np.concatenate((inputb, inp_tmp), axis=0)
                labelb = np.concatenate((labelb, lab_tmp), axis=0)


        feed_dict = {model.inputa: inputa, model.inputb: inputb,  model.labela: labela, model.labelb: labelb, model.KEEP_PROB: dropout_rate}

        # Plain pre-training first, then switch to meta-training.
        if itr<FLAGS.pretrain_iterations:
            input_tensors = [model.pretrain_op]
        else:
            input_tensors = [model.metatrain_op]

        if (itr % SUMMARY_INTERVAL == 0 or itr % PRINT_INTERVAL == 0):
            input_tensors.extend([model.summ_op, model.total_loss1, model.total_losses2[FLAGS.num_updates-1]])
            input_tensors.extend([model.total_accuracy1, model.total_accuracies2[FLAGS.num_updates-1]])

        result = sess.run(input_tensors, feed_dict)

        if itr % SUMMARY_INTERVAL == 0:
            # NOTE(review): given the extends above, result[-2]/result[-1]
            # are model.total_accuracy1 / model.total_accuracies2[...], yet
            # they are accumulated in lists named pre/postlosses and printed
            # below without a label — confirm which metric is intended.
            prelosses.append(result[-2])
            if FLAGS.log:
                train_writer.add_summary(result[1], itr)
            postlosses.append(result[-1])

        if (itr!=0) and itr % PRINT_INTERVAL == 0:
            print_str = 'Iteration ' + str(itr - FLAGS.pretrain_iterations)
            print_str += ': ' + str(np.mean(prelosses)) + ', ' + str(np.mean(postlosses))
            print(print_str)
            prelosses, postlosses = [], []

        if (itr!=0) and itr % SAVE_INTERVAL == 0:
            saver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))


        # Testing periodically: full pass over the test set, averaging
        # per-sample loss and accuracy.
        if itr % TEST_PRINT_INTERVAL == 0:
            test_acc = 0.
            test_loss = 0.
            test_count = 0
            sess.run(test_init_op)
            for it in range(test_batches_per_epoch):

                test_input, test_label = sess.run(test_next_batch)

                feed_dict = {model.test_input: test_input, model.test_label: test_label, model.KEEP_PROB: 1.}
                input_tensors = [model.test_loss, model.test_acc]

                result = sess.run(input_tensors, feed_dict)
                test_loss += result[0]
                test_acc += result[1]
                test_count += 1

            print('Validation results: Iteration %d, Loss: %f, Accuracy: %f' %(itr, test_loss/test_count, test_acc/test_count))

    # Final checkpoint after the last iteration.
    saver.save(sess, FLAGS.logdir + '/' + exp_string +  '/model' + str(itr))
Exemple #5
0
# Fraction of GPU memory TensorFlow is allowed to claim.
GPU_MEMORY_FRACTION = 0.95
num_classes=10
display_step=100
# Text file listing image paths with their labels.
train_set_dir='image_label.txt'
logdir='./log'
checkpoint_path='./modelfiles'
# NOTE(review): this handle is opened at import time and never closed in the
# visible code — confirm it is closed (or use a context manager) downstream.
trainlog=open('train_log.txt','w')

if not os.path.isdir(checkpoint_path):
    os.mkdir(checkpoint_path)


# Build the input pipeline on the CPU.
with tf.device('/cpu:0'):
    tr_data = ImageDataGenerator(train_set_dir,
                                 batch_size=BATCH_SIZE,
                                 num_classes=num_classes,
                                 shuffle=True)
    print(tr_data)
    iterator = Iterator.from_structure(tr_data.data.output_types,
                                       tr_data.data.output_shapes)
    next_batch = iterator.get_next()
    training_init_op = iterator.make_initializer(tr_data.data)


    global_step = tf.Variable(0, trainable=False)


    # Placeholders for a batch of 112x96 RGB images and one-hot labels.
    x = tf.placeholder(tf.float32, [BATCH_SIZE, 112, 96, 3])
    y = tf.placeholder(tf.float32, [BATCH_SIZE, num_classes])
    learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
Exemple #6
0
# Either resume from a saved checkpoint, or build a fresh CNN whose
# architecture depends on the input resolution.
if EPOCH_TO_LOAD > 0:
    model, is_loaded = load_model(CHECKPOINTS_FOLDER + MODEL_JSON,
                                  CHECKPOINTS_FOLDER + MODEL_H5)
else:
    if IMAGE_FIRST_DIM == 64:
        model = cnn64(IMAGE_FIRST_DIM, N_COLORS)
    if IMAGE_FIRST_DIM == 128:
        model = cnn128(IMAGE_FIRST_DIM, N_COLORS)

starting_lr = 0.1
adam = Adam(lr=starting_lr)
# NOTE(review): sgd is constructed but the model is compiled with adam —
# confirm whether it is used further down or is dead configuration.
sgd = SGD(lr=0.05, momentum=0.9, decay=0.0005)
model.compile(loss='binary_crossentropy', optimizer=adam, metrics=[beta_score])

# Rescale raw pixel values into [0, 1].
datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = datagen.flow_from_directory("datas/train",
                                              target_size=(IMAGE_FIRST_DIM,
                                                           IMAGE_FIRST_DIM),
                                              batch_size=BATCH_SIZE,
                                              class_mode="multilabel",
                                              multilabel_classes=label_dict)

# Validation: batch size 1, no shuffling, so predictions stay aligned with
# filenames.
val_generator = datagen.flow_from_directory("datas/validation",
                                            target_size=(IMAGE_FIRST_DIM,
                                                         IMAGE_FIRST_DIM),
                                            batch_size=1,
                                            shuffle=False,
                                            class_mode="multilabel",
                                            multilabel_classes=label_dict)
Exemple #7
0
                    "folder that contains images")
#na li end

flags.DEFINE_string("test_data_dir", './images/test/', "test images")

flags.DEFINE_string("train_data_dir", None, "train images")  #no need for test

FLAGS = flags.FLAGS

# Let TensorFlow grow GPU memory on demand instead of grabbing it all upfront.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

# Train/test generators share all settings; only `mode` differs.
generator = ImageDataGenerator(batch_size=FLAGS.batch_size,
                               height=FLAGS.feature_size,
                               width=FLAGS.feature_size,
                               z_dim=FLAGS.noise_dim,
                               scale_size=(FLAGS.image_size, FLAGS.image_size),
                               shuffle=False,
                               root_folder=FLAGS.root_folder,
                               mode='train')

val_generator = ImageDataGenerator(batch_size=FLAGS.batch_size,
                                   height=FLAGS.feature_size,
                                   width=FLAGS.feature_size,
                                   z_dim=FLAGS.noise_dim,
                                   scale_size=(FLAGS.image_size,
                                               FLAGS.image_size),
                                   shuffle=False,
                                   root_folder=FLAGS.root_folder,
                                   mode='test')
Exemple #8
0
    image = image.astype(np.float32)
    return image


def mask_preprocess(mask):
    """Convert a 0-255 grayscale mask into a 2-class one-hot label array.

    Pixels >= 127.5 become class 1 (foreground), the rest class 0.
    """
    label = mask / 255
    # Bug fix: np.bool was removed in NumPy 1.24; the builtin bool is the
    # supported equivalent.
    label = (label >= 0.5).astype(bool)
    label = to_categorical(label, num_classes=2)
    return label


if __name__ == "__main__":

    cfg = args()
    # Paired augmenters for images and their masks: identical geometric
    # parameters so corresponding samples receive the same transform.
    image_datagen_train = ImageDataGenerator(
        rotation_range=12,
        horizontal_flip=True,
        preprocessing_function=image_preprocess)
    mask_datagen_train = ImageDataGenerator(
        n_categorical=2,
        rotation_range=12,
        horizontal_flip=True,
        preprocessing_function=mask_preprocess)
    # class_mode=None: yields images only (the mask generator supplies the
    # targets); seed=1 presumably keeps both flows in lockstep — confirm the
    # mask flow uses the same seed.
    image_generator_train = image_datagen_train.flow_from_directory(
        os.path.join(cfg.train_dir, 'images'),
        target_size=cfg.input_shape[:-1],
        color_mode='rgb',
        class_mode=None,
        seed=1,
        batch_size=cfg.batch_size)
    mask_generator_train = mask_datagen_train.flow_from_directory(
        os.path.join(cfg.train_dir, 'masks'),
Exemple #9
0
def train(model, saver, sess, train_file_list, test_file, args, resume_itr=0):
    """Episodic meta-training loop for domain-generalized segmentation.

    Each iteration samples two meta-train domains and one meta-test domain
    from `train_file_list`, runs the task/meta/metric train ops, logs
    progress, and periodically checkpoints and evaluates on `test_file`.

    Args:
        model: graph object exposing input placeholders, train ops and lrs.
        saver: tf.train.Saver used for checkpointing.
        sess: active tf.Session.
        train_file_list: one data-file list per training (source) domain.
        test_file: data-file list of the unseen test domain.
        args: parsed options (log dirs, intervals, batch/epoch sizes, ...).
        resume_itr: iteration to resume from within each epoch (default 0).
    """
    if args.log:
        train_writer = tf.summary.FileWriter(
            args.log_dir + '/' + args.phase + '/', sess.graph)

    # Data loaders: one shuffled pipeline + iterator per source domain.
    with tf.device('/cpu:0'):
        tr_data_list, train_iterator_list, train_next_list = [], [], []
        for i in range(len(train_file_list)):
            tr_data = ImageDataGenerator(train_file_list[i], mode='training', \
                                         batch_size=args.meta_batch_size, num_classes=args.n_class, shuffle=True)
            tr_data_list.append(tr_data)
            train_iterator_list.append(
                tf.data.Iterator.from_structure(tr_data.data.output_types,
                                                tr_data.data.output_shapes))
            train_next_list.append(train_iterator_list[i].get_next())

    # Ops for initializing different iterators
    training_init_op = []
    train_batches_per_epoch = []
    for i in range(len(train_file_list)):
        training_init_op.append(train_iterator_list[i].make_initializer(
            tr_data_list[i].data))
        sess.run(training_init_op[i]
                 )  # initialize training sample generator at itr=0

    # Training begins
    best_test_dice = 0
    best_test_haus = 0
    # Py3 fix: xrange does not exist in Python 3; range is lazy there anyway.
    for epoch in range(0, args.epoch):
        for itr in range(resume_itr, args.train_iterations):
            start = time.time()
            # Sampling training and test tasks
            num_training_tasks = len(train_file_list)
            num_meta_train = 2  #num_training_tasks-1
            num_meta_test = 1  #num_training_tasks-num_meta_train  # as setting num_meta_test = 1

            # Randomly choosing meta train and meta test domains
            task_list = np.random.permutation(num_training_tasks)
            meta_train_index_list = task_list[:2]
            meta_test_index_list = task_list[-1:]

            # Sampling meta-train, meta-test data
            for i in range(num_meta_train):
                task_ind = meta_train_index_list[i]
                if i == 0:
                    inputa, labela = sess.run(train_next_list[task_ind])
                elif i == 1:
                    inputa1, labela1 = sess.run(train_next_list[task_ind])
                else:
                    raise RuntimeError('check number of meta-train domains.')

            for i in range(num_meta_test):
                task_ind = meta_test_index_list[i]
                if i == 0:
                    inputb, labelb = sess.run(train_next_list[task_ind])
                else:
                    raise RuntimeError('check number of meta-test domains.')

            # Mixed group (2+1+2 samples) used by the metric/contour branch.
            input_group = np.concatenate((inputa[:2], inputa1[:1], inputb[:2]),
                                         axis=0)
            label_group = np.concatenate((labela[:2], labela1[:1], labelb[:2]),
                                         axis=0)

            contour_group, metric_label_group = _get_coutour_sample(
                label_group)

            feed_dict = {model.inputa: inputa, model.labela: labela, \
                         model.inputa1: inputa1, model.labela1: labela1, \
                         model.inputb: inputb, model.labelb: labelb, \
                         model.input_group:input_group, \
                         model.label_group:label_group, \
                         model.contour_group:contour_group, \
                         model.metric_label_group:metric_label_group, \
                         model.KEEP_PROB: 1.0}

            output_tensors = [
                model.task_train_op, model.meta_train_op, model.metric_train_op
            ]
            output_tensors.extend([
                model.summ_op, model.seg_loss_b, model.compactness_loss_b,
                model.smoothness_loss_b, model.target_loss, model.source_loss
            ])
            _, _, _, summ_writer, seg_loss_b, compactness_loss_b, smoothness_loss_b, target_loss, source_loss = sess.run(
                output_tensors, feed_dict)

            if itr % args.print_interval == 0:
                logging.info(
                    "Epoch: [%2d] [%6d/%6d] time: %4.4f inner lr:%.8f outer lr:%.8f"
                    % (epoch, itr, args.train_iterations,
                       (time.time() - start), model.inner_lr.eval(),
                       model.outer_lr.eval()))
                logging.info(
                    'sou_loss: %.7f, tar_loss: %.7f, tar_seg_loss: %.7f, tar_compactness_loss: %.7f, tar_smoothness_loss: %.7f'
                    % (source_loss, target_loss, seg_loss_b,
                       compactness_loss_b, smoothness_loss_b))

            if itr % args.summary_interval == 0:
                train_writer.add_summary(summ_writer, itr)
                train_writer.flush()

            if (itr != 0) and itr % args.save_freq == 0:
                saver.save(
                    sess, args.checkpoint_dir + '/epoch_' + str(epoch) +
                    '_itr_' + str(itr) + ".model.cpkt")

            # Testing periodically
            if (itr != 0) and itr % args.test_freq == 0:
                test_dice, test_dice_arr, test_haus, test_haus_arr = test(
                    sess, test_file, model, args)

                if test_dice > best_test_dice:
                    best_test_dice = test_dice

                # NOTE(review): best_test_haus is reported below but never
                # updated from test_haus — for Hausdorff distance lower is
                # usually better, so the update direction must be confirmed
                # before fixing.

                # Py3 fix: `print >> f, ...` is Python 2 syntax; use the
                # print() function with file= instead.
                with open((os.path.join(args.log_dir, 'eva.txt')), 'a') as f:
                    print('Iteration %d :' % (itr), file=f)
                    print('\tUnseen domain testing results: Dice: %f' % (
                        test_dice), test_dice_arr, file=f)
                    print('\tCurrent best accuracy %f' % (best_test_dice),
                          file=f)
                    print('\tUnseen domain testing results: Haus: %f' % (
                        test_haus), test_haus_arr, file=f)
                    print('\tCurrent best accuracy %f' % (best_test_haus),
                          file=f)
Exemple #10
0
def train(model,
          saver,
          sess,
          exp_string,
          train_file_list,
          test_file,
          resume_itr=0):
    """Meta-train a classifier across source domains with class alignment.

    Each iteration picks all-but-one domains as meta-train and the remaining
    one as meta-test, builds class-presence masks so the kd-loss only covers
    classes sampled in both domains, runs the task/meta/metric train ops, and
    periodically logs, checkpoints, and evaluates on the unseen `test_file`
    domain, keeping the best-accuracy checkpoint.

    Args:
        model: graph object exposing input placeholders and train/test ops.
        saver: tf.train.Saver used for checkpointing.
        sess: active tf.Session.
        exp_string: experiment name used for log/checkpoint paths.
        train_file_list: one data-file list per training (source) domain.
        test_file: data-file list of the unseen test domain.
        resume_itr: iteration to resume from (default 0).
    """
    if FLAGS.log:
        train_writer = tf.summary.FileWriter(FLAGS.logdir + '/' + exp_string,
                                             sess.graph)
    source_losses, target_losses, source_accuracies, target_accuracies = [], [], [], []

    # Data loaders: one shuffled pipeline + iterator per source domain.
    with tf.device('/cpu:0'):
        tr_data_list, train_iterator_list, train_next_list = [], [], []
        for i in range(len(train_file_list)):
            tr_data = ImageDataGenerator(train_file_list[i], dataroot=FLAGS.dataroot, mode='training', \
                                         batch_size=FLAGS.meta_batch_size, num_classes=FLAGS.num_classes, shuffle=True)
            tr_data_list.append(tr_data)
            train_iterator_list.append(
                tf.data.Iterator.from_structure(tr_data.data.output_types,
                                                tr_data.data.output_shapes))
            train_next_list.append(train_iterator_list[i].get_next())

        # Test pipeline: batch size 1, fixed order.
        test_data = ImageDataGenerator(test_file, dataroot=FLAGS.dataroot, mode='inference', \
                                       batch_size=1, num_classes=FLAGS.num_classes, shuffle=False)
        test_iterator = tf.data.Iterator.from_structure(
            test_data.data.output_types, test_data.data.output_shapes)
        test_next_batch = test_iterator.get_next()

    # Ops for initializing different iterators
    training_init_op = []
    train_batches_per_epoch = []
    for i in range(len(train_file_list)):
        training_init_op.append(train_iterator_list[i].make_initializer(
            tr_data_list[i].data))
        train_batches_per_epoch.append(
            int(np.floor(tr_data_list[i].data_size / FLAGS.meta_batch_size)))

    test_init_op = test_iterator.make_initializer(test_data.data)
    test_batches_per_epoch = int(np.floor(test_data.data_size / 1))

    # Training begins
    best_test_acc = 0
    for itr in range(resume_itr, FLAGS.train_iterations):

        # Sampling training and test tasks
        num_training_tasks = len(train_file_list)
        num_meta_train = num_training_tasks - 1
        num_meta_test = num_training_tasks - num_meta_train  # as setting num_meta_test = 1

        # Randomly choosing meta train and meta test domains
        task_list = np.random.permutation(num_training_tasks)
        meta_train_index_list = task_list[:num_meta_train]
        meta_test_index_list = task_list[num_meta_train:]

        # Re-initialize a domain's iterator whenever it finishes an epoch
        # (this also initializes every iterator at itr == 0).
        for i in range(len(train_file_list)):
            if itr % train_batches_per_epoch[i] == 0:
                sess.run(training_init_op[i]
                         )  # initialize training sample generator at itr=0

        # Sampling meta-train, meta-test data
        for i in range(num_meta_train):
            task_ind = meta_train_index_list[i]
            if i == 0:
                inputa, labela = sess.run(train_next_list[task_ind])
            elif i == 1:
                inputa1, labela1 = sess.run(train_next_list[task_ind])
            else:
                raise RuntimeError('check number of meta-train domains.')

        for i in range(num_meta_test):
            task_ind = meta_test_index_list[i]
            if i == 0:
                inputb, labelb = sess.run(train_next_list[task_ind])
            else:
                raise RuntimeError('check number of meta-test domains.')

        # to avoid a certain un-sampled class affect stability of of global class alignment
        # i.e., mask-out the un-sampled class from computing kd-loss
        sampledb = np.unique(np.argmax(labelb, axis=1))
        sampleda = np.unique(np.argmax(labela, axis=1))
        bool_indicator_b_a = [0.0] * FLAGS.num_classes
        for i in range(FLAGS.num_classes):
            # only count class that are sampled in both source domains
            if (i in sampledb) and (i in sampleda):
                bool_indicator_b_a[i] = 1.0

        sampledb = np.unique(np.argmax(labelb, axis=1))
        sampleda1 = np.unique(np.argmax(labela1, axis=1))
        bool_indicator_b_a1 = [0.0] * FLAGS.num_classes
        for i in range(FLAGS.num_classes):
            if (i in sampledb) and (i in sampleda1):
                bool_indicator_b_a1[i] = 1.0

        # Bug fix: under Python 3, / yields a float and slicing with a float
        # index raises TypeError; use integer floor division.
        part = FLAGS.meta_batch_size // 3
        input_group = np.concatenate(
            (inputa[:part], inputa1[:part], inputb[:part]), axis=0)
        label_group = np.concatenate(
            (labela[:part], labela1[:part], labelb[:part]), axis=0)
        group_list = np.sum(label_group, axis=0)
        label_group = np.argmax(
            label_group,
            axis=1)  # transform one-hot labels into class-wise integer

        feed_dict = {model.inputa: inputa, model.labela: labela, \
                     model.inputa1: inputa1, model.labela1: labela1, \
                     model.inputb: inputb, model.labelb: labelb, \
                     model.input_group: input_group, model.label_group: label_group,\
                     model.bool_indicator_b_a: bool_indicator_b_a, model.bool_indicator_b_a1: bool_indicator_b_a1,
                     model.KEEP_PROB: 0.5}

        output_tensors = [
            model.task_train_op, model.meta_train_op, model.metric_train_op
        ]
        output_tensors.extend([
            model.summ_op, model.global_loss, model.source_loss,
            model.source_accuracy, model.metric_loss
        ])
        _, _, _, summ_writer, global_loss, source_loss, source_accuracy, metric_loss = sess.run(
            output_tensors, feed_dict)

        source_losses.append(source_loss)
        source_accuracies.append(source_accuracy)

        if itr % FLAGS.print_interval == 0:
            print('---' * 10 + '\n%s' % exp_string)
            print('number of samples per category:', group_list)
            print('global loss: %.7f' % global_loss)
            print('metric_loss: %.7f ' % metric_loss)
            print('Iteration %d' % itr + ': Loss ' + 'training domains ' +
                  str(np.mean(source_losses)))
            print('Iteration %d' % itr + ': Accuracy ' + 'training domains ' +
                  str(np.mean(source_accuracies)))
            # Bug fix: also reset source_accuracies so the printed accuracy
            # is a per-interval mean like the loss, not a running mean over
            # the whole run.
            source_losses, target_losses, source_accuracies = [], [], []

        if itr % FLAGS.summary_interval == 0 and FLAGS.log:
            train_writer.add_summary(summ_writer, itr)

        if (itr != 0) and itr % FLAGS.save_interval == 0:
            saver.save(sess,
                       FLAGS.logdir + '/' + exp_string + '/model' + str(itr))

        # Testing periodically: full pass over the unseen domain, tracking
        # per-class accuracy for debugging and keeping the best checkpoint.
        class_accs = [0.0] * FLAGS.num_classes
        class_samples = [0.0] * FLAGS.num_classes
        if itr % FLAGS.test_print_interval == 0:
            test_acc, test_loss, test_count = 0.0, 0.0, 0.0
            sess.run(test_init_op)  # initialize testing data generator
            for it in range(test_batches_per_epoch):
                test_input, test_label = sess.run(test_next_batch)
                feed_dict = {
                    model.test_input: test_input,
                    model.test_label: test_label,
                    model.KEEP_PROB: 1.
                }
                output_tensors = [model.test_loss, model.test_acc]
                result = sess.run(output_tensors, feed_dict)
                test_loss += result[0]
                test_acc += result[1]
                test_count += 1
                this_class = np.argmax(test_label, axis=1)[0]
                class_accs[this_class] += result[1]  # added for debug
                class_samples[this_class] += 1
            test_acc = test_acc / test_count
            if test_acc > best_test_acc:
                best_test_acc = test_acc
                saver.save(
                    sess, FLAGS.logdir + '/' + exp_string + '/itr' + str(itr) +
                    '_model_acc' + str(best_test_acc))

            print(
                'Unseen Target Validation results: Iteration %d, Loss: %f, Accuracy: %f'
                % (itr, test_loss / test_count, test_acc))
            print('Current best accuracy {}'.format(best_test_acc))

            with open((os.path.join(FLAGS.logdir, exp_string, 'eva.txt')),
                      'a') as fle:
                fle.write(
                    'Unseen Target Validation results: Iteration %d, Loss: %f, Accuracy: %f \n'
                    % (itr, test_loss / test_count, test_acc))
Exemple #11
0
def train(model, saver, sess, exp_string, pairs_dir, resume_itr=0):
    """Meta-train ``model`` on randomly sampled support/query pair tasks.

    Each iteration samples one pair from ``pairs_dir``, draws a support
    batch and a (shuffled) query batch from its two iterators, and runs a
    single optimization step.  Running statistics are printed every
    ``FLAGS.print_interval`` iterations; a checkpoint is saved and an
    external inference script is launched every ``FLAGS.infer_interval``
    iterations.

    Args:
        model: graph wrapper exposing the inputa/labela/inputb/labelb and
            KEEP_PROB placeholders plus the task_train_op / summ_op /
            lossa / meta_loss / accuracya / accuracyb tensors.
        saver: tf.train.Saver used for checkpointing.
        sess: active tf.Session with variables already initialized.
        exp_string: experiment identifier used in log/checkpoint paths.
        pairs_dir: list of pair directories, each named '<support>-<query>'
            and containing '<support>.txt' and '<query>.txt' file lists.
        resume_itr: iteration to resume training from (0 = fresh run).
    """
    if FLAGS.log:
        train_writer = tf.summary.FileWriter(FLAGS.logdir + '/' + exp_string,
                                             sess.graph)
    # Running-window statistics, reset at every print interval.
    support_losses, query_losses, support_accuracies, query_accuracies = [], [], [], []

    # Data loaders (pinned to CPU so the GPU is left to the model).
    with tf.device('/cpu:0'):
        tr_data_list, train_iterator_list, train_next_list = [], [], []
        for i in range(len(pairs_dir)):
            # The directory name encodes the support and query set names.
            pair_name = os.path.basename(pairs_dir[i])
            s_set, q_set = pair_name.split('-')
            s_set_list = os.path.join(pairs_dir[i], s_set + '.txt')
            q_set_list = os.path.join(pairs_dir[i], q_set + '.txt')

            s_set_data = ImageDataGenerator(s_set_list, batch_size=FLAGS.meta_batch_size, \
                                            num_classes=FLAGS.num_classes, shuffle=False)
            q_set_data = ImageDataGenerator(q_set_list, batch_size=FLAGS.meta_batch_size, \
                                            num_classes=FLAGS.num_classes, shuffle=False)
            s_iterator = tf.data.Iterator.from_structure(s_set_data.data.output_types, \
                                                             s_set_data.data.output_shapes)
            q_iterator = tf.data.Iterator.from_structure(q_set_data.data.output_types, \
                                                             q_set_data.data.output_shapes)
            tr_data_list.append((s_set_data, q_set_data))
            train_iterator_list.append((s_iterator, q_iterator))
            train_next_list.append(
                (s_iterator.get_next(), q_iterator.get_next()))

    # Ops for initializing the different iterators.
    training_init_op = []
    s_batches_per_epoch, s_batch_marker = [], []
    q_batches_per_epoch, q_batch_marker = [], []
    for i in range(len(pairs_dir)):
        s_init = train_iterator_list[i][0].make_initializer(
            tr_data_list[i][0].data)
        q_init = train_iterator_list[i][1].make_initializer(
            tr_data_list[i][1].data)
        training_init_op.append((s_init, q_init))

        s_batches_per_epoch.append(
            int(np.floor(tr_data_list[i][0].data_size /
                         FLAGS.meta_batch_size)))
        q_batches_per_epoch.append(
            int(np.floor(tr_data_list[i][1].data_size /
                         FLAGS.meta_batch_size)))
        # Markers count down the batches remaining before an iterator
        # must be re-initialized (i.e. an "epoch" of that pair).
        s_batch_marker = s_batches_per_epoch[:]
        q_batch_marker = q_batches_per_epoch[:]

    # Training begins
    print("Start training.")
    start_time = time.time()
    # Initialize every support/query iterator once before training starts.
    for i in range(len(pairs_dir)):
        sess.run(training_init_op[i][0])
        sess.run(training_init_op[i][1])

    for itr in range(resume_itr, FLAGS.train_iterations):

        # Sample a pair uniformly at random.
        sampled_index = random.randint(0, len(pairs_dir) - 1)
        sampled_pair = train_next_list[sampled_index]

        # Re-initialize an iterator when its epoch is exhausted.
        s_batch_marker[sampled_index] = s_batch_marker[sampled_index] - 1
        q_batch_marker[sampled_index] = q_batch_marker[sampled_index] - 1
        if s_batch_marker[sampled_index] <= 0:
            sess.run(training_init_op[sampled_index][0])
            s_batch_marker[sampled_index] = s_batches_per_epoch[sampled_index]
        if q_batch_marker[sampled_index] <= 0:
            sess.run(training_init_op[sampled_index][1])
            q_batch_marker[sampled_index] = q_batches_per_epoch[sampled_index]

        # Get sampled data
        inputa, labela, namea = sess.run(sampled_pair[0])
        inputb, labelb, nameb = sess.run(sampled_pair[1])

        # shuffle query set
        inputb, labelb, nameb = suffule_line(inputb, labelb, nameb)

        feed_dict = {model.inputa: inputa, model.labela: labela, \
                     model.inputb: inputb, model.labelb: labelb, \
                     model.KEEP_PROB: 1.0}

        output_tensors = [model.task_train_op]
        output_tensors.extend([
            model.summ_op, model.lossa, model.meta_loss, model.accuracya,
            model.accuracyb
        ])
        _, summ_writer, support_loss, query_loss, support_acc, query_acc = sess.run(
            output_tensors, feed_dict)

        support_losses.append(support_loss)
        query_losses.append(query_loss)
        support_accuracies.append(support_acc)
        query_accuracies.append(query_acc)

        if itr % FLAGS.print_interval == 0:
            end_time = time.time()
            print('---' * 10 + '\n%s' % exp_string)
            print('time %.4f s' % (end_time - start_time))
            print('Iteration %d' % itr + ': S Loss ' +
                  str(np.mean(support_losses)))
            print('Iteration %d' % itr + ': Q Loss ' +
                  str(np.mean(query_losses)))
            print('Iteration %d' % itr + ': S Accuracy ' +
                  str(np.mean(support_accuracies)))
            print('Iteration %d' % itr + ': Q Accuracy ' +
                  str(np.mean(query_accuracies)))
            # BUG FIX: reset ALL four running-window lists so the next report
            # covers only the iterations since this print.  The original
            # reset only the loss lists (and an unused ``target_losses``),
            # so the accuracy means accumulated over the entire run.
            support_losses, query_losses = [], []
            support_accuracies, query_accuracies = [], []
            start_time = time.time()

        if (itr != 0) and itr % FLAGS.infer_interval == 0:
            saver.save(sess,
                       FLAGS.logdir + '/' + exp_string + '/model' + str(itr))
            os.system('./infer.sh 2>&1 | tee -a 0516-3-infer.log')
Exemple #12
0
def evaluation_after_training(model, sess, evaluation_file_list, evaluation_batch_size=1):
    """Evaluate the trained model on each listed domain and plot embeddings.

    For every file in ``evaluation_file_list`` a fresh data iterator is
    built, loss/accuracy are accumulated batch by batch, per-domain and
    overall accuracies are appended to text files under ``FLAGS.logdir``,
    and the collected feature embeddings are visualized (class-wise and
    domain-wise) via ``plot_embedding_of_points_2PLOTS``.

    Args:
        model: graph wrapper exposing test_input/test_label/KEEP_PROB
            placeholders plus the test loss/accuracy/embedding tensors.
        sess: active tf.Session with trained variables.
        evaluation_file_list: one image-list file per evaluation domain.
        evaluation_batch_size: batch size for evaluation.  Default 1; the
            per-class bookkeeping below uses only the first sample of a
            batch, so a batch size > 1 skews it — TODO confirm callers
            always pass 1.
    """
    if FLAGS.masf_mode:
        method_ = "MASF"
    else:
        method_ = "DeepAll"

    # Data loaders (pinned to CPU):
    with tf.device('/cpu:0'):
        evaluation_data_list, evaluation_iterator_list, evaluation_next_list = [], [], []
        for i in range(len(evaluation_file_list)):
            evaluation_data = ImageDataGenerator(evaluation_file_list[i], dataroot=FLAGS.dataroot, mode='inference', \
                                                 batch_size=evaluation_batch_size, num_classes=FLAGS.num_classes,
                                                 shuffle=False)
            evaluation_data_list.append(evaluation_data)
            evaluation_iterator_list.append(
                tf.data.Iterator.from_structure(evaluation_data.data.output_types, evaluation_data.data.output_shapes))
            evaluation_next_list.append(evaluation_iterator_list[i].get_next())

    # Ops for initializing different iterators:
    evaluation_init_op = []
    evaluation_batches_per_epoch = []
    for i in range(len(evaluation_file_list)):
        evaluation_init_op.append(evaluation_iterator_list[i].make_initializer(evaluation_data_list[i].data))
        evaluation_batches_per_epoch.append(int(np.floor(evaluation_data_list[i].data_size / evaluation_batch_size)))

    # Initialize every domain's iterator once:
    num_total_domains = len(evaluation_file_list)
    for i in range(len(evaluation_file_list)):
        sess.run(evaluation_init_op[i])

    # Accumulators over all domains (embeddings + labels + loss/accuracy):
    total_evaluation_embeddings = np.empty((0, FLAGS.feature_space_dimension))
    total_evaluation_labels_classwise = []
    total_evaluation_labels_domainwise = []
    total_evaluation_acc, total_evaluation_loss, total_evaluation_count = 0.0, 0.0, 0.0
    for domain_index in range(num_total_domains):
        class_accs = [0.0] * FLAGS.num_classes
        class_samples = [0.0] * FLAGS.num_classes
        evaluation_embeddings = np.zeros((evaluation_batches_per_epoch[domain_index], FLAGS.feature_space_dimension))
        evaluation_labels_classwise = np.zeros((evaluation_batches_per_epoch[domain_index],))
        evaluation_labels_domainwise = np.zeros((evaluation_batches_per_epoch[domain_index],))
        evaluation_acc, evaluation_loss, evaluation_count = 0.0, 0.0, 0.0
        for it in range(evaluation_batches_per_epoch[domain_index]):
            evaluation_input, evaluation_label_classwise = sess.run(evaluation_next_list[domain_index])
            feed_dict = {model.test_input: evaluation_input, model.test_label: evaluation_label_classwise,
                         model.KEEP_PROB: 1.}
            if FLAGS.masf_mode:
                output_tensors = [model.test_loss, model.test_acc, model.semantic_feature, model.outputs,
                                  model.metric_embedding]
            else:
                output_tensors = [model.test_loss, model.test_acc, model.embedding_feature, model.outputs]
            # --> model.semantic_feature: 4096-dimensional embedding with psi weights (for semantic features)
            # --> model.outputs: n_classes-dimensional embedding with theta weights (after softmax for cross-entropy)
            # --> model.metric_embedding: 256-dimensional embedding with phi weights (for metric features used in triplet loss)
            result = sess.run(output_tensors, feed_dict)
            evaluation_loss += result[0]
            total_evaluation_loss += result[0]
            evaluation_acc += result[1]
            total_evaluation_acc += result[1]
            evaluation_count += 1
            total_evaluation_count += 1
            # Per-class bookkeeping uses only the first sample of the batch.
            this_class = np.argmax(evaluation_label_classwise, axis=1)[0]
            class_accs[this_class] += result[1]  # added for debug
            class_samples[this_class] += 1
            evaluation_embeddings[it, :] = result[2]
            evaluation_labels_classwise[it] = np.argmax(evaluation_label_classwise, axis=1)[0]
            evaluation_labels_domainwise[it] = domain_index
        evaluation_acc = evaluation_acc / evaluation_count
        evaluation_acc *= 100
        evaluation_loss = evaluation_loss / evaluation_count
        print('Evaluation results: Domain %d, Loss: %f, Accuracy: %f' % (domain_index, evaluation_loss, evaluation_acc))
        total_evaluation_embeddings = np.vstack((total_evaluation_embeddings, evaluation_embeddings))
        total_evaluation_labels_classwise.extend(evaluation_labels_classwise)
        total_evaluation_labels_domainwise.extend(evaluation_labels_domainwise)
        # save per-domain accuracy results:
        path_ = FLAGS.logdir + FLAGS.dataset + '/' + method_ + '/total_evaluation/accuracy/'
        if not os.path.exists(path_):
            os.makedirs(path_)
        with open((path_ + 'domain' + str(domain_index) + '_accuracy.txt'), 'a') as fle:
            fle.write('Evaluation results: Domain %d, Loss: %f, Accuracy: %f' % (
            domain_index, evaluation_loss, evaluation_acc))
    # save total average accuracy results (average over the whole evaluation data):
    total_evaluation_acc = total_evaluation_acc / total_evaluation_count
    total_evaluation_acc *= 100
    total_evaluation_loss_avg = total_evaluation_loss / total_evaluation_count
    path_ = FLAGS.logdir + FLAGS.dataset + '/' + method_ + '/total_evaluation/accuracy/'
    if not os.path.exists(path_):
        os.makedirs(path_)
    with open((path_ + "total_accuracy.txt"), 'a') as fle:
        # BUG FIX: report the per-sample average loss (the value computed
        # above as total_evaluation_loss_avg) — the original wrote the raw
        # summed loss and never used the average it had just computed.
        fle.write('Evaluation results: Loss: %f, Accuracy: %f' % (total_evaluation_loss_avg, total_evaluation_acc))

    # plot the classwise/domainwise evaluation embeddings:
    path_save1 = FLAGS.logdir + FLAGS.dataset + '/' + method_ + "/total_evaluation/plots/classwise/"
    path_save2 = FLAGS.logdir + FLAGS.dataset + '/' + method_ + "/total_evaluation/plots/domainwise/"
    name_save1, name_save2 = "embedding_classwise", "embedding_domainwise"
    plot_embedding_of_points_2PLOTS(embedding=total_evaluation_embeddings, labels1=total_evaluation_labels_classwise,
                                    labels2=total_evaluation_labels_domainwise,
                                    path_save1=path_save1, path_save2=path_save2,
                                    name_save1=name_save1, name_save2=name_save2, n_samples_plot=2000, method='TSNE')
Exemple #13
0
def train_DeepAll(model, saver, sess, exp_string, train_file_list, test_file, val_file, resume_itr=0):
    """Train the DeepAll baseline (all source domains aggregated jointly).

    Each iteration draws one batch from every training domain, feeds them
    together to the model, and runs one optimization step.  Progress is
    printed/logged every ``FLAGS.print_interval`` iterations, checkpoints
    are saved every ``FLAGS.save_interval`` iterations, and the validation
    and test domains are evaluated periodically.

    NOTE(review): the sampling loop below assumes exactly three training
    domains (placeholders inputa / inputa1 / inputb) — confirm callers
    always pass three files in ``train_file_list``.

    Args:
        model: graph wrapper exposing the input/label/KEEP_PROB placeholders
            and the task_train_op / summ_op / loss / accuracy tensors.
        saver: tf.train.Saver used for checkpointing.
        sess: active tf.Session with variables initialized.
        exp_string: experiment identifier used in log/checkpoint paths.
        train_file_list: one image-list file per training domain.
        test_file: image-list file for the unseen test domain.
        val_file: image-list file for the validation domain.
        resume_itr: iteration to resume from (0 = fresh run).
    """
    if FLAGS.log:
        train_writer = tf.summary.FileWriter(FLAGS.logdir + '/' + FLAGS.dataset + '/' + exp_string, sess.graph)
    # Running-window statistics, reset at every print interval.
    source_losses, source_accuracies = [], []

    # Data loaders (pinned to CPU)
    with tf.device('/cpu:0'):
        tr_data_list, train_iterator_list, train_next_list = [], [], []
        for i in range(len(train_file_list)):
            tr_data = ImageDataGenerator(train_file_list[i], dataroot=FLAGS.dataroot, mode='training', \
                                         batch_size=FLAGS.deep_all_batch_size, num_classes=FLAGS.num_classes,
                                         shuffle=True)
            tr_data_list.append(tr_data)
            train_iterator_list.append(
                tf.data.Iterator.from_structure(tr_data.data.output_types, tr_data.data.output_shapes))
            train_next_list.append(train_iterator_list[i].get_next())

        test_data = ImageDataGenerator(test_file, dataroot=FLAGS.dataroot, mode='inference', \
                                       batch_size=1, num_classes=FLAGS.num_classes, shuffle=False)
        test_iterator = tf.data.Iterator.from_structure(test_data.data.output_types, test_data.data.output_shapes)
        test_next_batch = test_iterator.get_next()

        val_data = ImageDataGenerator(val_file, dataroot=FLAGS.dataroot, mode='inference', \
                                      batch_size=1, num_classes=FLAGS.num_classes, shuffle=False)
        val_iterator = tf.data.Iterator.from_structure(val_data.data.output_types, val_data.data.output_shapes)
        val_next_batch = val_iterator.get_next()

    # Ops for initializing the different iterators
    training_init_op = []
    train_batches_per_epoch = []
    for i in range(len(train_file_list)):
        training_init_op.append(train_iterator_list[i].make_initializer(tr_data_list[i].data))
        train_batches_per_epoch.append(int(np.floor(tr_data_list[i].data_size / FLAGS.deep_all_batch_size)))

    test_init_op = test_iterator.make_initializer(test_data.data)
    test_batches_per_epoch = int(np.floor(test_data.data_size / 1))

    val_init_op = val_iterator.make_initializer(val_data.data)
    val_batches_per_epoch = int(np.floor(val_data.data_size / 1))

    # Training begins
    best_test_acc, best_val_acc = 0, 0
    num_training_tasks = len(train_file_list)  # loop-invariant, hoisted
    for itr in range(resume_itr, FLAGS.train_iterations):

        for i in range(len(train_file_list)):
            # Re-initialize a domain's iterator at each epoch boundary, and
            # once at the resume iteration when resuming mid-run.
            # BUG FIX: the original condition ``or resume_itr != 0`` stayed
            # true for every iteration of a resumed run, re-initializing the
            # iterators on every single step.
            if itr % train_batches_per_epoch[i] == 0 or (resume_itr != 0 and itr == resume_itr):
                sess.run(training_init_op[i])  # initialize training sample generator

        # Sampling: one batch per training domain (assumes three domains).
        for i in range(num_training_tasks):
            task_ind = i
            if i == 0:
                inputa, labela = sess.run(train_next_list[task_ind])
            elif i == 1:
                inputa1, labela1 = sess.run(train_next_list[task_ind])
            else:
                inputb, labelb = sess.run(train_next_list[task_ind])
        feed_dict = {model.inputa: inputa, model.labela: labela, \
                     model.inputa1: inputa1, model.labela1: labela1, \
                     model.inputb: inputb, model.labelb: labelb, \
                     model.KEEP_PROB: 0.5}

        output_tensors = [model.task_train_op]
        output_tensors.extend([model.summ_op, model.source_loss, model.source_accuracy])
        _, summ_writer, source_loss, source_accuracy = sess.run(output_tensors, feed_dict)

        source_losses.append(source_loss)
        source_accuracies.append(source_accuracy)

        if itr % FLAGS.print_interval == 0:
            print('---' * 10 + '\n%s' % exp_string)
            print('Iteration %d' % itr + ': Loss ' + 'training domains ' + str(np.mean(source_losses)))
            print('Iteration %d' % itr + ': Accuracy ' + 'training domains ' + str(np.mean(source_accuracies)))
            # log loss and accuracy:
            path_save_train_acc = os.path.join(FLAGS.logdir, FLAGS.dataset, exp_string)
            if not os.path.exists(path_save_train_acc):
                os.makedirs(path_save_train_acc)
            with open(path_save_train_acc + '/eva_' + 'train' + '.txt', 'a') as fle:
                fle.write('Train results: Iteration %d, Loss: %f, Accuracy: %f \n' % (
                itr, np.mean(source_losses), np.mean(source_accuracies)))
            # BUG FIX: reset both running-window lists (the original reset
            # losses plus an unused ``target_losses`` and left the accuracy
            # list accumulating over the whole run).
            source_losses, source_accuracies = [], []

        if itr % FLAGS.summary_interval == 0 and FLAGS.log:
            train_writer.add_summary(summ_writer, itr)

        if itr % FLAGS.save_interval == 0:
            checkpoint_dir = FLAGS.logdir + '/' + FLAGS.dataset + '/' + exp_string + "/" + str(itr) + "/"
            save_network_model(saver_=saver, session_=sess, checkpoint_dir=checkpoint_dir,
                               model_name="model_itr" + str(itr))

        # Testing periodically
        if itr % FLAGS.val_print_interval == 0:
            val_acc, best_val_acc = evaluation_during_training(sess, model, exp_string, val_batches_per_epoch,
                                                               val_init_op, val_next_batch, itr, best_val_acc, saver,
                                                               is_val=True)
        if itr % FLAGS.test_print_interval == 0:
            test_acc, best_test_acc = evaluation_during_training(sess, model, exp_string, test_batches_per_epoch,
                                                                 test_init_op, test_next_batch, itr, best_test_acc,
                                                                 saver, is_val=False)
Exemple #14
0
def train_MASF(model, saver, sess, exp_string, train_file_list, test_file, val_file, resume_itr=0):
    """Train with MASF (meta-learning for domain generalization).

    Each iteration randomly splits the source domains into meta-train and
    meta-test, draws one batch per domain, builds per-class masks so that
    only classes sampled in both domains contribute to the global
    (knowledge-distillation) alignment loss, and runs the task, meta and
    metric update ops.  Progress is printed/logged every
    ``FLAGS.print_interval`` iterations, checkpoints saved every
    ``FLAGS.save_interval`` iterations, and the validation/test domains
    evaluated periodically.

    NOTE(review): the meta-train sampling assumes at most two meta-train
    domains (inputa / inputa1) and exactly one meta-test domain (inputb);
    a RuntimeError is raised otherwise.

    Args:
        model: graph wrapper exposing the input/label/mask/KEEP_PROB
            placeholders and the train/summary/loss/accuracy tensors.
        saver: tf.train.Saver used for checkpointing.
        sess: active tf.Session with variables initialized.
        exp_string: experiment identifier used in log/checkpoint paths.
        train_file_list: one image-list file per source domain.
        test_file: image-list file for the unseen test domain.
        val_file: image-list file for the validation domain.
        resume_itr: iteration to resume from (0 = fresh run).
    """
    if FLAGS.log:
        train_writer = tf.summary.FileWriter(FLAGS.logdir + '/' + FLAGS.dataset + '/' + exp_string, sess.graph)
    # Running-window statistics, reset at every print interval.
    source_losses, source_accuracies = [], []

    # Data loaders (pinned to CPU)
    with tf.device('/cpu:0'):
        tr_data_list, train_iterator_list, train_next_list = [], [], []
        for i in range(len(train_file_list)):
            tr_data = ImageDataGenerator(train_file_list[i], dataroot=FLAGS.dataroot, mode='training', \
                                         batch_size=FLAGS.meta_batch_size, num_classes=FLAGS.num_classes, shuffle=True)
            tr_data_list.append(tr_data)
            train_iterator_list.append(
                tf.data.Iterator.from_structure(tr_data.data.output_types, tr_data.data.output_shapes))
            train_next_list.append(train_iterator_list[i].get_next())

        test_data = ImageDataGenerator(test_file, dataroot=FLAGS.dataroot, mode='inference', \
                                       batch_size=1, num_classes=FLAGS.num_classes, shuffle=False)
        test_iterator = tf.data.Iterator.from_structure(test_data.data.output_types, test_data.data.output_shapes)
        test_next_batch = test_iterator.get_next()

        val_data = ImageDataGenerator(val_file, dataroot=FLAGS.dataroot, mode='inference', \
                                      batch_size=1, num_classes=FLAGS.num_classes, shuffle=False)
        val_iterator = tf.data.Iterator.from_structure(val_data.data.output_types, val_data.data.output_shapes)
        val_next_batch = val_iterator.get_next()

    # Ops for initializing the different iterators
    training_init_op = []
    train_batches_per_epoch = []
    for i in range(len(train_file_list)):
        training_init_op.append(train_iterator_list[i].make_initializer(tr_data_list[i].data))
        train_batches_per_epoch.append(int(np.floor(tr_data_list[i].data_size / FLAGS.meta_batch_size)))

    test_init_op = test_iterator.make_initializer(test_data.data)
    test_batches_per_epoch = int(np.floor(test_data.data_size / 1))

    val_init_op = val_iterator.make_initializer(val_data.data)
    val_batches_per_epoch = int(np.floor(val_data.data_size / 1))

    # Training begins
    best_test_acc, best_val_acc = 0, 0
    # Task split sizes are loop-invariant; hoisted out of the loop.
    num_training_tasks = len(train_file_list)
    num_meta_train = num_training_tasks - 1
    num_meta_test = num_training_tasks - num_meta_train  # as setting num_meta_test = 1
    for itr in range(resume_itr, FLAGS.train_iterations):

        # Randomly choosing meta train and meta test domains
        task_list = np.random.permutation(num_training_tasks)
        meta_train_index_list = task_list[:num_meta_train]
        meta_test_index_list = task_list[num_meta_train:]

        for i in range(len(train_file_list)):
            # Re-initialize a domain's iterator at each epoch boundary, and
            # once at the resume iteration when resuming mid-run.
            # BUG FIX: the original condition ``or resume_itr != 0`` stayed
            # true for every iteration of a resumed run, re-initializing the
            # iterators on every single step.
            if itr % train_batches_per_epoch[i] == 0 or (resume_itr != 0 and itr == resume_itr):
                sess.run(training_init_op[i])  # initialize training sample generator

        # Sampling meta-train, meta-test data
        for i in range(num_meta_train):
            task_ind = meta_train_index_list[i]
            if i == 0:
                inputa, labela = sess.run(train_next_list[task_ind])
            elif i == 1:
                inputa1, labela1 = sess.run(train_next_list[task_ind])
            else:
                raise RuntimeError('check number of meta-train domains.')

        for i in range(num_meta_test):
            task_ind = meta_test_index_list[i]
            if i == 0:
                inputb, labelb = sess.run(train_next_list[task_ind])
            else:
                raise RuntimeError('check number of meta-test domains.')

        # to avoid a certain un-sampled class affect stability of global class alignment
        # i.e., mask-out the un-sampled class from computing kd-loss
        sampledb = np.unique(np.argmax(labelb, axis=1))
        sampleda = np.unique(np.argmax(labela, axis=1))
        bool_indicator_b_a = [0.0] * FLAGS.num_classes
        for i in range(FLAGS.num_classes):
            # only count class that are sampled in both source domains
            if (i in sampledb) and (i in sampleda):
                bool_indicator_b_a[i] = 1.0

        sampledb = np.unique(np.argmax(labelb, axis=1))
        sampleda1 = np.unique(np.argmax(labela1, axis=1))
        bool_indicator_b_a1 = [0.0] * FLAGS.num_classes
        for i in range(FLAGS.num_classes):
            if (i in sampledb) and (i in sampleda1):
                bool_indicator_b_a1[i] = 1.0

        # Build the metric-learning group from the first third of each batch.
        part = FLAGS.meta_batch_size / 3
        part = int(part)
        input_group = np.concatenate((inputa[:part], inputa1[:part], inputb[:part]), axis=0)
        label_group = np.concatenate((labela[:part], labela1[:part], labelb[:part]), axis=0)
        group_list = np.sum(label_group, axis=0)
        label_group = np.argmax(label_group, axis=1)  # transform one-hot labels into class-wise integer

        feed_dict = {model.inputa: inputa, model.labela: labela, \
                     model.inputa1: inputa1, model.labela1: labela1, \
                     model.inputb: inputb, model.labelb: labelb, \
                     model.input_group: input_group, model.label_group: label_group, \
                     model.bool_indicator_b_a: bool_indicator_b_a, model.bool_indicator_b_a1: bool_indicator_b_a1,
                     model.KEEP_PROB: 0.5}

        output_tensors = [model.task_train_op, model.meta_train_op, model.metric_train_op]
        output_tensors.extend(
            [model.summ_op, model.global_loss, model.source_loss, model.source_accuracy, model.metric_loss])
        _, _, _, summ_writer, global_loss, source_loss, source_accuracy, metric_loss = sess.run(output_tensors,
                                                                                                feed_dict)

        source_losses.append(source_loss)
        source_accuracies.append(source_accuracy)

        if itr % FLAGS.print_interval == 0:
            print('---' * 10 + '\n%s' % exp_string)
            print('number of samples per category:', group_list)
            print('global loss: %.7f' % global_loss)
            print('metric loss: %.7f ' % metric_loss)
            print('Iteration %d' % itr + ': Loss ' + 'training domains ' + str(np.mean(source_losses)))
            print('Iteration %d' % itr + ': Accuracy ' + 'training domains ' + str(np.mean(source_accuracies)))
            # log loss and accuracy:
            path_save_train_acc = os.path.join(FLAGS.logdir, FLAGS.dataset, exp_string)
            if not os.path.exists(path_save_train_acc):
                os.makedirs(path_save_train_acc)
            with open(path_save_train_acc + '/eva_' + 'train' + '.txt', 'a') as fle:
                fle.write(
                    'Train results: Iteration %d, global loss: %.7f, metric loss: %.7f, Loss: %f, Accuracy: %f \n' % (
                    itr, global_loss, metric_loss, np.mean(source_losses), np.mean(source_accuracies)))
            # BUG FIX: reset both running-window lists (the original reset
            # losses plus an unused ``target_losses`` and left the accuracy
            # list accumulating over the whole run).
            source_losses, source_accuracies = [], []

        if itr % FLAGS.summary_interval == 0 and FLAGS.log:
            train_writer.add_summary(summ_writer, itr)

        if itr % FLAGS.save_interval == 0:
            checkpoint_dir = FLAGS.logdir + '/' + FLAGS.dataset + '/' + exp_string + "/" + str(itr) + "/"
            save_network_model(saver_=saver, session_=sess, checkpoint_dir=checkpoint_dir,
                               model_name="model_itr" + str(itr))

        # Testing periodically:
        if itr % FLAGS.val_print_interval == 0:
            val_acc, best_val_acc = evaluation_during_training(sess, model, exp_string, val_batches_per_epoch,
                                                               val_init_op, val_next_batch, itr, best_val_acc, saver,
                                                               is_val=True)
        if itr % FLAGS.test_print_interval == 0:
            test_acc, best_test_acc = evaluation_during_training(sess, model, exp_string, test_batches_per_epoch,
                                                                 test_init_op, test_next_batch, itr, best_test_acc,
                                                                 saver, is_val=False)