Code example #1
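These snippets are excerpted from a larger project and assume roughly the following imports. The module names for the project's own helpers (dp, Model, create_dataset, estimator_model_fn) are not shown anywhere in the listing, so the ones below are assumptions:

import math
import os
import pickle
import sys

import tensorflow as tf
from tqdm import tqdm

import data_preprocessing as dp          # hypothetical name for the project's data module
from model import Model, create_dataset  # hypothetical names for the DRCN helpers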
def main(_):
    """Main function for the Deep Reconstruction-Classification Network - DRCN"""
    # TODO: do not pass source label in target mode (it's not needed!)
    tf.reset_default_graph()
    # Load source and target data set
    source_size = 32
    target_size = 32
    if FLAGS.source == 'mnist':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_mnist(FLAGS.channel_size, False)
    elif FLAGS.source == 'mnistm':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_mnistm(FLAGS.channel_size)
    elif FLAGS.source == 'svhn':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_svhn(FLAGS.channel_size, False)
    else:
        sys.exit('For the source set you have to choose one of [svhn, mnist, mnistm]!')

    if FLAGS.target == 'mnist':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_mnist(FLAGS.channel_size, False)
    elif FLAGS.target == 'mnistm':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_mnistm(FLAGS.channel_size)
    elif FLAGS.target == 'svhn':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_svhn(FLAGS.channel_size, False)
    else:
        sys.exit('For the target set you have to choose one of [svhn, mnist, mnistm]!')

    # Create data placeholders.
    placeholder_x_s = tf.placeholder(tf.float32, shape=[None, source_size, source_size, FLAGS.channel_size])
    placeholder_y_s = tf.placeholder(tf.int32, shape=[None])
    placeholder_x_t = tf.placeholder(tf.float32, shape=[None, target_size, target_size, FLAGS.channel_size])
    placeholder_y_t = tf.placeholder(tf.int32, shape=[None])
    placeholder_training = tf.placeholder_with_default(tf.constant(True), shape=[])
    ds_source, ds_target = create_dataset(placeholder_x_s, placeholder_y_s, placeholder_x_t, placeholder_y_t)

    iterator = tf.data.Iterator.from_structure(ds_source.output_types, ds_source.output_shapes)
    x, y = iterator.get_next()

    # Init model
    drcn = Model(FLAGS.opt)
    drcn.train_source(x, y, placeholder_training)
    if FLAGS.source_only.lower() == 'false':
        drcn.train_target(x, y)

    source_iterator = iterator.make_initializer(ds_source)
    target_iterator = iterator.make_initializer(ds_target)

    # Configs
    saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # Stats
    source_acc_train = []
    source_acc_test = []
    target_acc_train = []
    target_acc_test = []
    source_loss_train = []
    target_loss_train = []

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        if tf.train.latest_checkpoint(FLAGS.model_dir) is not None:
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.model_dir))
        for epoch in range(FLAGS.total_epochs):
            print('Epoch: ', epoch)
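            # Phase 1: unsupervised reconstruction training on the target train split.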
            if FLAGS.source_only.lower() == 'false':
                sess.run(target_iterator, feed_dict={placeholder_x_t: x_train_t, placeholder_y_t: y_train_t})
                i = 0
                total_loss = 0
                total_acc = 0
                try:
                    with tqdm(total=len(x_train_t)) as pbar:
                        while True:
                            _, out_loss, target_acc = sess.run(
                                [drcn.optimize_reconstruction, drcn.rec_loss,
                                 drcn.target_class_acc],
                                feed_dict={placeholder_training: False})
                            i += 1
                            total_loss += out_loss
                            total_acc += target_acc
                            pbar.update(FLAGS.batch_size)
                            # pbar.write(str(source_acc))
                except tf.errors.OutOfRangeError:
                    print('Finished target training epoch.')
                print('Target train acc:', total_acc / i)
                print('Target train reconstruction loss:', total_loss / i)
                target_acc_train.append((epoch, total_acc/float(i)))
                target_loss_train.append((epoch, total_loss/float(i)))
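            # Evaluate the shared label classifier on the target test split.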
            sess.run(target_iterator, feed_dict={placeholder_x_t: x_test_t, placeholder_y_t: y_test_t})
            i = 0
            total_loss = 0
            total_acc = 0
            try:
                with tqdm(total=len(x_test_t)) as pbar:
                    while True:
                        out_loss, source_acc = sess.run([drcn.source_class_loss, drcn.source_class_acc],
                                                        feed_dict={placeholder_training: False})
                        i += 1
                        total_loss += out_loss
                        total_acc += source_acc
                        pbar.update(FLAGS.batch_size)
            except tf.errors.OutOfRangeError:
                print('Finished target evaluation epoch.')
            print('Target test acc:', total_acc / i)
            print('Target test loss:', total_loss / i)
            target_acc_test.append((epoch, total_acc / float(i)))
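            # Phase 2: supervised classification training on the source train split.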
            sess.run(source_iterator, feed_dict={placeholder_x_s: x_train_s, placeholder_y_s: y_train_s})
            i = 0
            total_loss = 0
            total_acc = 0
            try:
                with tqdm(total=len(x_train_s)) as pbar:
                    while True:
                        _, out_loss, source_acc = sess.run([drcn.optimize_class, drcn.source_class_loss,
                                                            drcn.source_class_acc])
                        i += 1
                        total_loss += out_loss
                        total_acc += source_acc
                        pbar.update(FLAGS.batch_size)
            except tf.errors.OutOfRangeError:
                print('Finished source training epoch.')
            print('Source train acc:', total_acc / i)
            print('Source train loss:', total_loss / i)
            source_acc_train.append((epoch, total_acc/float(i)))
            source_loss_train.append((epoch, total_loss/float(i)))
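            # Evaluate the classifier on the source test split.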
            sess.run(source_iterator, feed_dict={placeholder_x_s: x_test_s, placeholder_y_s: y_test_s})
            i = 0
            total_loss = 0
            total_acc = 0
            try:
                with tqdm(total=len(x_test_s)) as pbar:
                    while True:
                        out_loss, source_acc = sess.run([drcn.source_class_loss, drcn.source_class_acc],
                                                        feed_dict={placeholder_training: False})
                        i += 1
                        total_loss += out_loss
                        total_acc += source_acc
                        pbar.update(FLAGS.batch_size)
            except tf.errors.OutOfRangeError:
                print('Finished source evaluation epoch.')
            print('Source test acc:', total_acc / i)
            print('Source test loss:', total_loss / i)
            source_acc_test.append((epoch, total_acc/float(i)))

            # Save inside model_dir so that latest_checkpoint() above can find it on restart.
            saver.save(sess, os.path.join(FLAGS.model_dir, 'model.ckpt'), global_step=epoch)
    # Save stats for visualization
    with open(os.path.join(FLAGS.model_dir, 'stats.pkl'), 'wb') as f:
        pickle.dump({'source_acc_train': source_acc_train, 'source_acc_test': source_acc_test,
                     'target_acc_train': target_acc_train, 'target_acc_test': target_acc_test,
                     'source_loss_train': source_loss_train, 'target_loss_train': target_loss_train}, f)
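Example 1 calls a create_dataset helper that is not shown. A minimal sketch, assuming it only shuffles and batches the two placeholder-backed streams so the reinitializable iterator above can switch between them (the real helper may add mapping or prefetching):

def create_dataset(x_s, y_s, x_t, y_t):
    """Hypothetical helper: wrap the source and target placeholders in
    shuffled, batched tf.data pipelines with matching structure."""
    ds_source = (tf.data.Dataset.from_tensor_slices((x_s, y_s))
                 .shuffle(buffer_size=10000)
                 .batch(FLAGS.batch_size))
    ds_target = (tf.data.Dataset.from_tensor_slices((x_t, y_t))
                 .shuffle(buffer_size=10000)
                 .batch(FLAGS.batch_size))
    return ds_source, ds_target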
Code example #2
def main(_):
    """Main function for Adversarial Discriminative Domain Adaptation - ADDA"""
    # TODO: do not pass source label in target mode (it's not needed!)
    tf.reset_default_graph()
    if FLAGS.step == 'source':
        # Pretrain source classifier on SVHN dataset.
        (x_train, y_train), (x_test, y_test) = dp.load_svhn(channel_size=1,
                                                            truncate=False)
        # Configurations first
        iter_ratio = math.ceil((x_train.shape[0] / FLAGS.batch_size))
        print(iter_ratio)
        # SVHN images are 32x32; the channel count follows FLAGS.channel_size
        # (note the loader above hard-codes channel_size=1).
        feature_columns = [
            tf.feature_column.numeric_column("source",
                                             shape=(32, 32,
                                                    FLAGS.channel_size))
        ]

        # Set up the session config
        session_config = tf.ConfigProto()
        session_config.gpu_options.allow_growth = True

        config = tf.estimator.RunConfig(save_checkpoints_steps=int(iter_ratio),
                                        log_step_count_steps=None,
                                        session_config=session_config)

        # Set up the estimator
        classifier = tf.estimator.Estimator(
            model_fn=estimator_model_fn,
            model_dir="./model_s2m/source_model",
            params={
                'feature_columns': feature_columns,
                'iter_ratio': iter_ratio,
                'channel_size': FLAGS.channel_size
            },
            config=config)
        # Define hooks
        logging_hook = tf.train.LoggingTensorHook(
            tensors={"loss": "loss", "source_class_acc": "source_class_acc"},
            every_n_iter=100)
        # Set up train and eval specs
        train_spec = tf.estimator.TrainSpec(
            input_fn=tf.estimator.inputs.numpy_input_fn(
                {'source': x_train}, {'labels': y_train},
                shuffle=True,
                batch_size=128,
                num_epochs=FLAGS.total_epochs),
            hooks=[logging_hook])
        eval_spec = tf.estimator.EvalSpec(
            input_fn=tf.estimator.inputs.numpy_input_fn({'source': x_test},
                                                        {'labels': y_test},
                                                        shuffle=True,
                                                        batch_size=128,
                                                        num_epochs=1),
            steps=None,
            throttle_secs=300)
        # Train and evaluate
        tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
    elif FLAGS.step == 'target':
        # Load SVHN dataset
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_svhn(channel_size=1)
        # Load MNIST dataset
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_mnist(channel_size=1)
        # Configurations first
        iter_ratio = math.ceil((x_train_s.shape[0] / FLAGS.batch_size))
        print(iter_ratio)
        # MNIST images are 28x28 and SVHN images are 32x32; both are loaded
        # with a single channel above, while the columns use FLAGS.channel_size.
        feature_columns = [
            tf.feature_column.numeric_column("source",
                                             shape=(32, 32,
                                                    FLAGS.channel_size)),
            tf.feature_column.numeric_column("target",
                                             shape=(28, 28,
                                                    FLAGS.channel_size))
        ]

        # Set up the session config
        session_config = tf.ConfigProto()
        session_config.gpu_options.allow_growth = True

        config = tf.estimator.RunConfig(save_checkpoints_steps=1000,
                                        log_step_count_steps=None,
                                        session_config=session_config)

        # Set up the estimator
        classifier = tf.estimator.Estimator(
            model_fn=estimator_model_fn,
            model_dir="./model_s2m/adversarial_model",
            params={
                'feature_columns': feature_columns,
                'iter_ratio': iter_ratio,
                'channel_size': FLAGS.channel_size
            },
            config=config)
        # Define hooks
        logging_hook = tf.train.LoggingTensorHook(
            tensors={
                "loss_gen": "loss_gen",
                "loss_adv": "loss_adv",
                "target_class_acc": "target_class_acc",
                "source_class_acc": "source_class_acc",
                "target_class_acc_enc": "target_class_acc_enc"
            },
            every_n_iter=100)
        # Set up train and eval specs
        train_spec = tf.estimator.TrainSpec(
            input_fn=tf.estimator.inputs.numpy_input_fn(
                {
                    'source': x_train_s,
                    'target': x_train_t
                }, {
                    'label_s': y_train_s,
                    'label_t': y_train_t
                },
                shuffle=True,
                batch_size=128,
                num_epochs=FLAGS.total_epochs),
            hooks=[logging_hook])
        eval_spec = tf.estimator.EvalSpec(
            input_fn=tf.estimator.inputs.numpy_input_fn(
                {
                    'source': x_test_s,
                    'target': x_test_t
                }, {
                    'label_s': y_test_s,
                    'label_t': y_test_t
                },
                shuffle=True,
                batch_size=128,
                num_epochs=1),
            steps=None,
            throttle_secs=300)
        # Train and evaluate
        tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
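Both steps of this example route through an estimator_model_fn that is not shown. A minimal sketch of what its source-pretraining branch could look like, with a toy one-layer encoder standing in for the project's real network; the only things taken from the snippet are the feature columns, the labels dict key, and the tensor names 'loss' and 'source_class_acc' that the LoggingTensorHook resolves:

def estimator_model_fn(features, labels, mode, params):
    """Hypothetical sketch of the source-pretraining branch; the project's
    real encoder and classifier are defined elsewhere."""
    # Dense input from the "source" feature column, reshaped back to an image.
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    net = tf.reshape(net, [-1, 32, 32, params['channel_size']])
    net = tf.layers.conv2d(net, 32, 5, activation=tf.nn.relu)  # toy encoder
    logits = tf.layers.dense(tf.layers.flatten(net), 10)
    preds = tf.argmax(logits, axis=-1, output_type=tf.int32)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions={'class': preds})
    y = tf.cast(labels['labels'], tf.int32)
    # Named tensors so the LoggingTensorHook above can resolve them.
    loss = tf.identity(tf.losses.sparse_softmax_cross_entropy(y, logits), name='loss')
    tf.identity(tf.reduce_mean(tf.cast(tf.equal(preds, y), tf.float32)),
                name='source_class_acc')
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss)
    train_op = tf.train.AdamOptimizer().minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)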
Code example #3
def main(_):
    """Main function for the Domain-Adversarial Neural Network - DANN"""
    tf.reset_default_graph()
    # Load source and target data set
    if FLAGS.source == 'mnist':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_mnist(
            FLAGS.channel_size,
            FLAGS.truncate_mnist.lower() == 'true')
    elif FLAGS.source == 'mnistm':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_mnistm(FLAGS.channel_size)
    elif FLAGS.source == 'svhn':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_svhn(
            FLAGS.channel_size,
            FLAGS.truncate_svhn.lower() == 'true')
    else:
        sys.exit(
            'For the source set you have to choose one of [svhn, mnist, mnistm]!'
        )

    if FLAGS.target == 'mnist':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_mnist(
            FLAGS.channel_size,
            FLAGS.truncate_mnist.lower() == 'true')
    elif FLAGS.target == 'mnistm':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_mnistm(FLAGS.channel_size)
    elif FLAGS.target == 'svhn':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_svhn(
            FLAGS.channel_size,
            FLAGS.truncate_svhn.lower() == 'true')
    else:
        sys.exit(
            'For the target set you have to choose one of [svhn, mnist, mnistm]!'
        )

    # Configurations first
    iter_ratio = math.ceil((x_train_s.shape[0] / FLAGS.batch_size))
    print(iter_ratio)
    # Both domains are resized to 32x32 with FLAGS.channel_size channels.
    feature_columns = [
        tf.feature_column.numeric_column("x_s",
                                         shape=(32, 32, FLAGS.channel_size)),
        tf.feature_column.numeric_column("x_t",
                                         shape=(32, 32, FLAGS.channel_size))
    ]

    # Set up the session config
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True

    config = tf.estimator.RunConfig(save_checkpoints_steps=int(iter_ratio),
                                    log_step_count_steps=100,
                                    session_config=session_config)

    # Set up the estimator
    classifier = tf.estimator.Estimator(
        model_fn=estimator_model_fn,
        model_dir=FLAGS.model_dir,
        params={
            'feature_columns': feature_columns,
            'iter_ratio': iter_ratio,
            'source_size': 32,
            'target_size': 32
        },
        config=config)
    if FLAGS.mode == 'train':
        # Set up logging in training mode.
        train_hook = tf.train.LoggingTensorHook(
            tensors={
                "lr": "learning_rate",
                "loss": "loss",
                "source_class_acc": "source_class_acc"
            },
            every_n_iter=100)
        # Train and evaluate DANN
        train_spec = tf.estimator.TrainSpec(
            input_fn=tf.estimator.inputs.numpy_input_fn(
                {
                    'x_s': x_train_s,
                    'x_t': x_train_t
                }, {
                    'y_s': y_train_s,
                    'y_t': y_train_t
                },
                shuffle=True,
                batch_size=128,
                num_epochs=FLAGS.total_epochs),
            max_steps=int(iter_ratio * FLAGS.total_epochs),
            hooks=[train_hook])
        eval_spec = tf.estimator.EvalSpec(
            input_fn=tf.estimator.inputs.numpy_input_fn(
                {
                    'x_s': x_test_s,
                    'x_t': x_test_t
                },
                {
                    'y_s': y_test_s,
                    'y_t': y_test_t
                },
                shuffle=True,
                batch_size=128,
                num_epochs=1,
            ),
            steps=None,
            throttle_secs=1)
        tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
    elif FLAGS.mode == 'eval':
        classifier.evaluate(input_fn=tf.estimator.inputs.numpy_input_fn(
            {
                'x_s': x_test_s,
                'x_t': x_test_t
            },
            {
                'y_s': y_test_s,
                'y_t': y_test_t
            },
            shuffle=True,
            batch_size=128,
            num_epochs=1,
        ))
    else:
        assert FLAGS.mode == 'predict', '-mode flag has to be one of "train", "eval", "predict".'
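These examples read their configuration from a FLAGS object whose definitions are not shown. A plausible reconstruction of a representative subset using tf.app.flags, with flag names taken from the calls above (the defaults and help strings are assumptions; per-example flags such as step, source_only, opt, source_model and target_model would be defined the same way):

flags = tf.app.flags
flags.DEFINE_string('source', 'svhn', 'Source dataset: one of svhn, mnist, mnistm.')
flags.DEFINE_string('target', 'mnist', 'Target dataset: one of svhn, mnist, mnistm.')
flags.DEFINE_string('mode', 'train', 'One of train, eval, predict.')
flags.DEFINE_string('model_dir', './model', 'Checkpoint directory.')
flags.DEFINE_string('truncate_mnist', 'true', 'Truncate MNIST ("true"/"false").')
flags.DEFINE_string('truncate_svhn', 'true', 'Truncate SVHN ("true"/"false").')
flags.DEFINE_integer('channel_size', 3, 'Number of image channels (1 or 3).')
flags.DEFINE_integer('batch_size', 128, 'Mini-batch size.')
flags.DEFINE_integer('total_epochs', 20, 'Number of training epochs.')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses the flags, then calls main()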
Code example #4
def main(_):
    """Main function for Adversarial Discriminative Domain Adaptation - ADDA"""
    # TODO: do not pass source label in target mode (it's not needed!)
    tf.reset_default_graph()
    # Load source and target data set
    source_size = 32
    target_size = 32
    if FLAGS.source == 'mnist':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_mnist(
            FLAGS.channel_size,
            FLAGS.truncate_mnist.lower() == 'true')
    elif FLAGS.source == 'mnistm':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_mnistm(FLAGS.channel_size)
    elif FLAGS.source == 'svhn':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_svhn(
            FLAGS.channel_size,
            FLAGS.truncate_svhn.lower() == 'true')
    else:
        sys.exit(
            'For the source set you have to choose one of [svhn, mnist, mnistm]!'
        )

    if FLAGS.target == 'mnist':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_mnist(
            FLAGS.channel_size,
            FLAGS.truncate_mnist.lower() == 'true')
    elif FLAGS.target == 'mnistm':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_mnistm(FLAGS.channel_size)
    elif FLAGS.target == 'svhn':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_svhn(
            FLAGS.channel_size,
            FLAGS.truncate_svhn.lower() == 'true')
    else:
        sys.exit(
            'For the target set you have to choose one of [svhn, mnist, mnistm]!'
        )
    # Configurations first
    iter_ratio = math.ceil((x_train_s.shape[0] / FLAGS.batch_size))
    # Start training from here
    if FLAGS.step == 'source':
        feature_columns = [
            tf.feature_column.numeric_column("source",
                                             shape=(source_size, source_size,
                                                    FLAGS.channel_size)),
            tf.feature_column.numeric_column("target",
                                             shape=(target_size, target_size,
                                                    FLAGS.channel_size))
        ]
        # Set up the session config
        session_config = tf.ConfigProto()
        session_config.gpu_options.allow_growth = True

        config = tf.estimator.RunConfig(save_checkpoints_steps=int(iter_ratio),
                                        log_step_count_steps=iter_ratio,
                                        session_config=session_config)

        # Set up the estimator
        classifier = tf.estimator.Estimator(
            model_fn=estimator_model_fn,
            model_dir=FLAGS.source_model,
            params={
                'feature_columns': feature_columns,
                'iter_ratio': iter_ratio,
                'channel_size': FLAGS.channel_size,
                'source_size': source_size,
                'target_size': target_size
            },
            config=config)
        # Define hooks
        logging_hook = tf.train.LoggingTensorHook(
            tensors={"loss": "loss", "source_class_acc": "source_class_acc"},
            every_n_iter=iter_ratio)
        # Set up train and eval specs
        train_spec = tf.estimator.TrainSpec(
            input_fn=tf.estimator.inputs.numpy_input_fn(
                {
                    'source': x_train_s,
                    'target': x_train_t
                }, {
                    'label_s': y_train_s,
                    'label_t': y_train_t
                },
                shuffle=True,
                batch_size=FLAGS.batch_size,
                num_epochs=FLAGS.total_epochs),
            max_steps=int(iter_ratio * FLAGS.total_epochs),
            hooks=[logging_hook])
        eval_spec = tf.estimator.EvalSpec(
            input_fn=tf.estimator.inputs.numpy_input_fn(
                {
                    'source': x_test_s,
                    'target': x_test_t
                }, {
                    'label_s': y_test_s,
                    'label_t': y_test_t
                },
                shuffle=True,
                batch_size=FLAGS.batch_size,
                num_epochs=1),
            steps=None,
            throttle_secs=1)
        # Train and evaluate
        tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
    elif FLAGS.step == 'target':
        feature_columns = [
            tf.feature_column.numeric_column("source",
                                             shape=(source_size, source_size,
                                                    FLAGS.channel_size)),
            tf.feature_column.numeric_column("target",
                                             shape=(target_size, target_size,
                                                    FLAGS.channel_size))
        ]

        # Set up the session config
        session_config = tf.ConfigProto()
        session_config.gpu_options.allow_growth = True

        config = tf.estimator.RunConfig(save_checkpoints_steps=iter_ratio,
                                        log_step_count_steps=iter_ratio,
                                        session_config=session_config)

        # Set up the estimator
        classifier = tf.estimator.Estimator(
            model_fn=estimator_model_fn,
            model_dir=FLAGS.target_model,
            params={
                'feature_columns': feature_columns,
                'iter_ratio': iter_ratio,
                'channel_size': FLAGS.channel_size,
                'source_size': source_size,
                'target_size': target_size
            },
            config=config)
        # Define hooks
        logging_hook = tf.train.LoggingTensorHook(
            tensors={
                "learning_rate": "learning_rate",
                "loss_gen": "loss_gen",
                "loss_adv": "loss_adv",
                "target_class_acc": "target_class_acc",
                "source_class_acc": "source_class_acc"
            },
            every_n_iter=iter_ratio - 1)
        # Set up train and eval specs
        train_spec = tf.estimator.TrainSpec(
            input_fn=tf.estimator.inputs.numpy_input_fn(
                {
                    'source': x_train_s,
                    'target': x_train_t
                }, {
                    'label_s': y_train_s,
                    'label_t': y_train_t
                },
                shuffle=True,
                batch_size=FLAGS.batch_size,
                num_epochs=FLAGS.total_epochs),
            max_steps=int(iter_ratio * FLAGS.total_epochs),
            hooks=[logging_hook])
        eval_spec = tf.estimator.EvalSpec(
            input_fn=tf.estimator.inputs.numpy_input_fn(
                {
                    'source': x_test_s,
                    'target': x_test_t
                }, {
                    'label_s': y_test_s,
                    'label_t': y_test_t
                },
                shuffle=True,
                batch_size=FLAGS.batch_size,
                num_epochs=1),
            steps=None,
            throttle_secs=1)
        # Train and evaluate
        tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
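Every example loads data through the dp module, which is not included here. A minimal sketch of what dp.load_mnist might look like, consistent with how examples 1, 3 and 4 consume it (32x32 images, channel_size channels, integer labels). Example 2 declares a 28x28 target column instead, so the real module presumably has a resize switch that this sketch omits; the padding and truncation behaviour below are assumptions:

import numpy as np

def load_mnist(channel_size, truncate):
    """Hypothetical loader: MNIST padded to 32x32, grey values replicated
    across `channel_size` channels, optionally truncated for quick runs."""
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    def prep(x):
        x = np.pad(x, ((0, 0), (2, 2), (2, 2)), mode='constant')  # 28x28 -> 32x32
        x = x.astype(np.float32)[..., np.newaxis] / 255.0
        return np.repeat(x, channel_size, axis=-1)  # grey -> channel_size channels

    x_train, x_test = prep(x_train), prep(x_test)
    if truncate:
        x_train, y_train = x_train[:10000], y_train[:10000]
    return (x_train, y_train.astype(np.int32)), (x_test, y_test.astype(np.int32))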