Example #1
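Evaluates two adapted models on the target test split: the PixelDA classifier restored from ../PixelDA/snapshot/pixelda and an ADDA-adapted LeNet restored from snapshot/adda_pixelda.

All six examples assume the same TF1-style scaffolding: project-local modules (util, dataset_factory, classifier, adversary, pixelda_model, pixelda_losses) and a set of command-line flags. A minimal sketch of that shared setup follows; the flag names are taken from the snippets, while the default values and descriptions are assumptions.

import logging
import os
import random
from collections import deque

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tqdm import tqdm

slim = tf.contrib.slim

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('source_dataset', 'mnist', 'Name of the source dataset.')
flags.DEFINE_string('target_dataset', 'mnist_m', 'Name of the target dataset.')
flags.DEFINE_string('dataset_dir', 'data', 'Directory containing the datasets.')
flags.DEFINE_integer('num_readers', 4, 'Number of parallel dataset readers.')
flags.DEFINE_integer('num_preprocessing_threads', 4,
                     'Number of preprocessing threads.')
flags.DEFINE_float('lr', 1e-4, 'Initial learning rate.')
flags.DEFINE_integer('iteration', 20000, 'Number of training iterations.')
flags.DEFINE_integer('snapshot', 5000, 'Snapshot interval in iterations.')


def main(_):
    pass  # stand-in; use one of the example main() functions below


if __name__ == '__main__':
    tf.app.run()  # parses FLAGS, then calls main(_)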
def main(_):
    util.config_logging()

    config = tf.ConfigProto(device_count=dict(GPU=1))
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    #########################
    # Preprocess the inputs #
    #########################
    source_dataset = dataset_factory.get_dataset(FLAGS.source_dataset,
                                                 split_name='train',
                                                 dataset_dir=FLAGS.dataset_dir)
    num_source_classes = source_dataset.num_classes
    source_images, source_labels = dataset_factory.provide_batch(
        FLAGS.source_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers,
        32, FLAGS.num_preprocessing_threads)
    source_label = tf.argmax(source_labels['classes'], 1)
    del source_labels['classes']

    target_dataset = dataset_factory.get_dataset(FLAGS.target_dataset,
                                                 split_name='test',
                                                 dataset_dir=FLAGS.dataset_dir)
    target_images, target_labels = dataset_factory.provide_batch(
        FLAGS.target_dataset, 'test', FLAGS.dataset_dir, FLAGS.num_readers, 1,
        FLAGS.num_preprocessing_threads)
    target_label = tf.argmax(target_labels['classes'], -1)
    del target_labels['classes']
    num_target_classes = target_dataset.num_classes

    ####################
    # Define the model #
    ####################
    gen, dis, cls = pixelda_model.create_model(target_images, source_images,
                                               num_target_classes)
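    # Convert the logits to predicted class ids for evaluation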
    cls['target_task_logits'] = tf.argmax(cls['target_task_logits'], -1)

    net, layers = classifier.LeNet(target_images,
                                   False,
                                   num_target_classes,
                                   reuse_private=False,
                                   private_scope='target',
                                   reuse_shared=False,
                                   shared_scope='target')
    net = tf.argmax(net, -1)

    with slim.queues.QueueRunners(sess):
        #############################
        # Evaluate PixelDA transfer #
        #############################
        cls_var = util.collect_vars('classifier')
        util.evalutation(sess, cls['target_task_logits'], target_label,
                         num_target_classes, '../PixelDA/snapshot', 'pixelda',
                         target_dataset.num_samples, cls_var, target_images)

        #################
        # Evaluate ADDA #
        #################
        target_vars = util.collect_vars('target')
        target_vars = util.copyKeySet(cls_var, target_vars)
        util.evalutation(sess, net, target_label, num_target_classes,
                         'snapshot', 'adda_pixelda',
                         target_dataset.num_samples, target_vars,
                         target_images)
Example #2
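Evaluates a source-only LeNet baseline and an ADDA-adapted LeNet on the target test split, restoring both from snapshot directories under ADDA/snapshot.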
def main(_):
    util.config_logging()

    config = tf.ConfigProto(device_count=dict(GPU=1))
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    target_dataset = dataset_factory.get_dataset(FLAGS.target_dataset,
                                                 split_name='test',
                                                 dataset_dir=FLAGS.dataset_dir)
    target_images, target_labels = dataset_factory.provide_batch(
        FLAGS.target_dataset, 'test', FLAGS.dataset_dir, FLAGS.num_readers, 1,
        FLAGS.num_preprocessing_threads)
    target_label = tf.argmax(target_labels['classes'], -1)
    del target_labels['classes']
    num_target_classes = target_dataset.num_classes

    ####################
    # Define the model #
    ####################
    net, layers = classifier.LeNet(target_images,
                                   False,
                                   num_target_classes,
                                   reuse_private=False,
                                   private_scope='source_only',
                                   reuse_shared=False,
                                   shared_scope='source_only')
    net = tf.argmax(net, -1)

    sess.run(tf.global_variables_initializer())

    with slim.queues.QueueRunners(sess):
        ########################
        # Evaluate source-only #
        ########################
        util.evalutation(sess, net, target_label, num_target_classes,
                         'ADDA/snapshot', 'LeNet_mnist',
                         target_dataset.num_samples, target_images)

        #################
        # Evaluate ADDA #
        #################
        util.evalutation(sess, net, target_label, num_target_classes,
                         'ADDA/snapshot', 'adda_lenet_mnist_minstm',
                         target_dataset.num_samples, target_images)
Example #3
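Runs ADDA-style adversarial adaptation on top of a pretrained PixelDA model: an adversarial discriminator learns to distinguish transferred-source fc3 features from target fc3 features, while the target LeNet is trained to fool it. The PixelDA weights are restored from ../PixelDA/snapshot/pixelda and the adapted target network is saved to snapshot/adda_pixelda.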
def main(_):
    util.config_logging()

    config = tf.ConfigProto(device_count=dict(GPU=1))
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    seed = random.randrange(2**32 - 2)
    logging.info('Using random seed {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed + 1)
    tf.set_random_seed(seed + 2)

    #########################
    # Preprocess the inputs #
    #########################
    source_dataset = dataset_factory.get_dataset(FLAGS.source_dataset,
                                                 split_name='train',
                                                 dataset_dir=FLAGS.dataset_dir)
    num_source_classes = source_dataset.num_classes
    source_images, _ = dataset_factory.provide_batch(
        FLAGS.source_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers,
        128, FLAGS.num_preprocessing_threads)

    target_dataset = dataset_factory.get_dataset(FLAGS.target_dataset,
                                                 split_name='train',
                                                 dataset_dir=FLAGS.dataset_dir)
    num_target_classes = target_dataset.num_classes
    target_images, _ = dataset_factory.provide_batch(
        FLAGS.target_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers,
        128, FLAGS.num_preprocessing_threads)

    if num_source_classes != num_target_classes:
        raise ValueError(
            'Source and Target datasets must have the same number of classes. '
            'Got %d and %d.' % (num_source_classes, num_target_classes))

    gen, dis, cls, transLayer = pixelda_model.create_model(
        target_images, source_images, num_source_classes)

    target_net, target_layers = classifier.LeNet(target_images,
                                                 False,
                                                 num_target_classes,
                                                 reuse_private=False,
                                                 private_scope='target',
                                                 reuse_shared=False,
                                                 shared_scope='target')
    source_net = cls['transferred_task_logits']

    # Choose here which layer the target network should learn to match
    source_net = transLayer['fc3']
    target_net = target_layers['fc3']

    # Adversarial network input: flatten multi-dimensional features to 1-D.
    # These fc3 features are already vectors, so the reshape is a no-op here.
    source_net = tf.reshape(source_net, [-1, int(source_net.get_shape()[-1])])
    target_net = tf.reshape(target_net, [-1, int(target_net.get_shape()[-1])])
    # Stack the features from both networks into a single batch
    adversary_net = tf.concat([source_net, target_net], 0)
    source_adversary_label = tf.zeros([tf.shape(source_net)[0]],
                                      tf.int32)  # source samples labeled 0
    target_adversary_label = tf.ones([tf.shape(target_net)[0]],
                                     tf.int32)  # target samples labeled 1
    adversary_label = tf.concat(
        [source_adversary_label, target_adversary_label], 0)
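    # Discriminator over the stacked features; [500, 500] are presumably
    # the hidden-layer sizes used by the adversary module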
    adversary_logits = adversary.adversarial_discriminator(
        adversary_net, [500, 500])

    #####################
    # Define the losses #
    #####################
    mapping_loss = tf.losses.sparse_softmax_cross_entropy(
        1 - adversary_label,
        adversary_logits)  # mapping loss is low when the discriminator is fooled
    adversary_loss = tf.losses.sparse_softmax_cross_entropy(
        adversary_label, adversary_logits)  # adversary loss is low when it is right

    gan_var = util.collect_vars('generator')
    cls_var = util.collect_vars('classifier')
    target_vars = util.collect_vars('target')
    adversary_vars = util.collect_vars('adversary')
    target_vars = util.copyKeySet(cls_var, target_vars)

    lr_var = tf.Variable(FLAGS.lr, name='learning_rate', trainable=False)
    optimizer = tf.train.AdamOptimizer(lr_var, 0.5)

    mapping_step = optimizer.minimize(
        mapping_loss, var_list=list(
            target_vars.values()))  # train the target vars to fool the adversary
    adversary_step = optimizer.minimize(
        adversary_loss, var_list=list(
            adversary_vars.values()))  # train the adversary to separate source and target

    # restore weights => TODO: fix this part so gan and cls can be loaded
    sess.run(tf.global_variables_initializer())
    output_dir = os.path.join('../PixelDA/snapshot', 'pixelda')
    if os.path.isdir(output_dir):
        weights = tf.train.latest_checkpoint(output_dir)
        logging.info('Restoring weights from {}:'.format(weights))
        logging.info('    Restoring generator model:')
        for src, tgt in gan_var.items():
            logging.info('        {:30} -> {:30}'.format(src, tgt.name))
        gen_restorer = tf.train.Saver(var_list=gan_var)
        gen_restorer.restore(sess, weights)
        logging.info('Restoring weights from {}:'.format(weights))
        logging.info('    Restoring classifier model:')
        for src, tgt in cls_var.items():
            logging.info('        {:30} -> {:30}'.format(src, tgt.name))
        cls_restorer = tf.train.Saver(var_list=cls_var)
        cls_restorer.restore(sess, weights)
        logging.info('    Restoring target model:')
        for src, tgt in target_vars.items():
            logging.info('        {:30} -> {:30}'.format(src, tgt.name))
        target_restorer = tf.train.Saver(var_list=target_vars)
        target_restorer.restore(sess, weights)
    else:
        return

    output_dir = os.path.join('snapshot', 'adda_pixelda')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    mapping_losses = deque(maxlen=10)
    adversary_losses = deque(maxlen=10)
    bar = tqdm(range(FLAGS.iteration))
    bar.set_description('{} (lr: {:.0e})'.format('adda_pixelda', FLAGS.lr))
    bar.refresh()

    # TensorBoard: write the graph for visualization
    writer = tf.summary.FileWriter(output_dir, sess.graph)

    display = 10
    stepsize = None
    with slim.queues.QueueRunners(sess):
        for i in bar:
            #g-step
            mapping_loss_val, _ = sess.run([mapping_loss, mapping_step])
            mapping_losses.append(mapping_loss_val)

            #d-step
            adversary_loss_val, _ = sess.run([adversary_loss, adversary_step])
            adversary_losses.append(adversary_loss_val)

            if i % display == 0:
                logging.info(
                    '{:20} Mapping: {:10.4f}     (avg: {:10.4f})'
                    '    Adversary: {:10.4f}     (avg: {:10.4f})'.format(
                        'Iteration {}:'.format(i), mapping_loss_val,
                        np.mean(mapping_losses), adversary_loss_val,
                        np.mean(adversary_losses)))
            if stepsize is not None and (i + 1) % stepsize == 0:
                lr = sess.run(lr_var.assign(FLAGS.lr * 0.1))
                logging.info('Changed learning rate to {:.0e}'.format(lr))
                bar.set_description('{} (lr: {:.0e})'.format(
                    'adda_pixelda', lr))
            if (i + 1) % FLAGS.snapshot == 0:
                snapshot_path = target_restorer.save(
                    sess,
                    os.path.join(output_dir, 'adda_pixelda'),
                    global_step=i + 1)
                logging.info('Saved snapshot to {}'.format(snapshot_path))
Example #4
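Restores a trained PixelDA checkpoint from PixelDA/snapshot/pixelda, plots source images next to their generator-transferred counterparts, then reports per-class and overall accuracy on the target split.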
def main(_):
    util.config_logging()

    config = tf.ConfigProto(device_count=dict(GPU=1))
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    #########################
    # Preprocess the inputs #
    #########################
    source_dataset = dataset_factory.get_dataset(FLAGS.source_dataset,
                                                 split_name='test',
                                                 dataset_dir=FLAGS.dataset_dir)
    num_source_classes = source_dataset.num_classes
    source_images, source_labels = dataset_factory.provide_batch(
        FLAGS.source_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers, 1,
        FLAGS.num_preprocessing_threads)
    source_label = tf.argmax(source_labels['classes'], 1)
    del source_labels['classes']

    target_dataset = dataset_factory.get_dataset(FLAGS.target_dataset,
                                                 split_name='test',
                                                 dataset_dir=FLAGS.dataset_dir)
    target_images, target_labels = dataset_factory.provide_batch(
        FLAGS.target_dataset, 'test', FLAGS.dataset_dir, FLAGS.num_readers, 1,
        FLAGS.num_preprocessing_threads)
    target_label = tf.argmax(target_labels['classes'], -1)
    del target_labels['classes']
    num_target_classes = target_dataset.num_classes

    if num_source_classes != num_target_classes:
        raise ValueError(
            'Source and Target datasets must have the same number of classes. '
            'Got %d and %d.' % (num_source_classes, num_target_classes))

    gen, dis, cls = pixelda_model.create_model(target_images, source_images,
                                               num_source_classes)

    cls['target_task_logits'] = tf.argmax(cls['target_task_logits'], -1)
    cls['transferred_task_logits'] = tf.argmax(cls['transferred_task_logits'],
                                               -1)
    cls['source_task_logits'] = tf.argmax(cls['source_task_logits'], -1)

    # Use the entire split by default
    num_examples = target_dataset.num_samples

    # cls_var_dict = util.collect_vars('classifier')
    # cls_restorer = tf.train.Saver(var_list=cls_var_dict)
    # gen_var_dict = util.collect_vars('generator')
    # gen_restorer = tf.train.Saver(var_list=gen_var_dict)
    restorer = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    output_dir = os.path.join('PixelDA/snapshot', 'pixelda')
    if os.path.isdir(output_dir):
        weights = tf.train.latest_checkpoint(output_dir)
        logging.info('Evaluating {}'.format(weights))

        # print all tensors in checkpoint file
        # chkp.print_tensors_in_checkpoint_file(weights, tensor_name='', all_tensors=True)

        # cls_restorer.restore(sess, weights)
        # gen_restorer.restore(sess, weights)
        restorer.restore(sess, weights)
    else:
        logging.info('Snapshot directory not found: {}'.format(output_dir))
        return False

    class_correct = np.zeros(num_source_classes, dtype=np.int32)
    class_counts = np.zeros(num_source_classes, dtype=np.int32)

    # classification loss
    with slim.queues.QueueRunners(sess):
        plt.figure()
        for i in range(16):
            np_image, np_gen = sess.run([source_images, gen])
            _, height, width, _ = np_image.shape
            plt.subplot(4, 8, 2 * i + 1)
            plt.imshow(np_image[0])
            plt.title('%d x %d' % (height, width))
            plt.subplot(4, 8, 2 * i + 2)
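            # Rescale the generator output from [-1, 1] to [0, 1] for display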
            np_gen[0] /= 2
            np_gen[0] += 0.5
            plt.imshow(np_gen[0])
            plt.title('%d x %d' % (height, width))
            plt.axis('off')
        plt.show()

        for i in tqdm(range(num_examples)):
            predictions, gt = sess.run(
                [cls['target_task_logits'], target_label])
            class_counts[gt[0]] += 1
            if predictions[0] == gt[0]:
                class_correct[gt[0]] += 1

        logging.info('Class accuracies:')
        logging.info('    ' + util.format_array(class_correct / class_counts))
        logging.info('Overall accuracy:')
        logging.info('    ' +
                     str(np.sum(class_correct) / np.sum(class_counts)))

    return True
Example #5
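Trains the PixelDA model itself, alternating a combined discriminator/classifier step with a generator step under an exponentially decaying learning rate, and snapshots to PixelDA/snapshot/pixelda.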
def main(_):
    util.config_logging()

    config = tf.ConfigProto(device_count=dict(GPU=1))
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    seed = random.randrange(2**32 - 2)
    logging.info('Using random seed {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed + 1)
    tf.set_random_seed(seed + 2)

    #########################
    # Preprocess the inputs #
    #########################
    source_dataset = dataset_factory.get_dataset(FLAGS.source_dataset,
                                                 split_name='train',
                                                 dataset_dir=FLAGS.dataset_dir)
    num_source_classes = source_dataset.num_classes
    source_images, source_labels = dataset_factory.provide_batch(
        FLAGS.source_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers,
        32, FLAGS.num_preprocessing_threads)
    source_label = tf.argmax(source_labels['classes'], 1)
    del source_labels['classes']

    target_dataset = dataset_factory.get_dataset(FLAGS.target_dataset,
                                                 split_name='test',
                                                 dataset_dir=FLAGS.dataset_dir)
    target_images, _ = dataset_factory.provide_batch(
        FLAGS.target_dataset, 'test', FLAGS.dataset_dir, FLAGS.num_readers, 32,
        FLAGS.num_preprocessing_threads)
    num_target_classes = target_dataset.num_classes

    if num_source_classes != num_target_classes:
        raise ValueError(
            'Source and Target datasets must have the same number of classes. '
            'Got %d and %d.' % (num_source_classes, num_target_classes))

    gen, dis, cls = pixelda_model.create_model(target_images, source_images,
                                               num_source_classes, True)

    generator_vars = util.collect_vars('generator')
    discriminator_vars = util.collect_vars('discriminator')
    classifier_vars = util.collect_vars('classifier')

    # Collect the per-scope update ops (typically batch-norm moving-average
    # updates) so they run alongside the corresponding train steps
    gen_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS, 'generator')
    dis_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS, 'discriminator')
    cls_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS, 'classifier')
    up_op_total = gen_op + dis_op + cls_op + tf.trainable_variables()

    gen_loss = pixelda_losses.g_step_loss(source_images, source_label, dis,
                                          cls, num_source_classes)
    d_loss = pixelda_losses.d_step_loss(dis, cls, source_label,
                                        num_source_classes)
    # dis_loss = pixelda_losses.discriminator_loss(dis)
    # cls_loss = pixelda_losses.classification_loss(cls, source_label, num_source_classes)

    learning_rate = tf.train.exponential_decay(
        FLAGS.lr,
        tf.train.get_or_create_global_step(),
        decay_steps=20000,
        decay_rate=0.95,
        staircase=True)

    optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)

    # One d-step updates both the discriminator and the classifier
    dstep_po = dis_op + cls_op
    with tf.control_dependencies(dstep_po):
        dis_step = optimizer.minimize(
            d_loss,
            var_list=list(discriminator_vars.values()) +
            list(classifier_vars.values()))
    with tf.control_dependencies(gen_op):
        gen_step = optimizer.minimize(gen_loss,
                                      var_list=list(generator_vars.values()))
    # with tf.control_dependencies(dis_op):
    #    dis_step = optimizer.minimize(dis_loss, var_list=list(discriminator_vars.values()))
    # with tf.control_dependencies(gen_op):
    #    gen_step = optimizer.minimize(gen_loss, var_list=list(generator_vars.values()))
    # with tf.control_dependencies(cls_op):
    #    cls_step = optimizer.minimize(cls_loss, var_list=list(classfier_vars.values()))

    # dis_var_list = dis_op + list(discriminator_vars.values())
    # gen_vars_list = gen_op + list(generator_vars.values())
    # cls_vars_list = cls_op + list(classifier_vars.values())

    # dis_step = optimizer.minimize(dis_loss, var_list=dis_var_list)
    # gen_step = optimizer.minimize(gen_loss, var_list=gen_vars_list)
    # cls_step = optimizer.minimize(cls_loss, var_list=cls_vars_list)

    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver()
    output_dir = os.path.join('PixelDA/snapshot', 'pixelda')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    dis_losses = deque(maxlen=10)
    gen_losses = deque(maxlen=10)
    cls_losses = deque(maxlen=10)
    bar = tqdm(range(FLAGS.iteration))
    bar.set_description('{} (lr: {:.0e})'.format('pixelda', FLAGS.lr))
    bar.refresh()

    # TensorBoard: write the graph for visualization
    writer = tf.summary.FileWriter(output_dir, sess.graph)

    display = 10
    stepsize = None
    with slim.queues.QueueRunners(sess):
        for i in bar:
            # d-step
            # dis_loss_val, _ = sess.run([dis_loss, dis_step])
            # dis_losses.append(dis_loss_val)
            # cls_loss_val, _ = sess.run([cls_loss, cls_step])
            # cls_losses.append(cls_loss_val)

            dstep_loss_val, _ = sess.run([d_loss, dis_step])
            dis_losses.append(dstep_loss_val)

            # g-step
            gen_loss_val, _ = sess.run([gen_loss, gen_step])
            gen_losses.append(gen_loss_val)

            if i % display == 0:
                cur_lr = sess.run(learning_rate)
                logging.info('learning rate : {:10.4f}'.format(cur_lr))
                logging.info(
                    '{:20} dstep loss: {:10.4f}     (avg: {:10.4f})'
                    '    gen loss: {:10.4f}     (avg: {:10.4f})'.format(
                        'Iteration {}:'.format(i), dstep_loss_val,
                        np.mean(dis_losses), gen_loss_val,
                        np.mean(gen_losses)))

            if (i + 1) % FLAGS.snapshot == 0:
                snapshot_path = saver.save(sess,
                                           os.path.join(output_dir, 'pixelda'),
                                           global_step=i + 1)
                logging.info('Saved snapshot to {}'.format(snapshot_path))
Example #6
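Trains the source-only LeNet classifier on the source training split and saves it to ADDA/snapshot/LeNet_mnist; this is the baseline that Example #2 evaluates and the starting point for ADDA.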
def main(_):
    util.config_logging()

    config = tf.ConfigProto(device_count=dict(GPU=1))
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    seed = random.randrange(2**32 - 2)
    logging.info('Using random seed {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed + 1)
    tf.set_random_seed(seed + 2)

    #########################
    # Preprocess the inputs #
    #########################
    source_dataset = dataset_factory.get_dataset(FLAGS.source_dataset,
                                                 split_name='train',
                                                 dataset_dir=FLAGS.dataset_dir)
    num_source_classes = source_dataset.num_classes
    source_images, source_labels = dataset_factory.provide_batch(
        FLAGS.source_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers,
        32, FLAGS.num_preprocessing_threads)
    source_labels['class'] = tf.argmax(source_labels['classes'], 1)
    del source_labels['classes']

    ####################
    # Define the model #
    ####################
    net, layers = classifier.LeNet(source_images,
                                   False,
                                   num_source_classes,
                                   reuse_private=False,
                                   private_scope='source',
                                   reuse_shared=False,
                                   shared_scope='source')
    class_loss = tf.losses.sparse_softmax_cross_entropy(
        source_labels['class'], net)
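    # get_total_loss() sums the cross-entropy above with any registered
    # regularization losses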
    loss = tf.losses.get_total_loss()

    lr = FLAGS.lr
    lr_var = tf.Variable(lr, name='learning_rate', trainable=False)
    optimizer = tf.train.MomentumOptimizer(lr_var, 0.99)
    step = optimizer.minimize(loss)
    sess.run(tf.global_variables_initializer())

    model_vars = util.collect_vars('source')
    saver = tf.train.Saver(var_list=model_vars)
    output_dir = os.path.join('ADDA/snapshot', 'LeNet_mnist')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    losses = deque(maxlen=10)
    bar = tqdm(range(FLAGS.iteration))
    bar.set_description('{} (lr: {:.0e})'.format('LeNet_mnist', lr))
    bar.refresh()

    display = 10
    stepsize = None
    with slim.queues.QueueRunners(sess):
        for i in bar:
            loss_val, _ = sess.run([loss, step])
            losses.append(loss_val)
            if i % display == 0:
                logging.info('{:20} {:10.4f}     (avg: {:10.4f})'.format(
                    'Iteration {}:'.format(i), loss_val, np.mean(losses)))
            if stepsize is not None and (i + 1) % stepsize == 0:
                lr = sess.run(lr_var.assign(lr * 0.1))
                logging.info('Changed learning rate to {:.0e}'.format(lr))
                bar.set_description('{} (lr: {:.0e})'.format(
                    'LeNet_mnist', lr))
            if (i + 1) % FLAGS.snapshot == 0:
                snapshot_path = saver.save(sess,
                                           os.path.join(
                                               output_dir, 'LeNet_mnist'),
                                           global_step=i + 1)
                logging.info('Saved snapshot to {}'.format(snapshot_path))

    sess.close()

    return