Code Example #1
File: evaluate.py  Project: LONG-9621/Stackedcapsule
    def model_fn(features, labels, mode, params):
        """Returns the model function."""
        feature = features['feature']
        labels = labels['label']
        one_hot_labels = model_utils.get_label(
            labels,
            params,
            FLAGS.src_num_classes,
            batch_size=FLAGS.train_batch_size)

        def get_logits():
            """Return the logits."""
            network_output = model.conv_model(
                feature,
                mode,
                target_dataset=FLAGS.target_dataset,
                src_hw=FLAGS.src_hw,
                target_hw=FLAGS.target_hw)
            name = FLAGS.cls_dense_name
            with tf.variable_scope('target_CLS'):
                logits = tf.layers.dense(inputs=network_output,
                                         units=FLAGS.src_num_classes,
                                         name=name)
            return logits

        logits = get_logits()
        # Compute the loss in float32 regardless of the model's compute dtype.
        logits = tf.cast(logits, tf.float32)

        dst_loss = tf.losses.softmax_cross_entropy(
            logits=logits,
            onehot_labels=one_hot_labels,
        )
        loss = dst_loss

        eval_metrics = model_utils.metric_fn(labels, logits)

        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=None,
            eval_metric_ops=eval_metrics,
        )
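
For context, here is a minimal sketch of how a model_fn like this is typically wired into a TF 1.x Estimator for evaluation. It assumes model_fn is accessible at module scope; the input shapes, model_dir, and step count below are illustrative placeholders, not part of the project.

import tensorflow.compat.v1 as tf

def eval_input_fn():
    """Illustrative input_fn yielding the ({'feature': ...}, {'label': ...}) structure model_fn expects."""
    features = {'feature': tf.zeros([8, 32, 32, 3])}   # assumed image batch shape
    labels = {'label': tf.zeros([8], dtype=tf.int32)}  # assumed integer class ids
    return tf.data.Dataset.from_tensors((features, labels))

estimator = tf.estimator.Estimator(
    model_fn=model_fn,            # the function defined above
    model_dir='/tmp/eval_ckpts',  # illustrative checkpoint directory
    params={},                    # forwarded to model_fn as `params`
)
metrics = estimator.evaluate(input_fn=eval_input_fn, steps=1)
print(metrics)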
Code Example #2
    def model_fn(features, labels, mode, params):
        """Returns the model function."""
        feature = features['feature']
        labels = labels['label']
        one_hot_labels = model_utils.get_label(
            labels,
            params,
            FLAGS.src_num_classes,
            batch_size=FLAGS.train_batch_size)

        def get_logits():
            """Return the logits."""
            avg_pool = model.conv_model(feature,
                                        mode,
                                        target_dataset=FLAGS.target_dataset,
                                        src_hw=FLAGS.src_hw,
                                        target_hw=FLAGS.target_hw)
            name = 'final_dense_dst'
            with tf.variable_scope('target_CLS'):
                logits = tf.layers.dense(
                    inputs=avg_pool,
                    units=FLAGS.src_num_classes,
                    name=name,
                    kernel_initializer=tf.random_normal_initializer(
                        stddev=.05),
                )
            return logits

        logits = get_logits()
        logits = tf.cast(logits, tf.float32)

        dst_loss = tf.losses.softmax_cross_entropy(
            logits=logits,
            onehot_labels=one_hot_labels,
        )
        # L2 weight decay over kernel weights, skipping batch-norm variables.
        dst_l2_loss = FLAGS.weight_decay * tf.add_n([
            tf.nn.l2_loss(v) for v in tf.trainable_variables()
            if 'batch_normalization' not in v.name and 'kernel' in v.name
        ])

        loss = dst_loss + dst_l2_loss

        train_op = None
        if mode == tf_estimator.ModeKeys.TRAIN:
            cur_finetune_step = tf.train.get_global_step()
            # Run batch-norm moving-average updates before the train step.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                finetune_learning_rate = lr_schedule()
                optimizer = tf.train.MomentumOptimizer(
                    learning_rate=finetune_learning_rate,
                    momentum=0.9,
                    use_nesterov=True)
                with tf.variable_scope('finetune'):
                    train_op = optimizer.minimize(
                        loss, global_step=cur_finetune_step)
            tf.summary.scalar('classifier/finetune_lr',
                              finetune_learning_rate)

        eval_metrics = None
        if mode == tf_estimator.ModeKeys.EVAL:
            eval_metrics = model_utils.metric_fn(labels, logits)

        return tf_estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metrics,
        )
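
The lr_schedule() call above is defined elsewhere in the project. As a minimal stand-in, assuming a piecewise-constant decay keyed off the global step (the boundaries and rates below are invented for illustration):

import tensorflow.compat.v1 as tf

def lr_schedule():
    """Stand-in learning-rate schedule: piecewise-constant decay on the global step."""
    global_step = tf.train.get_global_step()
    boundaries = [1000, 2000]        # step thresholds (assumed)
    values = [0.01, 0.001, 0.0001]   # learning rate per interval (assumed)
    return tf.train.piecewise_constant(global_step, boundaries, values)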