Example #1
0
    def model_fn(features, labels, mode, params):
        """Estimator model_fn: classifier head on the conv backbone.

        Builds logits, a softmax cross-entropy loss, and eval metrics;
        emits no train op (evaluation-only spec).
        """
        inputs = features['feature']
        raw_labels = labels['label']
        one_hot = model_utils.get_label(
            raw_labels,
            params,
            FLAGS.src_num_classes,
            batch_size=FLAGS.train_batch_size)

        # Backbone forward pass, then a dense classification head.
        network_output = model.conv_model(
            inputs,
            mode,
            target_dataset=FLAGS.target_dataset,
            src_hw=FLAGS.src_hw,
            target_hw=FLAGS.target_hw)
        with tf.variable_scope('target_CLS'):
            logits = tf.layers.dense(
                inputs=network_output,
                units=FLAGS.src_num_classes,
                name=FLAGS.cls_dense_name)

        # Compute the loss in float32 regardless of the backbone's dtype.
        logits = tf.cast(logits, tf.float32)
        loss = tf.losses.softmax_cross_entropy(
            logits=logits,
            onehot_labels=one_hot,
        )

        metrics = model_utils.metric_fn(raw_labels, logits)

        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=None,
            eval_metric_ops=metrics,
        )
Example #2
0
    def model_fn(features, labels, mode, params):
        """Estimator model_fn: classifier, loss, fine-tune train op, metrics.

        Args:
            features: dict with key 'feature' holding the input batch.
            labels: dict with key 'label' holding the label batch.
            mode: a tf_estimator.ModeKeys value.
            params: estimator params, forwarded to model_utils.get_label.

        Returns:
            A tf_estimator.EstimatorSpec for the given mode.
        """
        feature = features['feature']
        labels = labels['label']
        one_hot_labels = model_utils.get_label(
            labels,
            params,
            FLAGS.src_num_classes,
            batch_size=FLAGS.train_batch_size)

        def get_logits():
            """Return the classifier logits for the input batch."""
            avg_pool = model.conv_model(feature,
                                        mode,
                                        target_dataset=FLAGS.target_dataset,
                                        src_hw=FLAGS.src_hw,
                                        target_hw=FLAGS.target_hw)
            name = 'final_dense_dst'
            with tf.variable_scope('target_CLS'):
                logits = tf.layers.dense(
                    inputs=avg_pool,
                    units=FLAGS.src_num_classes,
                    name=name,
                    kernel_initializer=tf.random_normal_initializer(
                        stddev=.05),
                )
            return logits

        logits = get_logits()
        # Compute the loss in float32 regardless of the network's dtype.
        logits = tf.cast(logits, tf.float32)

        dst_loss = tf.losses.softmax_cross_entropy(
            logits=logits,
            onehot_labels=one_hot_labels,
        )
        # L2 regularization on kernel weights only; batch-norm params are
        # excluded by name.
        dst_l2_loss = FLAGS.weight_decay * tf.add_n([
            tf.nn.l2_loss(v) for v in tf.trainable_variables()
            if 'batch_normalization' not in v.name and 'kernel' in v.name
        ])

        loss = dst_loss + dst_l2_loss

        train_op = None
        if mode == tf_estimator.ModeKeys.TRAIN:
            cur_finetune_step = tf.train.get_global_step()
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            # Make every train step also run the collected update ops
            # (e.g. batch-norm moving statistics).
            with tf.control_dependencies(update_ops):
                finetune_learning_rate = lr_schedule()
                optimizer = tf.train.MomentumOptimizer(
                    learning_rate=finetune_learning_rate,
                    momentum=0.9,
                    use_nesterov=True)
                # Fix: the original also built a second, never-run train op
                # via tf.contrib.slim.learning.create_train_op and then
                # immediately overwrote it with minimize(); only minimize()
                # was ever executed, so the slim op was dead graph clutter.
                with tf.variable_scope('finetune'):
                    train_op = optimizer.minimize(loss, cur_finetune_step)
            # Summary collection does not need a control dependency on
            # train_op; in TRAIN mode summaries are evaluated alongside it.
            tf.summary.scalar('classifier/finetune_lr',
                              finetune_learning_rate)

        eval_metrics = None
        if mode == tf_estimator.ModeKeys.EVAL:
            eval_metrics = model_utils.metric_fn(labels, logits)

        return tf_estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metrics,
        )
Example #3
0
def _top_accuracy(corr, n=5):
    """Return (labels, accuracy %) for the n highest-accuracy letters."""
    labels = [get_label(letter) for letter in list(corr.head(n).index)]
    values = list(round(corr['correct'].head(n) * 100, 2))
    return labels, values


def _top_error(corr, n=5):
    """Return (labels, error %) for the n lowest-accuracy letters."""
    labels = [get_label(letter) for letter in list(corr.tail(n).index)]
    values = list(round((1 - corr['correct'].tail(n)) * 100, 2))
    return labels, values


def create_plots():
    """Build a 3x2 figure: best/worst letters (train/test) and accuracy
    over time for both splits.

    Returns:
        The assembled matplotlib Figure.
    """
    color_dict = dict(best_train='#91ebe5',
                      worst_train='#f05151',
                      best_test='#2374f7',
                      worst_test='#f01d1d')

    df = load_stats_df()
    # NOTE(review): '== True' / '== False' kept verbatim — presumably
    # 'train_mode' is boolean, but the comparison also tolerates
    # non-boolean values already present in the data; confirm before
    # simplifying to a plain boolean mask.
    df_train = df[df['train_mode'] == True]
    df_test = df[df['train_mode'] == False]
    # Sort descending by accuracy so head() = best, tail() = worst.
    correct_train = aggregate_df(df_train).sort_values(['correct'],
                                                       ascending=False)
    correct_test = aggregate_df(df_test).sort_values(['correct'],
                                                     ascending=False)

    y, x = _top_accuracy(correct_train)
    y2, x2 = _top_error(correct_train)
    y3, x3 = _top_accuracy(correct_test)
    y4, x4 = _top_error(correct_test)

    x5, y5 = calculate_accuracies(df_train)
    x6, y6 = calculate_accuracies(df_test)

    plt.rcdefaults()
    fig, axs = plt.subplots(nrows=3, ncols=2)

    create_bar_chart(ax=axs[0, 0],
                     x=x,
                     y=y,
                     color=color_dict['best_train'],
                     inverted=True,
                     title='Letters with highest accuracy(train)',
                     xlabel='Accuracy[%]')
    create_bar_chart(ax=axs[1, 0],
                     x=x2,
                     y=y2,
                     color=color_dict['worst_train'],
                     inverted=False,
                     title='Letters with highest error(train)',
                     xlabel='Error[%]')
    create_bar_chart(ax=axs[0, 1],
                     x=x3,
                     y=y3,
                     color=color_dict['best_test'],
                     inverted=True,
                     title='Letters with highest accuracy(test)',
                     xlabel='Accuracy[%]')
    create_bar_chart(ax=axs[1, 1],
                     x=x4,
                     y=y4,
                     color=color_dict['worst_test'],
                     inverted=False,
                     title='Letters with highest error(test)',
                     xlabel='Error[%]')
    plot_accuracy_over_time(ax=axs[2, 0],
                            x=x5,
                            y=y5,
                            color='green',
                            title='Accuracy over time(train)')
    plot_accuracy_over_time(ax=axs[2, 1],
                            x=x6,
                            y=y6,
                            color='green',
                            title='Accuracy over time(test)')

    fig.patch.set_facecolor('#f0f0f0')

    plt.tight_layout()
    return fig