Code Example #1
File: test.py  Project: mlnagents/cnn_lstm_ctc_ocr
def _get_testing(rnn_logits,sequence_length,label,label_length):
    """Create ops for testing (all scalars): 
       loss: CTC loss function value, 
       label_error:  Batch-normalized edit distance on beam search max
       sequence_error: Batch-normalized sequence error rate
    """
    with tf.name_scope("train"):
        loss = model.ctc_loss_layer(rnn_logits,label,sequence_length) 
    with tf.name_scope("test"):
        predictions, probability = tf.nn.ctc_beam_search_decoder(rnn_logits, # also returns the log probabilities from the CTC decoder
                                                   sequence_length,
                                                   beam_width=128,
                                                   top_paths=1,
                                                   merge_repeated=False) # if True, repeated characters are merged in the decoder output
        #predictions, probability = tf.nn.ctc_greedy_decoder(rnn_logits,  # alternative (greedy) decoder
        #                                           sequence_length,
        #                                           merge_repeated=True)
        hypothesis = tf.cast(predictions[0], tf.int32) # for edit_distance
        label_errors = tf.edit_distance(hypothesis, label, normalize=False) # Levenshtein distance
        sequence_errors = tf.count_nonzero(label_errors, axis=0) # count of nonzero distances, i.e. cases where the model output differs from the ground truth
        total_label_error = tf.reduce_sum(label_errors) # sum of Levenshtein distances over the batch
        total_labels = tf.reduce_sum(label_length) # number of ground-truth characters in the whole batch
        label_error = tf.truediv(total_label_error, tf.cast(total_labels, tf.float32), name='label_error') # normalized Levenshtein distance (divided by the number of characters)
        sequence_error = tf.truediv(tf.cast(sequence_errors, tf.int32), tf.shape(label_length)[0], name='sequence_error') # fraction of incorrect sequences
        tf.summary.scalar( 'loss', loss )
        tf.summary.scalar( 'label_error', label_error )
        tf.summary.scalar( 'sequence_error', sequence_error )

    return loss, label_error, sequence_error, predictions[0], probability
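
A minimal usage sketch of the ops returned above (not part of the original project): it assumes TensorFlow 1.x, that rnn_logits, sequence_length, label (a SparseTensor), and label_length have already been built by the model and input pipeline, and that a trained checkpoint exists; the checkpoint path below is a placeholder.

loss, label_error, sequence_error, prediction, probability = _get_testing(
    rnn_logits, sequence_length, label, label_length)

saver = tf.train.Saver()
with tf.Session() as sess:
    # Restore trained weights (the checkpoint path is a placeholder)
    saver.restore(sess, 'ckpt/model.ckpt-12345')
    # Evaluate the scalar test metrics for one batch of data
    loss_val, cer, ser = sess.run([loss, label_error, sequence_error])
    print('loss=%.4f  label_error=%.4f  sequence_error=%.4f' % (loss_val, cer, ser))
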
Code Example #2
File: test.py  Project: schperics/cnn_lstm_ctc_ocr
def _get_testing(rnn_logits, sequence_length, label, label_length):
    """Create ops for testing (all scalars): 
       loss: CTC loss function value, 
       label_error:  Batch-normalized edit distance on beam search max
       sequence_error: Batch-normalized sequence error rate
    """
    with tf.name_scope("train"):
        loss = model.ctc_loss_layer(rnn_logits, label, sequence_length)
    with tf.name_scope("test"):
        predictions, _ = tf.nn.ctc_beam_search_decoder(rnn_logits,
                                                       sequence_length,
                                                       beam_width=128,
                                                       top_paths=1,
                                                       merge_repeated=True)
        hypothesis = tf.cast(predictions[0], tf.int32)  # for edit_distance
        label_errors = tf.edit_distance(hypothesis, label, normalize=False)
        sequence_errors = tf.count_nonzero(label_errors, axis=0)
        total_label_error = tf.reduce_sum(label_errors)
        total_labels = tf.reduce_sum(label_length)
        label_error = tf.truediv(total_label_error,
                                 tf.cast(total_labels, tf.float32),
                                 name='label_error')
        sequence_error = tf.truediv(tf.cast(sequence_errors, tf.int32),
                                    tf.shape(label_length)[0],
                                    name='sequence_error')
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('label_error', label_error)
        tf.summary.scalar('sequence_error', sequence_error)

    return loss, label_error, sequence_error
Code Example #3
def _get_testing( rnn_logits,sequence_length,label,label_length,
                  continuous_eval, lexicon, lexicon_prior ):
    """Create ops for testing (all scalars): 
       loss: CTC loss function value, 
       label_error:   batch level edit distance on beam search max
       sequence_error: batch level sequence error rate
    """

    with tf.name_scope( "train" ):
        # Reduce by mean (rather than sum) if doing continuous evaluation
        batch_loss = model.ctc_loss_layer( rnn_logits,label,sequence_length,
                                           reduce_mean=continuous_eval) 
    with tf.name_scope( "test" ):
        predictions,_ = _get_output( rnn_logits, sequence_length,
                                     lexicon, lexicon_prior )

        hypothesis = tf.cast( predictions[0], tf.int32 ) # for edit_distance

        # Per-sequence statistic
        num_label_errors = tf.edit_distance( hypothesis, label, 
                                             normalize=False )

        # Per-batch summary counts
        batch_num_label_errors = tf.reduce_sum( num_label_errors)
        batch_num_sequence_errors = tf.count_nonzero( num_label_errors, axis=0 )
        batch_num_labels = tf.reduce_sum( label_length )
        
        # Wide integer type casts (prefer unsigned, but truediv dislikes those)
        batch_num_label_errors = tf.cast( batch_num_label_errors, tf.int64 )
        batch_num_sequence_errors = tf.cast( batch_num_sequence_errors, 
                                             tf.int64 )
        batch_num_labels = tf.cast( batch_num_labels, tf.int64)
        
    return batch_loss, batch_num_label_errors, batch_num_sequence_errors, \
        batch_num_labels, predictions
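
Because this variant returns raw per-batch counts instead of ratios, the caller is expected to accumulate them over the evaluation set. Below is a sketch of that aggregation, under these assumptions: a TF 1.x session sess is open, the input pipeline yields num_eval_batches batches of size batch_size, and lexicon/lexicon_prior are passed as None purely as placeholders; all of these names are hypothetical.

batch_loss, num_label_err, num_seq_err, num_labels, predictions = _get_testing(
    rnn_logits, sequence_length, label, label_length,
    continuous_eval=True, lexicon=None, lexicon_prior=None)

total_label_err = total_seq_err = total_labels = total_seqs = 0
for _ in range(num_eval_batches):
    le, se, nl = sess.run([num_label_err, num_seq_err, num_labels])
    total_label_err += le    # accumulated edit distance
    total_seq_err += se      # accumulated count of mismatched sequences
    total_labels += nl       # accumulated ground-truth character count
    total_seqs += batch_size
print('label_error    =', total_label_err / float(total_labels))  # character error rate
print('sequence_error =', total_seq_err / float(total_seqs))      # sequence error rate
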
Code Example #4
File: model_fn.py  Project: vincent1995/SmartPackage
def _get_testing(rnn_logits, sequence_length, label, label_length):
    """Create ops for testing (all scalars): 
       loss: CTC loss function value, 
       label_error:   batch level edit distance on beam search max
       sequence_error: batch level sequence error rate
    """

    with tf.name_scope("train"):
        loss = model.ctc_loss_layer(rnn_logits, label, sequence_length)
    with tf.name_scope("test"):
        predictions, _ = tf.nn.ctc_beam_search_decoder(rnn_logits,
                                                       sequence_length,
                                                       beam_width=128,
                                                       top_paths=1,
                                                       merge_repeated=True)

        hypothesis = tf.cast(predictions[0], tf.int32)  # for edit_distance

        # Per-sequence statistic
        num_label_errors = tf.edit_distance(hypothesis, label, normalize=False)

        # Per-batch summary counts
        batch_num_label_errors = tf.reduce_sum(num_label_errors)
        batch_num_sequence_errors = tf.count_nonzero(num_label_errors, axis=0)
        batch_num_labels = tf.reduce_sum(label_length)

        # Wide integer type casts (prefer unsigned, but truediv dislikes those)
        batch_num_label_errors = tf.cast(batch_num_label_errors, tf.int64)
        batch_num_sequence_errors = tf.cast(batch_num_sequence_errors,
                                            tf.int64)
        batch_num_labels = tf.cast(batch_num_labels, tf.int64)

    return loss, batch_num_label_errors, batch_num_sequence_errors, \
        batch_num_labels, predictions
Code Example #5
def _get_training(rnn_logits, label, sequence_length, global_steps):
    """Set up training ops"""
    with tf.name_scope("train"):

        # tf.get_collection retrieves all the variables in a collection and returns them as a list
        rnn_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        loss = model.ctc_loss_layer(rnn_logits, label, sequence_length)
        extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(extra_update_ops):  # control dependencies: run the update ops before the train op
            learning_rate = tf.train.exponential_decay(
                FLAGS.learning_rate,
                global_steps,
                FLAGS.decay_steps,
                FLAGS.decay_rate,
                staircase=FLAGS.decay_staircase,
                name='learning_rate')
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=FLAGS.momentum)
            train_op = tf.contrib.layers.optimize_loss(
                loss=loss,
                global_step=global_steps,
                learning_rate=learning_rate,
                optimizer=optimizer,
                variables=rnn_vars)

            tf.summary.scalar('learning_rate', learning_rate)

    return train_op
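
A minimal training-loop sketch around the op returned above (also not part of the original file): it assumes TensorFlow 1.x, that the input tensors exist, and a FLAGS.max_num_steps style flag; the log directory and flag name are illustrative only.

global_steps = tf.train.get_or_create_global_step()
train_op = _get_training(rnn_logits, label, sequence_length, global_steps)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('logs', sess.graph)   # log directory is a placeholder
    for step in range(FLAGS.max_num_steps):               # hypothetical flag
        _, summary = sess.run([train_op, summary_op])
        if step % 100 == 0:                               # periodically write summaries
            writer.add_summary(summary, step)
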
Code Example #6
File: test.py  Project: trigrass2/cnn_lstm_ctc_ocr
def _get_testing(rnn_logits,sequence_length,label,label_length):
    """Create ops for testing (all scalars): 
       loss: CTC loss function value, 
       label_error:  Batch-normalized edit distance on beam search max
       sequence_error: Batch-normalized sequence error rate
    """
    with tf.name_scope("train"):
        loss = model.ctc_loss_layer(rnn_logits,label,sequence_length) 
    with tf.name_scope("test"):
        predictions,_ = tf.nn.ctc_beam_search_decoder(rnn_logits, 
                                                   sequence_length,
                                                   beam_width=128,
                                                   top_paths=1,
                                                   merge_repeated=True)
        hypothesis = tf.cast(predictions[0], tf.int32) # for edit_distance
        label_errors = tf.edit_distance(hypothesis, label, normalize=False)
        sequence_errors = tf.count_nonzero(label_errors,axis=0)
        total_label_error = tf.reduce_sum( label_errors )
        total_labels = tf.reduce_sum( label_length )
        label_error = tf.truediv( total_label_error, 
                                  tf.cast(total_labels, tf.float32 ),
                                  name='label_error')
        sequence_error = tf.truediv( tf.cast( sequence_errors, tf.int32 ),
                                     tf.shape(label_length)[0],
                                     name='sequence_error')
        tf.summary.scalar( 'loss', loss )
        tf.summary.scalar( 'label_error', label_error )
        tf.summary.scalar( 'sequence_error', sequence_error )

    return loss, label_error, sequence_error
Code Example #7
def _get_training(rnn_logits, label, sequence_length, label_length):
    # Set up training ops
    with tf.name_scope('train'):
        if FLAGS.tune_scope:
            scope = FLAGS.tune_scope
        else:
            scope = 'convnet|rnn'
        rnn_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     scope=scope)
        loss = model.ctc_loss_layer(rnn_logits, label, sequence_length)
        # Update batch norm stats [http://stackoverflow.com/questions/43234667]
        extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(extra_update_ops):
            learning_rate = tf.train.exponential_decay(
                FLAGS.learning_rate,
                tf.train.get_global_step(),
                FLAGS.decay_steps,
                FLAGS.decay_rate,
                staircase=FLAGS.decay_staircase,
                name='learning_rate')
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=FLAGS.momentum)
            train_op = tf.contrib.layers.optimize_loss(
                loss=loss,
                global_step=tf.train.get_global_step(),
                learning_rate=learning_rate,
                optimizer=optimizer,
                variables=rnn_vars)

    with tf.name_scope('test'):
        predictions, probability = tf.nn.ctc_beam_search_decoder(
            rnn_logits,  # also returns the log probabilities from the CTC decoder
            sequence_length,
            beam_width=128,
            top_paths=1,
            merge_repeated=False
        )  # if True, repeated characters are merged in the decoder output
        hypothesis = tf.cast(predictions[0], tf.int32)  # for edit_distance
        label_errors = tf.edit_distance(
            hypothesis, label, normalize=False)  # Levenshtein distance
        sequence_errors = tf.count_nonzero(
            label_errors, axis=0
        )  # count of nonzero distances, i.e. cases where the model output differs from the ground truth
        total_label_error = tf.reduce_sum(
            label_errors)  # sum of Levenshtein distances over the batch
        total_labels = tf.reduce_sum(
            label_length)  # number of ground-truth characters in the whole batch
        label_error = tf.truediv(
            total_label_error,
            tf.cast(total_labels, tf.float32),
            name='label_error'
        )  # normalized Levenshtein distance (divided by the number of characters)
        sequence_error = tf.truediv(
            tf.cast(sequence_errors, tf.int32),
            tf.shape(label_length)[0],
            name='sequence_error')  # fraction of incorrect sequences

    return train_op, label_error, sequence_error, predictions[0], probability
Code Example #8
def _get_training(rnn_logits, label, sequence_length, tune_scope,
                  learning_rate, decay_steps, decay_rate, decay_staircase,
                  momentum):
    """Set up training ops"""

    with tf.compat.v1.name_scope("train"):

        if tune_scope:
            scope = tune_scope
        else:
            scope = "convnet|rnn"

        rnn_vars = tf.compat.v1.get_collection(
            tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=scope)

        loss = model.ctc_loss_layer(rnn_logits, label, sequence_length)

        # Update batch norm stats [http://stackoverflow.com/questions/43234667]
        extra_update_ops = tf.compat.v1.get_collection(
            tf.compat.v1.GraphKeys.UPDATE_OPS)

        with tf.control_dependencies(extra_update_ops):

            # Calculate the learning rate given the parameters
            learning_rate_tensor = tf.compat.v1.train.exponential_decay(
                learning_rate,
                tf.compat.v1.train.get_global_step(),
                decay_steps,
                decay_rate,
                staircase=decay_staircase,
                name='learning_rate')

            optimizer = tf.compat.v1.train.AdamOptimizer(
                learning_rate=learning_rate_tensor, beta1=momentum)

            #train_op = optimizer.minimize(loss = loss, global_step = tf.compat.v1.train.get_global_step(), var_list=rnn_vars)

            # train_op = optimizer.minimize(
            #     loss = loss,
            #     global_step = tf.compat.v1.train.get_global_step(),
            #     #learning_rate=learning_rate_tensor,
            #     var_list=rnn_vars
            #     )
            train_op = layers.optimize_loss(
                loss=loss,
                global_step=tf.compat.v1.train.get_global_step(),
                learning_rate=learning_rate_tensor,
                optimizer=optimizer,
                variables=rnn_vars)

            tf.compat.v1.summary.scalar('learning_rate', learning_rate_tensor)

    return train_op, loss
Code Example #9
def _get_training(rnn_logits, label, sequence_length):
    """Set up training ops"""
    """
    接受RNN层预测的logits,与真实标签label,结合序列长度,进行训练
    loss是CTC损失
    优化的是CNN和RNN(bi-LSTM)网络的参数
    """
    with tf.name_scope("train"):

        if FLAGS.tune_scope:
            scope = FLAGS.tune_scope
        else:
            scope = "convnet|rnn"

        # scope="convnet|rnn": the trainable variables include both the CNN and the RNN networks
        rnn_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     scope=scope)

        loss = model.ctc_loss_layer(rnn_logits, label, sequence_length)

        # Update batch norm stats [http://stackoverflow.com/questions/43234667]
        extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.control_dependencies(extra_update_ops):

            learning_rate = tf.train.exponential_decay(
                FLAGS.learning_rate,
                tf.train.get_global_step(),
                FLAGS.decay_steps,
                FLAGS.decay_rate,
                staircase=FLAGS.decay_staircase,
                name='learning_rate')

            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=FLAGS.momentum)

            train_op = tf.contrib.layers.optimize_loss(
                loss=loss,
                global_step=tf.train.get_global_step(),
                learning_rate=learning_rate,
                optimizer=optimizer,
                variables=rnn_vars)

            tf.summary.scalar('learning_rate', learning_rate)

    return train_op
Code Example #10
File: train.py  Project: hsiangchun/CAPTCHA
def _get_training(rnn_logits,label,sequence_length):
    """Set up training ops"""
    with tf.name_scope("train"):

        if FLAGS.tune_scope:
            scope=FLAGS.tune_scope
        else:
            scope="convnet|rnn"

        rnn_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES,
                                       scope=scope)

        loss = model.ctc_loss_layer(rnn_logits,label,sequence_length) 

        # Update batch norm stats [http://stackoverflow.com/questions/43234667]
        extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.control_dependencies(extra_update_ops):

            learning_rate = tf.train.exponential_decay(
                FLAGS.learning_rate,
                tf.train.get_global_step(),
                FLAGS.decay_steps,
                FLAGS.decay_rate,
                staircase=FLAGS.decay_staircase,
                name='learning_rate')

            optimizer = tf.train.AdamOptimizer(
                learning_rate=learning_rate,
                beta1=FLAGS.momentum)

            # Given the loss and parameters for the optimizer, optimize_loss returns a training op.
            # If "summaries" is not set, the loss, the learning rate, and the global norm of
            # the gradients are reported.
            train_op = tf.contrib.layers.optimize_loss(
                loss=loss,
                global_step=tf.train.get_global_step(),
                learning_rate=learning_rate, 
                optimizer=optimizer,
                variables=rnn_vars)

            tf.summary.scalar( 'learning_rate', learning_rate )
    return train_op
Code Example #11
File: train.py  Project: trigrass2/cnn_lstm_ctc_ocr
def _get_training(rnn_logits,label,sequence_length):
    """Set up training ops"""
    with tf.name_scope("train"):

        if FLAGS.tune_scope:
            scope=FLAGS.tune_scope
        else:
            scope="convnet|rnn"

        rnn_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES,
                                       scope=scope)

        loss = model.ctc_loss_layer(rnn_logits,label,sequence_length) 

        # Update batch norm stats [http://stackoverflow.com/questions/43234667]
        extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.control_dependencies(extra_update_ops):

            learning_rate = tf.train.exponential_decay(
                FLAGS.learning_rate,
                tf.train.get_global_step(),
                FLAGS.decay_steps,
                FLAGS.decay_rate,
                staircase=FLAGS.decay_staircase,
                name='learning_rate')

            optimizer = tf.train.AdamOptimizer(
                learning_rate=learning_rate,
                beta1=FLAGS.momentum)
            
            train_op = tf.contrib.layers.optimize_loss(
                loss=loss,
                global_step=tf.train.get_global_step(),
                learning_rate=learning_rate, 
                optimizer=optimizer,
                variables=rnn_vars)

            tf.summary.scalar( 'learning_rate', learning_rate )

    return train_op
Code Example #12
def _get_training(args, rnn_logits, label, sequence_length):
    """Set up training ops"""

    with tf.name_scope("train"):

        scope = "convnet|rnn"

        rnn_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     scope=scope)

        loss = model.ctc_loss_layer(rnn_logits, label, sequence_length)

        # Update batch norm stats [http://stackoverflow.com/questions/43234667]
        extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.control_dependencies(extra_update_ops):

            learning_rate = tf.train.exponential_decay(
                args.learning_rate,
                tf.train.get_global_step(),
                args.decay_steps,
                args.decay_rate,
                staircase=args.decay_staircase,
                name='learning_rate')

            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=args.momentum)

            train_op = tf.contrib.layers.optimize_loss(
                loss=loss,
                global_step=tf.train.get_global_step(),
                learning_rate=learning_rate,
                optimizer=optimizer,
                variables=rnn_vars)

    return train_op, loss