Example #1
import qnd
import tensorflow as tf


def def_model():
    qnd.add_flag("hidden_layer_size",
                 type=int,
                 default=64,
                 help="Hidden layer size")

    def model(image, number=None, mode=None):
        h = tf.contrib.layers.fully_connected(image,
                                              qnd.FLAGS.hidden_layer_size)
        h = tf.contrib.layers.fully_connected(h, 10, activation_fn=None)

        predictions = tf.argmax(h, axis=1)

        # In inference mode, only the predictions are returned.
        if mode == tf.contrib.learn.ModeKeys.INFER:
            return predictions

        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=number,
                                                           logits=h))

        # `minimize` is a helper defined elsewhere in the original example
        # (typically an optimizer step, e.g. Adam on the global step).
        return predictions, loss, minimize(loss), {
            "accuracy": tf.contrib.metrics.streaming_accuracy(
                predictions, number)[1],
        }

    return model
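For reference, a model function like this is handed to qnd's run function together with an input function; a minimal usage sketch, assuming the tensorflow-qnd API and a hypothetical read_file input function:

run = qnd.def_run()  # defines the train/evaluate entry point and its flags

def read_file(filename_queue):
    # hypothetical input function yielding (image, number) tensors
    ...

run(def_model(), read_file)  # parses the command line, then trains/evaluates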
Example #2
import qnd


def add_flags():
    qnd.add_flag("regularization_scale", type=float, default=1e-8)

    adder = qnd.FlagAdder()

    adder.add_flag("word_embedding_size", type=int, default=100)
    adder.add_flag("sentence_embedding_size", type=int, default=100)
    adder.add_flag("document_embedding_size", type=int, default=100)
    adder.add_flag("context_vector_size", type=int, default=100)

    return adder
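Once qnd parses the command line, flag values are read from qnd.FLAGS; a FlagAdder also remembers the flags it registered (tensorflow-qnd exposes them as a dict through its flags property, assuming that API). A hedged sketch:

adder = add_flags()
# ... after the command line has been parsed ...
size = qnd.FLAGS.word_embedding_size  # 100 unless overridden
hyperparams = adder.flags  # dict of just the flags this adder registered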
Example #4
import os

import qnd
import tensorflow as tf

# `ex` (used below for ex.static_shape and ex.minimize) is a helper module
# from the original project and is assumed to be importable here.


def def_ar_lm():
    qnd.add_flag('cell_size', type=int, default=128)
    qnd.add_flag('num_unroll', type=int, default=16)
    qnd.add_flag('batch_size', type=int, default=64)
    qnd.add_flag('num_batch_threads', type=int, default=os.cpu_count())
    qnd.add_flag('batch_queue_capacity', type=int, default=1024)

    def ar_lm(key, sentence, labels, *, char_embeddings):
        cell = tf.contrib.rnn.LayerNormBasicLSTMCell(qnd.FLAGS.cell_size)

        # Split each sequence into fixed-size unrolls and keep the LSTM state
        # between successive unrolls of the same sequence.
        batch = tf.contrib.training.batch_sequences_with_states(
            key,
            input_sequences={
                'sentence': tf.gather(char_embeddings, sentence),
                'labels': labels,
            },
            input_context={},
            input_length=None,
            initial_states={
                'c': tf.zeros([cell.state_size.c], tf.float32),
                'h': tf.zeros([cell.state_size.h], tf.float32),
            },
            num_unroll=qnd.FLAGS.num_unroll,
            batch_size=qnd.FLAGS.batch_size,
            num_threads=qnd.FLAGS.num_batch_threads,
            capacity=qnd.FLAGS.batch_queue_capacity)

        # Run the RNN over the unrolled steps, restoring and saving state
        # through the state saver created above.
        outputs, _ = tf.nn.state_saving_rnn(cell,
                                            tf.unstack(
                                                batch.sequences['sentence'],
                                                axis=1),
                                            sequence_length=batch.length,
                                            state_saver=batch,
                                            state_name=('c', 'h'))

        logits = batch_linear(outputs, ex.static_shape(char_embeddings)[0])
        labels = batch.sequences['labels']

        loss = sequence_labeling_loss(logits, labels, batch.length)

        return (
            {
                'key': key,
                'labels': (tf.argmax(logits, axis=2) *
                           tf.sequence_mask(batch.length, dtype=tf.int64)),
            },
            loss,
            ex.minimize(loss),
        )

    return ar_lm
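batch_linear and sequence_labeling_loss are not shown in this snippet; a minimal sketch of what such helpers could look like, assuming logits of shape [batch, time, vocab] and integer labels of shape [batch, time] (illustrative definitions, not the original code):

def batch_linear(outputs, num_outputs):
    # Stack the per-step RNN outputs into [batch, time, cell_size] and apply
    # one linear projection, shared across time steps, to the last axis.
    return tf.layers.dense(tf.stack(outputs, axis=1), num_outputs)

def sequence_labeling_loss(logits, labels, lengths):
    # Per-step cross-entropy, masked so padding steps do not contribute.
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits)
    mask = tf.sequence_mask(lengths, tf.shape(labels)[1], dtype=tf.float32)
    return tf.reduce_sum(losses * mask) / tf.reduce_sum(mask)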
Example #5
import qnd

# `qndex` and `ex` are helper libraries from the original project and are
# assumed to be importable here.


def def_char_lm():
    get_chars = qndex.nlp.def_chars()
    qnd.add_flag('char_embedding_size', type=int, default=100)
    ar_lm = def_ar_lm()

    def char_lm(key, sentence, labels):
        return ar_lm(key,
                     sentence,
                     labels,
                     char_embeddings=ex.embeddings(
                         id_space_size=len(get_chars()),
                         embedding_size=qnd.FLAGS.char_embedding_size,
                         name='char_embeddings'))

    return char_lm
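ex.embeddings presumably creates a trainable embedding matrix indexed by character id; a rough TF 1.x equivalent, shown only to illustrate the assumed shape and dtype:

# hypothetical stand-in for the ex.embeddings(...) call above
char_embeddings = tf.get_variable(
    'char_embeddings',
    shape=[len(get_chars()), qnd.FLAGS.char_embedding_size],
    dtype=tf.float32)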
Example #7
import qnd

# `add_child_flags` and `argtyp` (which provides the int_list argument type)
# are defined elsewhere in the original project.


def add_flags():
    adder = add_child_flags()
    adder.add_flag("dropout_keep_prob", type=float, default=0.5)
    adder.add_flag("nums_of_cnn_channels",
                   type=argtyp.int_list,
                   default=[32] * 4)
    adder.add_flag("nums_of_attention_cnn_channels",
                   type=argtyp.int_list,
                   default=[32] * 3)

    qnd.add_required_flag("font_file")
    qnd.add_flag("font_size", type=int, default=32)
    qnd.add_flag("save_font_array_file")

    return adder
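argtyp.int_list is evidently a custom argparse type; a minimal sketch of such a parser, assuming comma-separated input like "32,32,32":

def int_list(string):
    # hypothetical parser: "32,32,32" -> [32, 32, 32]
    return [int(x) for x in string.split(',')]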
Example #8
import qnd

# `add_child_flags` is defined elsewhere in the original project.


def add_flags():
    adder = add_child_flags()
    adder.add_flag("dropout_keep_prob", type=float, default=0.5)

    qnd.add_required_flag("font_file")
    qnd.add_flag("font_size", type=int, default=32)
    qnd.add_flag("save_font_array_file")
    qnd.add_flag("regularization_scale", type=float, default=1e-8)

    return adder
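Because font_file is registered with add_required_flag, qnd's argument parser is expected to reject a command line that omits it, while the remaining flags fall back to their defaults. A hedged sketch of reading the parsed values:

adder = add_flags()
# after qnd parses the command line:
keep_prob = qnd.FLAGS.dropout_keep_prob  # 0.5 unless overridden
font_file = qnd.FLAGS.font_file  # required; parsing fails if it is missing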