def main(_):
    if not FLAGS.output_file:
        raise ValueError(
            'You must supply the path to save to with --output_file')

    tf.logging.set_verbosity(tf.logging.INFO)

    with tf.Graph().as_default() as graph:
        # input_size is (width, height, channels).
        shape = (int(input_size[0]), int(input_size[1]), int(input_size[2]))
        img_input = tf.placeholder(name='input_tensor',
                                   dtype=tf.float32,
                                   shape=(None, input_size[1], input_size[0],
                                          int(shape[2])))
        img_input = img_input * (1. / 255) - 0.5
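        # Scaling to [-0.5, 0.5] is baked into the graph, so consumers of the
        # exported graph feed raw [0, 255] pixels to 'input_tensor:0'.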
        batch_input = tf.placeholder(name='input_batch_size',
                                     dtype=tf.int32,
                                     shape=())
        #img_4d = tf.expand_dims(img_input, 0)

        print(img_input)
        # Create network.
        crnn_params = model.CRNNNet.default_params._replace(
            imgH=input_size[1], seq_length=SEQ_LEN)
        crnn = model.CRNNNet(crnn_params)

        logits, inputs, seq_len, W, b = crnn.net(img_input, batch_input)
        decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits,
                                                          seq_len,
                                                          merge_repeated=False)

        indices = tf.cast(decoded[0].indices,
                          tf.int32,
                          name='sparse_tensor_indices')
        values = tf.cast(decoded[0].values,
                         tf.int32,
                         name='sparse_tensor_values')
        dense_shape = tf.cast(decoded[0].dense_shape,
                              tf.int32,
                              name='sparse_tensor_shape')
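        # These three named tensors let a consumer of the exported graph
        # rebuild the decoded output as a SparseTensor.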

        print(indices, values, dense_shape)

        tf.constant(shape, name='input_plate_size')
        tf.constant(alphabet, dtype=tf.string, name='alphabet')
        print(alphabet)
        tf.constant(["sparse_tensor"], name="output_names")
        tf.constant(['input_tensor'], name='input_name')

        graph_def = graph.as_graph_def()
        with gfile.GFile(FLAGS.output_file, 'wb') as f:
            f.write(graph_def.SerializeToString())
            print('Successfully written to', FLAGS.output_file)
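
# A minimal sketch (not part of the original) of how the GraphDef written by
# main() could be loaded back; the tensor names match the placeholders and
# constants defined above.
def load_frozen_graph(pb_path):
    graph_def = tf.GraphDef()
    with gfile.GFile(pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='')
        img_input = graph.get_tensor_by_name('input_tensor:0')
        batch_input = graph.get_tensor_by_name('input_batch_size:0')
        outputs = (graph.get_tensor_by_name('sparse_tensor_indices:0'),
                   graph.get_tensor_by_name('sparse_tensor_values:0'),
                   graph.get_tensor_by_name('sparse_tensor_shape:0'))
    return graph, img_input, batch_input, outputs
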
def load_from_checkpoint(shape, checkpoint_dir):
    #width_input = tf.placeholder(tf.int32, shape=())
    img_input = tf.placeholder(tf.float32,
                               shape=(None, shape[1], shape[0], shape[2]))
    input_batch = tf.placeholder(tf.int32, shape=())
    # Scale a separate tensor so the returned img_input stays a feedable
    # placeholder that takes raw [0, 255] pixels.
    img_scaled = img_input * (1. / 255) - 0.5
    #img_4d = tf.expand_dims(img_input, 0)

    # define the crnn net
    SEQ_LEN = int(shape[0]/4+1)
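    # seq_length = width / 4 + 1, presumably because the CNN stack downsamples
    # the width by a factor of 4 before the recurrent layers.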
    crnn_params = model.CRNNNet.default_params._replace(
        seq_length=SEQ_LEN, imgH=shape[1])
    crnn = model.CRNNNet(crnn_params)
    logits, inputs, seq_len, W, b = crnn.net(img_scaled, input_batch)

    decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits, seq_len, merge_repeated=False)
    val_predict = tf.cast(decoded[0], tf.int32)

    saver = tf.train.Saver()

    sess = tf.Session()
    ckpt_path = tf.train.latest_checkpoint(checkpoint_dir)
    saver.restore(sess, ckpt_path)
    sess.run(tf.local_variables_initializer())
    print("Model restored!")
    return sess, val_predict, img_input, input_batch
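
# Usage sketch (illustrative, not from the original): `image` is assumed to be
# a numpy array of shape (1, 32, 100, 1) with raw [0, 255] pixels for
# shape = (100, 32, 1); scaling happens inside the graph.
#   sess, val_predict, img_input, input_batch = load_from_checkpoint(
#       (100, 32, 1), './checkpoints')
#   prediction = sess.run(val_predict,
#                         feed_dict={img_input: image, input_batch: 1})
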
def main(_):

    tf.logging.set_verbosity(tf.logging.DEBUG)  # set the logging verbosity threshold

    with tf.Graph().as_default():

        deploy_config = model_deploy.DeploymentConfig()
        # Create global_step.
        global_step = tf.Variable(0, name='global_step', trainable=False)

        file_name = os.path.join("./tfrecord", "mjtrain_690_999.tfrecords")

        starter_learning_rate = 0.1
        learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                                   100000, 0.96, staircase=True)
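        # With staircase=True this gives lr = 0.1 * 0.96 ** (global_step // 100000).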
        tf.summary.scalar("learning_rate",learning_rate)
        sh_images, sh_labels, sh_length= read_utils.inputs( filename = file_name,batch_size=batch_size,
                                num_epochs=num_epochs)




        crnn = model.CRNNNet()
        logits, inputs, seq_len, W, b = crnn.net(sh_images)

        cost = crnn.losses(sh_labels, logits, seq_len)
        optimizer = tf.train.AdadeltaOptimizer(
            learning_rate=learning_rate).minimize(loss=cost,
                                                  global_step=global_step)
        tf.summary.scalar("cost", cost)


        decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits, seq_len, merge_repeated=False)


        acc = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), sh_labels))
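        # Note: tf.edit_distance returns a normalized Levenshtein distance, so
        # lower is better; "acc" here is really a label error rate.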
        tf.summary.scalar("edit_distance",acc)

        sess = tf.Session()

        save = tf.train.Saver(max_to_keep=2)
        if tf.train.latest_checkpoint(checkpoint_dir) is None:
            init_op = tf.group(tf.global_variables_initializer(),
                               tf.local_variables_initializer())
            sess.run(init_op)
        else:
            save.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
            sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        merged = tf.summary.merge_all()
        file_writer = tf.summary.FileWriter('./tmp/my-model', sess.graph)

        try:
            step = sess.run(global_step)
            while not coord.should_stop():
                start_time = time.time()

                _, merged_t, val_cost, val_ler, lr, step = sess.run(
                    [optimizer, merged, cost, acc, learning_rate, global_step])

                duration = time.time() - start_time

                print("cost", val_cost)
                file_writer.add_summary(merged_t,step)
                # Print an overview fairly often.
                if step % 10 == 0:
                    print('Step %d:  acc %.3f (%.3f sec)' % (step, val_ler, duration))
                    save.save(sess, "./tmp/crnn-model.ckpt", global_step=global_step)
                # step is refreshed from global_step by sess.run each iteration.
        except tf.errors.OutOfRangeError:
            print('Done training for %d epochs, %d steps.' % (num_epochs, step))
        finally:
            # When done, ask the threads to stop.
            coord.request_stop()
            # Wait for threads to finish.
            coord.join(threads)
            sess.close()


def load_sample(img_dir):  # hypothetical name; the original def line is missing
    # first load image and label
    image_raw, width = load_image(img_dir, crop_size=input_size)
    label = load_label_from_img_dir(img_dir)
    label = label.lower()
    return image_raw, label, width


batch_input = tf.placeholder(tf.int32, shape=())
img_input = tf.placeholder(tf.float32, shape=(input_size[1], input_size[0], 1))
img_scaled = img_input * (1. / 255) - 0.5
img_4d = tf.expand_dims(img_scaled, 0)

# define the crnn net
#crnn_params = model.CRNNNet.default_params._replace(batch_size=1)._replace(seq_length = SEQ_LEN)._replace(imgH = input_size[1])  # ,seq_length=int(width/4+1)
crnn_params = crnn_params._replace(batch_size=1)
crnn = model.CRNNNet(crnn_params)
logits, inputs, seq_len, W, b = crnn.net(img_4d, batch_input)

decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits,
                                                  seq_len,
                                                  merge_repeated=False)
val_predict = tf.cast(decoded[0], tf.int32)

saver = tf.train.Saver()

sess = tf.Session()
ckpt_path = tf.train.latest_checkpoint(checkpoint_dir)
saver.restore(sess, ckpt_path)
sess.run(tf.local_variables_initializer())
print("Model restored!")

def run():

    tf.logging.set_verbosity(tf.logging.DEBUG)

    with tf.Graph().as_default():

        deploy_config = model_deploy.DeploymentConfig()
        # Create global_step
        global_step = tf.Variable(0, name='global_step', trainable=False)

        file_name = os.path.join("dataset/data", "train2.tfrecords")
        train_images, train_labels, train_path = read_utils.inputs(
            filename=file_name,
            batch_size=batch_size,
            num_epochs=num_epochs,
            crop_size=model_size)

        coord = tf.train.Coordinator()
        crnn = model.CRNNNet(crnn_params)
        logits, inputs, seq_len, W, b = crnn.net(train_images)

        cost = crnn.losses(train_labels, logits, seq_len)

        learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                                   global_step,
                                                   2000,
                                                   0.9,
                                                   staircase=True)
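        # With staircase=True this gives
        # lr = starter_learning_rate * 0.9 ** (global_step // 2000).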
        tf.summary.scalar("learning_rate", learning_rate)
        #optimizer = tf.train.AdadeltaOptimizer(learning_rate = learning_rate).minimize(loss=cost,global_step=global_step)
        optimizer = tf.train.RMSPropOptimizer(
            learning_rate=learning_rate).minimize(loss=cost,
                                                  global_step=global_step)

        tf.summary.scalar("cost", cost)
        decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits,
                                                          seq_len,
                                                          merge_repeated=False)

        pred = tf.cast(decoded[0], tf.int32)
        acc = tf.reduce_mean(tf.edit_distance(pred, train_labels))
        tf.summary.scalar("edit_distance", acc)

        ##################################

        sess = tf.Session()

        save = tf.train.Saver(max_to_keep=20)
        if tf.train.latest_checkpoint(checkpoint_dir) is None:
            init_op = tf.group(tf.global_variables_initializer(),
                               tf.local_variables_initializer())
            sess.run(init_op)
        else:
            save.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
            sess.run(tf.local_variables_initializer())

        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        merged = tf.summary.merge_all()
        file_writer = tf.summary.FileWriter(checkpoint_dir + 'my-model',
                                            sess.graph)

        ##################################
        try:
            step = sess.run(global_step)
            while not coord.should_stop():
                start_time = time.time()

                _, merged_t, train_cost, train_accuracy, lr, step, val_lbls, val_pred, paths = sess.run(
                    [
                        optimizer, merged, cost, acc, learning_rate,
                        global_step, train_labels, pred, train_path
                    ])

                duration = time.time() - start_time

                print("## Step: %d Cost: %.3f" % (step, train_cost))

                # Print an overview fairly often.
                if step % 10 == 0:
                    str_gt = sparse_tensor_to_str(val_lbls)
                    str_pred = sparse_tensor_to_str(val_pred)

                    for i in range(num_to_show):
                        print("  true: ", str_gt[i], "  result: ", str_pred[i],
                              " img_path: ", paths[i])
                    print('Step: %d  train_acc: %.3f (%.3f sec)' %
                          (step, train_accuracy, duration))

                    print('Current lr: %.8f' % lr)
                if step % 100 == 0:
                    save.save(sess,
                              checkpoint_dir + "crnn-model.ckpt",
                              global_step=global_step)
                file_writer.add_summary(merged_t, step)
                # step is refreshed from global_step by sess.run each iteration.
        except tf.errors.OutOfRangeError:
            print('Done training for %d epochs, %d steps.' %
                  (num_epochs, step))
        finally:
            # When done, ask the threads to stop.
            coord.request_stop()

            # Wait for threads to finish.
            coord.join(threads)
            sess.close()
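
# A minimal sketch (not part of the original) of the sparse_tensor_to_str
# helper used in run() above, assuming `alphabet` (defined elsewhere in this
# file) maps integer class indices to characters.
def sparse_tensor_to_str(sparse_value):
    # Group decoded character indices by batch row, then join into strings.
    strings = [[] for _ in range(int(sparse_value.dense_shape[0]))]
    for (row, _col), char_idx in zip(sparse_value.indices, sparse_value.values):
        strings[row].append(alphabet[char_idx])
    return [''.join(chars) for chars in strings]
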
def main(_):
    tf.logging.set_verbosity(tf.logging.DEBUG)  # set the logging verbosity threshold

    with tf.Graph().as_default():
        # Config model_deploy. Keep TF Slim Models structure.
        # Useful if want to need multiple GPUs and/or servers in the future.
        deploy_config = model_deploy.DeploymentConfig()
        # Create global_step.
        with tf.device(deploy_config.variables_device()):
            global_step = slim.create_global_step()
        file_name = os.path.join("../tfrecord", "train.tfrecords")

        def read_and_decode(filename,
                            num_epochs):  # parse examples from a TFRecord file
            filename_queue = tf.train.string_input_producer(
                [filename], num_epochs=num_epochs)
            reader = tf.TFRecordReader()
            print(filename_queue)
            _, serialized_example = reader.read(
                filename_queue)  # returns (key, serialized_example)
            features = tf.parse_single_example(
                serialized_example,
                features={
                    'image/encoded':
                    tf.FixedLenFeature((), tf.string, default_value=''),
                    # three arguments: shape, dtype, default_value
                    'image/format':
                    tf.FixedLenFeature((), tf.string, default_value='jpeg'),
                    'image/shape':
                    tf.FixedLenFeature([2], tf.int64),
                    'label':
                    tf.FixedLenFeature((), tf.string, default_value='unknown'),
                    'index':
                    tf.FixedLenFeature([1], tf.int64)
                })  # return image and label

            # img = tf.decode_raw(features['image/encoded'], tf.uint8)
            img = tf.image.decode_jpeg(features['image/encoded'])
            shape = features["image/shape"]
            img = tf.reshape(img, [32, 100, 3])  # reshape image to 32x100x3
            img = tf.cast(img,
                          tf.float32) * (1. / 255) - 0.5  # scale to [-0.5, 0.5]
            label = features['label']  # the label tensor
            index = features["index"]
            return img, label, shape, index

        def preprocess(image_raw):
            image = tf.image.decode_jpeg(tf.image.encode_jpeg(image_raw))
            return resize_image(image, (100, 32))

        def inputs(batch_size, num_epochs, filename):
            """Reads input data num_epochs times.
            Args:
              batch_size: Number of examples per returned batch.
              num_epochs: Number of times to read the input data, or 0/None to
                 train forever.
              filename: Path of the TFRecord file to read from.
            Returns:
              A tuple (images, labels, shapes, indices) of batched tensors,
              with image pixel values in the range [-0.5, 0.5].
              Note that a tf.train.QueueRunner is added to the graph, which
              must be run using e.g. tf.train.start_queue_runners().
            """
            if not num_epochs: num_epochs = None
            #filename = os.path.join(file_dir)

            with tf.name_scope('input'):
                # Even when reading in multiple threads, share the filename
                # queue.
                image, label, shape, index = read_and_decode(
                    filename, num_epochs)

                #image = preprocess(image)
                # Shuffle the examples and collect them into batch_size batches.
                # (Internally uses a RandomShuffleQueue.)
                # We run this in two threads to avoid being a bottleneck.
                images, shuffle_labels, sshape, sindex = tf.train.shuffle_batch(
                    [image, label, shape, index],
                    batch_size=batch_size,
                    num_threads=2,
                    capacity=1000 + 3 * batch_size,
                    # Ensures a minimum amount of shuffling of examples.
                    min_after_dequeue=100)

                return images, shuffle_labels, sshape, sindex

        with tf.Graph().as_default():
            # NOTE: this opens a second, nested graph, so the global_step
            # created above belongs to a different graph and cannot be used
            # here; re-create it in this graph.
            global_step = tf.Variable(0, name='global_step', trainable=False)
            # Input images and labels.
            starter_learning_rate = 0.1
            learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                                       global_step,
                                                       100000,
                                                       0.96,
                                                       staircase=True)
            images, shuffle_labels, sshape, sindex = inputs(
                filename=file_name,
                batch_size=batch_size,
                num_epochs=num_epochs)

            crnn = model.CRNNNet()
            logits, inputs, seq_len, W, b = crnn.net(images)

            # Hardcoded debug labels that override the batched labels above.
            shuffle_labels = ['123456', '123', '12342']
            labels = shuffle_labels

            def sparse_tuple_from(sequences, dtype=np.int32):
                """Create a sparse representention of x.
                Args:
                    sequences: a list of lists of type dtype where each element is a sequence
                Returns:
                    A tuple with (indices, values, shape)
                """
                indices = []
                values = []

                for n, seq in enumerate(sequences):
                    indices.extend(zip([n] * len(seq), range(len(seq))))
                    values.extend(seq)

                indices = np.asarray(indices, dtype=np.int64)
                values = np.asarray(values, dtype=dtype)
                shape = np.asarray([len(sequences), indices.max(0)[1] + 1],
                                   dtype=np.int64)

                return indices, values, shape

            sparse_labels = sparse_tuple_from(labels)
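            # For illustration, with integer sequences:
            #   sparse_tuple_from([[1, 2, 3], [4, 5]]) returns
            #   indices = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]]
            #   values  = [1, 2, 3, 4, 5]
            #   shape   = [2, 3]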

            cost = crnn.losses(sparse_labels, logits, seq_len)
            optimizer = tf.train.AdadeltaOptimizer(
                learning_rate=learning_rate).minimize(loss=cost,
                                                      global_step=global_step)

            # Beam search decoding: slower than greedy decoding, but gives
            # better results.
            decoded, log_prob = tf.nn.ctc_beam_search_decoder(
                logits, seq_len, merge_repeated=False)
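            # A faster but less accurate alternative (not in the original):
            #   decoded, log_prob = tf.nn.ctc_greedy_decoder(logits, seq_len)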

            # Accuracy: label error rate
            acc = tf.reduce_mean(
                tf.edit_distance(tf.cast(decoded[0], tf.int32), sparse_labels))

            sess = tf.Session()
            init_op = tf.group(tf.global_variables_initializer(),
                               tf.local_variables_initializer())
            sess.run(init_op)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
            try:
                step = 0
                while not coord.should_stop():
                    start_time = time.time()

                    # Run one step of the model.  The return values are
                    # the activations from the `train_op` (which is
                    # discarded) and the `loss` op.  To inspect the values
                    # of your ops or variables, you may include them in
                    # the list passed to sess.run() and the value tensors
                    # will be returned in the tuple from the call.
                    #timages, tsparse_labels, tsshape, tsindex = sess.run([images, sparse_labels, sshape, sindex])

                    val_cost, val_ler, lr, step = sess.run(
                        [cost, acc, learning_rate, global_step])

                    duration = time.time() - start_time

                    print(val_cost)

                    # Print an overview fairly often.
                    if step % 10 == 0:
                        print('Step %d:  (%.3f sec)' % (step, duration))
                    step += 1
            except tf.errors.OutOfRangeError:
                print('Done training for %d epochs, %d steps.' %
                      (num_epochs, step))
            finally:
                # When done, ask the threads to stop.
                coord.request_stop()
                # Wait for threads to finish.
                coord.join(threads)
                sess.close()