Example #1
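Training a CRNN text recognizer in TensorFlow 2: a hand-written CTC training loop with periodic checkpointing, plus export of the trained model to HDF5.
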
# Project-specific names (CRNN, SampleGenerator, FakeImageGenerator, ALPHABET)
# are assumed to come from the surrounding repository.
from os import mkdir
from os.path import exists, join

import tensorflow as tf
from tensorflow.keras.utils import Progbar


def save_ocr():
    # Restore the latest training checkpoint and export the model as HDF5.
    generator = SampleGenerator(10)
    crnn = CRNN(generator.vocab_size() + 1)  # +1 for the CTC blank label
    optimizer = tf.keras.optimizers.Adam(1e-4)
    checkpoint = tf.train.Checkpoint(model=crnn, optimizer=optimizer)
    checkpoint.restore(tf.train.latest_checkpoint('checkpoints'))
    if not exists('model'): mkdir('model')
    crnn.save(join('model', 'crnn.h5'))
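

# `ctc_lambda_func` is used by train() below but is not defined in this
# example. A minimal sketch, assuming the standard Keras CTC pattern (the
# argument order matches the calls below); the repository's real definition
# may differ:
def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    # ctc_batch_cost expects (labels, y_pred, input_length, label_length) and
    # returns one CTC loss value per batch element.
    return tf.keras.backend.ctc_batch_cost(labels, y_pred, input_length, label_length)

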
def train(args):
    @tf.function
    def train_step(x, y):
        # One optimization step: forward pass, CTC loss, backward pass.
        with tf.GradientTape() as tape:
            y_pred = model(x["the_input"])
            loss = tf.reduce_mean(ctc_lambda_func((
                y_pred,
                x["the_labels"],
                tf.reshape(x["input_length"], [-1, 1]),
                tf.reshape(x["label_length"], [-1, 1]),
            )))

        # Compute gradients and update the weights.
        trainable_vars = model.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        model.optimizer.apply_gradients(zip(gradients, trainable_vars))
        return loss


    epochs = 1000
    iter_per_epoch = 100

    train_generator = FakeImageGenerator(args).next_gen()

    model = CRNN(ALPHABET)
    model.build()
    model.summary()

    # Alternatively, resume from a previously saved model:
    # model = tf.keras.models.load_model('checkpoints/base_crnn.h5')

    # No loss is passed to compile(): the CTC loss is computed in train_step.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001, clipnorm=5))

    for epoch in range(1, epochs + 1):
        print(f"Start of epoch {epoch}")

        pb = Progbar(iter_per_epoch, stateful_metrics=["loss"])

        for step in range(iter_per_epoch):
            x, y = next(train_generator)
            loss = train_step(x, y)
            pb.add(1, values=[('loss', loss)])

        # Periodically save a checkpoint of the model.
        if epoch % 5 == 0:
            model.save("checkpoints/base_crnn.h5")

    # Sanity check: run one batch through the trained model.
    x, y = next(train_generator)
    print(model(x["the_input"]))

    """