vocab_size=dtgen.tokenizer.vocab_size)

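        # compile with a fixed initial learning rate and resume from the
        # last saved checkpoint, if one exists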
        model.compile(learning_rate=0.001)
        model.load_checkpoint(target=target_path)

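        # training mode: dump the architecture summary to disk, then fit
        # the model from the data generators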
        if args.train:
            model.summary(output_path, "summary.txt")
            callbacks = model.get_callbacks(logdir=output_path,
                                            checkpoint=target_path,
                                            verbose=1)

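            # time the full run; steps_per_epoch/validation_steps bound each
            # epoch since the generators yield indefinitely. Note that Keras
            # ignores shuffle=True when x is a generator.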
            start_time = time.time()
            h = model.fit(x=dtgen.next_train_batch(),
                          epochs=args.epochs,
                          steps_per_epoch=dtgen.steps['train'],
                          validation_data=dtgen.next_valid_batch(),
                          validation_steps=dtgen.steps['valid'],
                          callbacks=callbacks,
                          shuffle=True,
                          verbose=1)
            total_time = time.time() - start_time

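            # per-epoch losses live in the History object returned by fit()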
            loss = h.history['loss']
            val_loss = h.history['val_loss']

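            # the best epoch is the one with the lowest validation loss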
            min_val_loss = min(val_loss)
            min_val_loss_i = val_loss.index(min_val_loss)

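            # average wall-clock time per epoch, and the number of items an
            # epoch touches (train + validation), for the report below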
            time_epoch = (total_time / len(loss))
            total_item = (dtgen.size['train'] + dtgen.size['valid'])

            # assemble a plain-text training report from the metrics computed above
            t_corpus = "\n".join([
                f"Total train images:      {dtgen.size['train']}",
                f"Total validation images: {dtgen.size['valid']}",
                f"Total time:              {total_time:.8f} sec",
                f"Time per epoch:          {time_epoch:.8f} sec",
                f"Time per item:           {time_epoch / total_item:.8f} sec\n",
                f"Total epochs:            {len(loss)}",
                f"Best epoch               {min_val_loss_i + 1}\n",
                f"Best validation loss:    {min_val_loss:.8f}",
            ])

            # persist the report next to the other training artifacts
            # (assumes `os` is imported at the top of the script)
            with open(os.path.join(output_path, "train.txt"), "w") as lg:
                lg.write(t_corpus)
                print(t_corpus)