    print(example_batch_predictions.shape,
          "# (batch_size, sequence_length, vocab_size)")

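    # Decode the first sequence of the batch to sanity-check what the
    # still-untrained model predicts for it.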
    example_sequence_input = input_example_batch[0]
    example_sequence_prediction_logits = example_batch_predictions[0]
    # Sample a character id at each timestep from the logits rather than taking
    # the argmax, so the untrained model's output is not stuck in a loop.
    example_sequence_prediction_indices = tf.squeeze(
        tf.random.categorical(example_sequence_prediction_logits, num_samples=1),
        axis=-1).numpy()

    print("Input:\n", text_from_ids(example_sequence_input).numpy())
    print("Next Char Predictions:\n",
          text_from_ids(example_sequence_prediction_indices).numpy())

# The model outputs raw logits, so the loss must be built with from_logits=True.
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam', loss=loss)
history = model.fit(haikus_dataset, epochs=50)
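# Checkpoint this verse's weights, then reload them so generation below starts
# from the freshly trained state ('verse' is assumed to be set earlier in the
# script).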
model.save_weights('verse{}.h5'.format(verse))
model.load_weights('verse{}.h5'.format(verse))
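# OneStep (presumably defined earlier, following the standard TensorFlow
# text-generation tutorial) wraps the trained model with the chars_from_ids /
# ids_from_chars lookups so text can be sampled one character at a time;
# temperature=1 samples from the unscaled distribution.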
one_step_model = OneStep(model, chars_from_ids, ids_from_chars, temperature=1)
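# A single call is, in sketch form (an assumption based on the tutorial's
# OneStep.generate_one_step, not necessarily this script's exact code):
#
#     ids = ids_from_chars(tf.strings.unicode_split(next_char, 'UTF-8')).to_tensor()
#     logits, states = model(inputs=ids, states=states, return_state=True)
#     next_id = tf.random.categorical(logits[:, -1, :] / temperature, num_samples=1)
#     next_char = chars_from_ids(tf.squeeze(next_id, axis=-1))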

states = None
next_char = tf.constant(['Cherry'])
result = [next_char]

# Autoregressively generate 1000 characters, feeding each sampled character and
# the RNN state back in as the next input.
for _ in range(1000):
    next_char, states = one_step_model.generate_one_step(next_char, states=states)
    result.append(next_char)

result = tf.strings.join(result)
print(result[0].numpy().decode('utf-8'), '\n\n' + '_'*80)
logger.info('USER REPR. PROPAGATION:')
logger.info('user_to_session_act: {}'.format(args.user_to_ses_act))
logger.info('user_propagation_mode: {}'.format(args.user_propagation_mode))
logger.info('user_to_output: {}'.format(args.user_to_output))
logger.info('')
logger.info('EVALUATION:')
logger.info('eval_cutoff: {}'.format(args.eval_cutoff))
logger.info('eval_top_pop: {}'.format(args.eval_top_pop))
logger.info('eval_boot: {}'.format(args.eval_boot))
logger.info('eval_file: {}'.format(args.eval_file))

t0 = dt.now()
logger.info('Training started')
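# Validation data is passed only when early stopping is enabled; patience and
# margin (presumably the number of tolerated non-improving epochs and the
# minimum relative improvement) decide when training halts early.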
model.fit(train_data,
          valid_data=test_data if args.early_stopping else None,
          patience=3,
          margin=1.003,
          save_to=args.save_to,
          load_from=args.load_from)
logger.info('Training completed in {}'.format(dt.now() - t0))

logger.info('Evaluation started')
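# If eval_top_pop > 0, restrict evaluation to the most popular training items;
# eval_items=None means every item is a ranking candidate.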
if args.eval_top_pop > 0:
    eval_items = train_data[args.item_key].value_counts()[:args.eval_top_pop].index
else:
    eval_items = None

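# Evaluate next-item prediction over the test sessions; output_rankings=True
# makes the evaluator also return per-event ranks alongside recall and MRR.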
recall, mrr, df_ranks = evaluate_sessions_batch_hier_bootstrap(model,
                                                               train_data,
                                                               test_data,
                                                               cut_off=args.eval_cutoff,
                                                               output_rankings=True,