words_in_dir.add(word_dir.parts[-1])

# Map each word present in the data directory to its line number in the
# dictionary file; that index serves as the word's integer label.
index_by_word = {}
with open(dictionary_path) as f:
    for i, line in enumerate(f):
        word = line.strip()
        if word in words_in_dir:
            index_by_word[word] = i

num_words = len(index_by_word)

model_builder = ModelBuilder(num_frames, frame_shape, num_words, noise_dim,
                             learning_rate)

generator = model_builder.build_generator()
discriminator = model_builder.build_discriminator()

# from_logits=True: the discriminator is expected to emit raw logits
# rather than sigmoid probabilities.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)


def discriminator_loss(real_output, fake_output):
    # Penalize the discriminator for misclassifying real samples (target 1)
    # and generated samples (target 0).
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return real_loss + fake_loss


def generator_loss(fake_output):
    # The generator is rewarded when the discriminator labels its output
    # as real (target 1).
    return cross_entropy(tf.ones_like(fake_output), fake_output)
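

# A minimal sketch of how these losses might plug into a training step,
# following the standard tf.GradientTape GAN pattern. The Adam optimizers,
# the (noise, labels) inputs to the generator, and the (videos, labels)
# inputs to the discriminator are illustrative assumptions, not taken from
# ModelBuilder.
generator_optimizer = tf.keras.optimizers.Adam(learning_rate)
discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate)


@tf.function
def train_step(real_videos, labels):
    # One random noise vector per example in the batch.
    noise = tf.random.normal([tf.shape(real_videos)[0], noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_videos = generator([noise, labels], training=True)

        real_output = discriminator([real_videos, labels], training=True)
        fake_output = discriminator([generated_videos, labels], training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    # Each network gets its own gradients and optimizer update.
    gen_grads = gen_tape.gradient(gen_loss, generator.trainable_variables)
    disc_grads = disc_tape.gradient(disc_loss,
                                    discriminator.trainable_variables)
    generator_optimizer.apply_gradients(
        zip(gen_grads, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(
        zip(disc_grads, discriminator.trainable_variables))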