langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer.prepare_seq2seq_batch(["Making tiny model"])
outputs = tiny_model(**batch, return_dict=True)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
        merges_file=merges_file,
    )
    
# A deliberately tiny configuration: a single encoder/decoder layer, one
# attention head, and d_model=4 keep the model to a few thousand parameters.
config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000, tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1, decoder_layers=1,
    encoder_ffn_dim=4, decoder_ffn_dim=4,
    encoder_attention_heads=1, decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test: run one forward pass to make sure the model is wired up correctly.
# Note: prepare_seq2seq_batch is deprecated in later transformers releases;
# calling tokenizer(["Making tiny model"], return_tensors="pt") works there.
batch = tokenizer.prepare_seq2seq_batch(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

# logits has shape (batch, seq_len, vocab), so this prints the sequence length
print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # cast weights to fp16 to halve the size of the saved file
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
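# Note: `transformers-cli upload` has since been removed from transformers; on
# current hub tooling the same result can be had with `huggingface-cli upload`,
# or with tiny_model.push_to_hub(mname_tiny) / tokenizer.push_to_hub(mname_tiny).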