"WARNING: Text2Mel and SSRN have different saved configs. Will use Text2Mel config!"
        )
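    # When the two checkpoints disagree, the Text2Mel config takes precedence for both networks.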
    Config.set_config(config_t2m)

    # Load networks
    print("Loading Text2Mel...")
    text2mel = Text2Mel().to(device)
    text2mel.load_state_dict(state_t2m["model"])
    text2mel.eval()
    text2mel_step = state_t2m["global_step"]

    print("Loading SSRN...")
    ssrn = SSRN().to(device)
    ssrn.load_state_dict(state_ssrn["model"])
    ssrn.eval()
    ssrn_step = state_ssrn["global_step"]

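    # Interactive synthesis loop: read a line of text, normalize it, and map
    # characters to vocabulary indices before running the networks.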
    while True:
        text = input("> ")
        text = spell_out_numbers(text, args.language)
        text = normalize(text)
        text = text + Config.vocab_end_of_text
        text = vocab_lookup(text)

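        # Encoded text indices (1, N) and an all-zero mel buffer (1, max_T, F)
        # that synthesis fills in frame by frame.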
        L = torch.tensor(text, device=device).unsqueeze(0)
        S = torch.zeros(1, Config.max_T, Config.F, device=device)
        previous_position = torch.zeros(1,
    global_step = 0

    # Learning rate decay. Noam scheme
    warmup_steps = 4000.0

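    # Multiplicative factor on the base learning rate: ramps linearly for the
    # first `warmup_steps` steps, then decays as step**-0.5 (the factor equals
    # 1.0 at step == warmup_steps). The argument passed in by LambdaLR is
    # ignored in favour of the checkpoint-aware `global_step`.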
    def decay(_):
        step = global_step + 1
        return warmup_steps**0.5 * min(step * warmup_steps**-1.5, step**-0.5)

    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=decay)

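    # Resuming restores optimizer, scheduler, and criterion state alongside the
    # model weights so training continues where it left off.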
    if args.restore_path is not None:
        print("Restoring from checkpoint: {}".format(args.restore_path))
        state = torch.load(args.restore_path, map_location=device)
        global_step = state["global_step"]
        net.load_state_dict(state["model"])
        optimizer.load_state_dict(state["optimizer"])
        scheduler.load_state_dict(state["scheduler"])
        l1_criterion.load_state_dict(state["l1_criterion"])
        bd_criterion.load_state_dict(state["bd_criterion"])

    print("Loading dataset...")
    dataset = TTSDataset(args.text_path,
                         args.mel_path,
                         args.lin_path,
                         data_in_memory=True)
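    # Bucket texts of similar length into the same batch to keep padding small.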
    batch_sampler = BucketBatchSampler(
        inputs=[d["text"] for d in dataset.data],
        batch_size=args.batch_size,
        bucket_boundaries=list(range(1, Config.max_N - 1, 20)))
    data_loader = FastDataLoader(dataset,