Example #1
0
                                           vocab,
                                           is_factorized=args.is_factorized,
                                           r=args.r)
        else:
            # NOTE(review): this branch only logs — `model` is never assigned
            # on this path, so `model.cuda()` below would raise NameError.
            # Confirm whether an explicit raise/exit is intended here.
            logging.info("The model is not supported, check args --h")

    # Loss identifier taken from CLI args; passed through to the trainer.
    loss_type = args.loss

    # Move the model to GPU when CUDA is available (USE_CUDA is a
    # module-level flag defined outside this fragment).
    if USE_CUDA:
        model = model.cuda()

    logging.info(model)
    num_epochs = args.epochs

    # Report trainable vs. non-trainable parameter counts.
    # NOTE(review): compute_num_params(model) is called twice; the result
    # could be computed once and unpacked.
    print("Parameters: {}(trainable), {}(non-trainable)".format(
        compute_num_params(model)[0],
        compute_num_params(model)[1]))

    # Launch joint training. Keyword arguments control evaluation cadence,
    # checkpoint-resume metrics, early stopping, and whether state dicts are
    # kept on CPU. The call is truncated here — the fragment ends mid-call.
    trainer = JointTrainer()
    trainer.train(model,
                  vocab,
                  train_data_list,
                  valid_loader_list,
                  loss_type,
                  start_epoch,
                  num_epochs,
                  args,
                  evaluate_every=args.evaluate_every,
                  last_metrics=metrics,
                  early_stop=args.early_stop,
                  cpu_state_dict=args.cpu_state_dict,
Example #2
0
        # Build one validation DataLoader per validation dataset (loop header
        # is outside this fragment). pad_token_id=0 — presumably matches the
        # vocab's PAD index; verify against AudioDataLoader / the vocab class.
        valid_loader = AudioDataLoader(pad_token_id=0, dataset=valid_data, num_workers=args.num_workers)
        valid_loader_list.append(valid_loader)

    # Defaults for a fresh run; overwritten below when resuming.
    start_epoch = 0
    metrics = None
    loaded_args = None
    if args.continue_from != "":
        # Resume: restore model, vocab, optimizer, epoch, and metrics
        # from the checkpoint file.
        logging.info("Continue from checkpoint:" + args.continue_from)
        model, vocab, opt, epoch, metrics, loaded_args = load_model(args.continue_from)
        start_epoch = (epoch)  # index starts from zero
        # NOTE(review): `verbose` is assigned only on this resume branch —
        # confirm it is not read on the fresh-start path below.
        verbose = args.verbose
    else:
        if args.model == "TRFS":
            # Fresh start: build a Transformer model plus a Noam-schedule
            # optimizer for it.
            model = init_transformer_model(args, vocab, is_factorized=args.is_factorized, r=args.r)
            opt = init_optimizer(args, model, "noam")
        else:
            # NOTE(review): this branch only logs — `model` and `opt` stay
            # undefined on this path and the code below will raise NameError.
            # Confirm whether an explicit raise/exit is intended.
            logging.info("The model is not supported, check args --h")
    
    # Loss identifier taken from CLI args; passed through to the trainer.
    loss_type = args.loss

    # Move the model to GPU when CUDA is available.
    if USE_CUDA:
        model = model.cuda()

    logging.info(model)
    num_epochs = args.epochs

    # Report trainable vs. non-trainable parameter counts.
    print("Parameters: {}(trainable), {}(non-trainable)".format(compute_num_params(model)[0], compute_num_params(model)[1]))

    # Launch training with the (possibly checkpoint-restored) state.
    trainer = Trainer()
    trainer.train(model, vocab, train_loader, train_sampler, valid_loader_list, opt, loss_type, start_epoch, num_epochs, args, metrics, early_stop=args.early_stop)