Code Example #1
                manifest_filepath_list=[args.valid_manifest_list[i]],
                normalize=True,
                augment=False,
                input_type=args.input_type)
        # Build one validation loader per manifest and collect them.
        valid_sampler = BucketingSampler(valid_data, batch_size=args.k_train)
        valid_loader = AudioDataLoader(pad_token_id=vocab.PAD_ID,
                                       dataset=valid_data,
                                       num_workers=args.num_workers)
        valid_loader_list.append(valid_loader)

    start_epoch = 0
    metrics = None
    loaded_args = None
    logging.info("Continue from checkpoint:" + args.continue_from)
    if args.training_mode == "meta":
        model, vocab, _, _, epoch, metrics, loaded_args = load_meta_model(
            args.continue_from)
    else:
        model, vocab, _, epoch, metrics, loaded_args = load_joint_model(
            args.continue_from)
    verbose = args.verbose

    loss_type = args.loss

    if USE_CUDA:
        model = model.cuda()

    logging.info(model)
    num_epochs = args.epochs

    print("Parameters: {}(trainable), {}(non-trainable)".format(
        compute_num_params(model)[0],
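
`compute_num_params` is not shown on this page. A minimal sketch of a helper with the behavior the call above implies (returning the trainable and non-trainable parameter counts of a PyTorch module as a pair) could look like the following; the repository's actual implementation may differ:

import torch.nn as nn

def compute_num_params(model: nn.Module):
    # Return (trainable, non-trainable) parameter counts for a module.
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    frozen = sum(p.numel() for p in model.parameters() if not p.requires_grad)
    return trainable, frozen
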
Code Example #2
                manifest_filepath_list=[args.valid_manifest_list[i]],
                normalize=True,
                augment=False,
                input_type=args.input_type)
        # Build one validation loader per manifest and collect them.
        valid_sampler = BucketingSampler(valid_data, batch_size=args.k_train)
        valid_loader = AudioDataLoader(pad_token_id=vocab.PAD_ID,
                                       dataset=valid_data,
                                       num_workers=args.num_workers)
        valid_loader_list.append(valid_loader)

    start_epoch = 0
    metrics = None
    loaded_args = None
    if args.continue_from != "":
        # Resume from a saved meta-training checkpoint.
        logging.info("Continue from checkpoint: " + args.continue_from)
        model, vocab, inner_opt, outer_opt, epoch, metrics, loaded_args = load_meta_model(
            args.continue_from)
        start_epoch = epoch  # epoch index starts from zero
        verbose = args.verbose
    else:
        # Otherwise start from scratch.
        inner_opt, outer_opt = None, None
        if args.model == "TRFS":
            # Initialize an (optionally factorized) Transformer model.
            model = init_transformer_model(args,
                                           vocab,
                                           is_factorized=args.is_factorized,
                                           r=args.r)
        else:
            logging.info("The model is not supported, check args --h")

    loss_type = args.loss

    if USE_CUDA:
        model = model.cuda()
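
In both examples a `BucketingSampler` is built but never passed to `AudioDataLoader`. Assuming `AudioDataLoader` subclasses `torch.utils.data.DataLoader` and forwards its standard keyword arguments (an assumption — its definition is not shown here), a bucketing batch sampler would normally be attached via `batch_sampler`, roughly as follows:

# Sketch only: relies on AudioDataLoader accepting standard DataLoader kwargs.
valid_loader = AudioDataLoader(pad_token_id=vocab.PAD_ID,
                               dataset=valid_data,
                               num_workers=args.num_workers,
                               batch_sampler=valid_sampler)
valid_loader_list.append(valid_loader)

With `batch_sampler` set, `batch_size`, `shuffle`, and `sampler` must not be passed as well, since `torch.utils.data.DataLoader` treats them as mutually exclusive.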