Example #1
    # Epochs whose checkpoints will be extracted. Each checkpoint file is named
    # like 10.params or final.params (the suffix depends on the package).
    to_extracted_epochs = [
        "21"
    ]

    nj = 10
    force = False
    use_gpu = True
    gpu_id = ""
    sleep_time = 10

    # Run a batch extracting process.
    try:
        for position in to_extracted_positions:
            # Generate an extracting config from the nnet config. Which position to extract
            # is controlled by the 'extracted_embedding' parameter of model_creation (by design).
            model_blueprint, model_creation = utils.read_nnet_config(
                "{0}/config/nnet.config".format(model_dir))
            model_creation = model_creation.replace(
                "training=True", "training=False"
            )  # Switch to inference mode so that training-only components are not loaded (saves memory).
            model_creation = model_creation.replace(
                model_params["extracted_embedding"], position)
            extract_config = "{0}.extract.config".format(position)
            utils.write_nnet_config(
                model_blueprint, model_creation,
                "{0}/config/{1}".format(model_dir, extract_config))
            for epoch in to_extracted_epochs:
                model_file = "{0}.{1}".format(epoch, suffix)
                point_name = "{0}_epoch_{1}".format(position, epoch)

                # If the trainer runs in a background thread (not supported yet) or this launcher is run separately with stage=4
                # (i.e., as another process), then this wait-and-listen loop lets extraction start as soon as a checkpoint appears (but it requires more GPU memory).
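
The excerpt stops before the loop itself. A minimal sketch of such a wait-and-listen step, assuming a hypothetical wait_for_checkpoint helper and a params/ subdirectory under model_dir (both are assumptions, not taken from the launcher), could look like this:

import os
import time

def wait_for_checkpoint(model_dir, model_file, sleep_time=10):
    # Poll until the expected checkpoint file appears, then return its path.
    # The params/ subdirectory is an assumed layout of model_dir.
    model_path = "{0}/params/{1}".format(model_dir, model_file)
    while not os.path.exists(model_path):
        time.sleep(sleep_time)
    return model_path

# Usage inside the epoch loop, e.g.:
# model_path = wait_for_checkpoint(model_dir, model_file, sleep_time)
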
Example #2
parser.add_argument("feats_rspecifier", metavar="feats-rspecifier",
                    type=str, help="")

parser.add_argument("vectors_wspecifier", metavar="vectors-wspecifier",
                    type=str, help="")

print(' '.join(sys.argv))  # Echo the full command line, which is handy for logging.

args = parser.parse_args()

# Start

try:
    # The nnet_config file contains both model_blueprint and model_creation.
    if args.nnet_config != "":
        model_blueprint, model_creation = utils.read_nnet_config(args.nnet_config)
    elif args.model_blueprint is not None and args.model_creation is not None:
        model_blueprint = args.model_blueprint
        model_creation = args.model_creation
    else:
        raise ValueError("Expected nnet_config or (model_blueprint, model_creation) to exist.")

    model = utils.create_model_from_py(model_blueprint, model_creation)
    model.load_state_dict(torch.load(args.model_path, map_location='cpu'), strict=False)

    # Select device
    model = utils.select_model_device(model, args.use_gpu, gpu_id=args.gpu_id)

    model.eval()

    with kaldi_io.open_or_fd(args.feats_rspecifier, "rb") as r, \
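
The excerpt ends at the opening of the with statement. As a rough, hypothetical sketch of the per-utterance loop that could follow (not a line-for-line continuation; the extract_embedding method name and the [1, feat-dim, frames] input layout are assumptions, not confirmed by this excerpt):

import numpy as np
import torch
import kaldi_io

with kaldi_io.open_or_fd(args.vectors_wspecifier, "wb") as w:
    for key, feats in kaldi_io.read_mat_ark(args.feats_rspecifier):
        # feats is [frames, feat-dim]; transpose and add a batch axis for a TDNN/conv-style model.
        inputs = torch.from_numpy(feats.T[np.newaxis, :, :]).float()
        inputs = inputs.to(next(model.parameters()).device)
        with torch.no_grad():
            embedding = model.extract_embedding(inputs)  # assumed method name
        kaldi_io.write_vec_flt(w, embedding.squeeze().cpu().numpy(), key=key)
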