def select_device(self):
    """Place ``self.elements["model"]`` on the configured compute device.

    The GPU toggle, GPU id and cudnn ``benchmark`` flag are all read from
    ``self.params``; the (possibly device-moved) model returned by
    ``utils.select_model_device`` is passed straight through to the caller.
    """
    params = self.params
    return utils.select_model_device(
        self.elements["model"],
        params["use_gpu"],
        gpu_id=params["gpu_id"],
        benchmark=params["benchmark"],
    )
try: # nnet_config include model_blueprint and model_creation if args.nnet_config != "": model_blueprint, model_creation = utils.read_nnet_config(args.nnet_config) elif args.model_blueprint is not None and args.model_creation is not None: model_blueprint = args.model_blueprint model_creation = args.model_creation else: raise ValueError("Expected nnet_config or (model_blueprint, model_creation) to exist.") model = utils.create_model_from_py(model_blueprint, model_creation) model.load_state_dict(torch.load(args.model_path, map_location='cpu'), strict=False) # Select device model = utils.select_model_device(model, args.use_gpu, gpu_id=args.gpu_id) model.eval() with kaldi_io.open_or_fd(args.feats_rspecifier, "rb") as r, \ kaldi_io.open_or_fd(args.vectors_wspecifier, 'wb') as w: for line in r: # (key, rxfile, chunk_start, chunk_end) = line.decode().split(' ') # chunk=[chunk_start, chunk_end] # print("Process utterance for key {0}".format(key)) # feats = kaldi_io.read_mat(rxfile, chunk=chunk) (key, rxfile) = line.decode().split(' ') print("Process utterance for key {0}".format(key)) feats = kaldi_io.read_mat(rxfile) embedding = model.extract_embedding(feats) kaldi_io.write_vec_flt(w, embedding.numpy(), key=key)