Code example #1: generator constructor that sets up the C++ inference engine
    def __init__(
        self,
        config: Config,
        model: WaveRNN,
        max_batch_size: int = 10,
        use_cpp_inference: bool = True,
    ) -> None:
        self.config = config
        self.model = model
        self.max_batch_size = max_batch_size
        self.use_cpp_inference = use_cpp_inference

        self.sampling_rate = config.dataset.sampling_rate
        self.mulaw = config.dataset.mulaw

        # the C++ inference path does not support dual softmax
        # (assumption: the flag comes from the model config)
        self.dual_softmax = config.model.dual_softmax
        assert not self.dual_softmax

        # set up the C++ inference engine
        if use_cpp_inference:
            import yukarin_autoreg_cpp

            params = get_fast_forward_params(self.model)
            local_size = (
                config.model.conditioning_size * 2
                if config.model.conditioning_size is not None
                else 0
            )
            yukarin_autoreg_cpp.initialize(
                graph_length=1000,
                max_batch_size=max_batch_size,
                local_size=local_size,
                hidden_size=config.model.hidden_size,
                embedding_size=config.model.embedding_size,
                linear_hidden_size=config.model.linear_hidden_size,
                output_size=2**config.model.bit_size,
                x_embedder_W=to_numpy(params["x_embedder_W"]),
                gru_xw=to_numpy(params["gru_xw"]),
                gru_xb=to_numpy(params["gru_xb"]),
                gru_hw=to_numpy(params["gru_hw"]),
                gru_hb=to_numpy(params["gru_hb"]),
                O1_W=to_numpy(params["O1_W"]),
                O1_b=to_numpy(params["O1_b"]),
                O2_W=to_numpy(params["O2_W"]),
                O2_b=to_numpy(params["O2_b"]),
            )
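
For reference, a minimal usage sketch of this constructor. The class name Generator, the config loader, and the model factory below are assumptions for illustration, not part of the excerpt above.

# hypothetical usage; Generator / load_config / create_model are assumed names
config = load_config("config.yaml")
model = create_model(config.model)
generator = Generator(
    config=config,
    model=model,
    max_batch_size=4,
    use_cpp_inference=True,
)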
Code example #2: PyTorch variant of the constructor, which can also load the predictor from a checkpoint Path
    def __init__(
        self,
        config: Config,
        predictor: Union[WaveRNN, Path],
        use_gpu: bool,
        max_batch_size: int = 10,
        use_fast_inference: bool = True,
    ) -> None:
        self.config = config
        self.max_batch_size = max_batch_size
        self.use_gpu = use_gpu
        self.use_fast_inference = use_fast_inference

        self.sampling_rate = config.dataset.sampling_rate
        self.mulaw = config.dataset.mulaw
        self.bit_size = config.dataset.bit_size
        self.device = torch.device("cuda") if use_gpu else torch.device("cpu")

        if isinstance(predictor, Path):
            state_dict = torch.load(predictor, map_location=self.device)
            predictor = create_predictor(config.network)
            predictor.load_state_dict(state_dict)
        self.predictor = predictor.eval().to(self.device)

        if use_fast_inference and use_gpu:
            # set up the C++ inference engine
            import yukarin_autoreg_cpp

            params = get_fast_forward_params(self.predictor)
            local_size = (
                config.network.conditioning_size * 2
                if config.network.conditioning_size is not None
                else 0
            )
            yukarin_autoreg_cpp.initialize(
                graph_length=1000,
                max_batch_size=max_batch_size,
                local_size=local_size,
                hidden_size=config.network.hidden_size,
                embedding_size=config.network.embedding_size,
                linear_hidden_size=config.network.linear_hidden_size,
                output_size=2**config.network.bit_size,
                **params,
            )
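
A similar sketch for the PyTorch variant. Because the constructor accepts either a WaveRNN instance or a checkpoint Path, it can load the weights itself. The class name, the config object, and the file path are again assumptions:

from pathlib import Path

import torch

# hypothetical usage; Generator, config, and the checkpoint path are assumed names
generator = Generator(
    config=config,
    predictor=Path("model/predictor.pth"),
    use_gpu=torch.cuda.is_available(),
)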
Code example #3: batch-size consistency check driving the C++ inference (PyTorch tensors)
import numpy
import torch
from torch import Tensor

import yukarin_autoreg_cpp

# get_fast_forward_params, model, config, etc. come from the surrounding project code
params = get_fast_forward_params(model)


def to_numpy(a):
    # convert a (possibly GPU-resident) torch Tensor to a C-contiguous numpy array
    if isinstance(a, Tensor):
        a = a.detach().cpu().numpy()
    return numpy.ascontiguousarray(a)


# initialize the C++ inference engine
yukarin_autoreg_cpp.initialize(
    graph_length=graph_length,
    max_batch_size=max_batch_size,
    local_size=local_size,
    hidden_size=config.hidden_size,
    embedding_size=config.embedding_size,
    linear_hidden_size=config.linear_hidden_size,
    output_size=2**config.bit_size,
    **params,
)

before_output = None
for batch_size in [1, 2, 4]:
    x = base_x[:batch_size].clone()
    l_array = base_l_array[:, :batch_size].clone()
    hidden = base_hidden[:batch_size].clone()

    # (optional debug) zero the inputs to make runs deterministic
    # x = torch.zeros_like(x)
    # l_array = torch.zeros_like(l_array)
    # hidden = torch.zeros_like(hidden)
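
The before_output variable and the loop over batch sizes suggest that this checks batch-size invariance: the first sample is shared by every batch, so its output should not depend on how many other samples are batched with it. A sketch of how the loop body could continue, assuming a hypothetical run_inference helper that wraps the C++ call (the real entry point and its signature are not shown in the excerpt):

    # hypothetical helper wrapping the C++ call; not part of the excerpt
    output = run_inference(x=x, l_array=l_array, hidden=hidden)

    # the first sample is shared by every batch, so its output must match
    if before_output is not None:
        numpy.testing.assert_allclose(
            to_numpy(output[0]), to_numpy(before_output[0]), rtol=1e-4
        )
    before_output = output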
Code example #4: the same check in the CuPy version, converting each weight explicitly
import cupy
import numpy

import yukarin_autoreg_cpp


def to_numpy(a):
    # copy a cupy (GPU) array to host memory and force C-contiguous layout
    if isinstance(a, cupy.ndarray):
        a = cupy.asnumpy(a)
    return numpy.ascontiguousarray(a)

# initialize the C++ inference engine
yukarin_autoreg_cpp.initialize(
    graph_length=graph_length,
    max_batch_size=max_batch_size,
    local_size=local_size,
    hidden_size=config.hidden_size,
    embedding_size=config.embedding_size,
    linear_hidden_size=config.linear_hidden_size,
    output_size=2**config.bit_size,
    x_embedder_W=to_numpy(params["x_embedder_W"]),
    gru_xw=to_numpy(params["gru_xw"]),
    gru_xb=to_numpy(params["gru_xb"]),
    gru_hw=to_numpy(params["gru_hw"]),
    gru_hb=to_numpy(params["gru_hb"]),
    O1_W=to_numpy(params["O1_W"]),
    O1_b=to_numpy(params["O1_b"]),
    O2_W=to_numpy(params["O2_W"]),
    O2_b=to_numpy(params["O2_b"]),
)

before_output = None
for batch_size in [1, 2, 4]:
    x = model.xp.copy(base_x[:batch_size])
    l_array = model.xp.copy(base_l_array[:, :batch_size])
    hidden = model.xp.copy(base_hidden[:batch_size])
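
One design note on to_numpy: a C++ extension that receives raw pointers generally requires host-resident, C-contiguous memory, which is why the cupy array is first copied to the host and then passed through numpy.ascontiguousarray. A small self-contained illustration of the contiguity part (plain NumPy; the requirement itself is an assumption about the binding):

import numpy

a = numpy.arange(12, dtype=numpy.float32).reshape(3, 4)
t = a.T                            # a transposed view; not C-contiguous
print(t.flags["C_CONTIGUOUS"])     # False
c = numpy.ascontiguousarray(t)     # forces a contiguous copy
print(c.flags["C_CONTIGUOUS"])     # True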