Code Example #1
def __init__(self, n_loop, n_layer, a_channels, r_channels, s_channels, use_embed_tanh):
    super().__init__()
    self.save_hyperparameters()
    # Encoder: UpsampleNet that brings the conditioning features up to waveform resolution
    self.encoder = UpsampleNet(
        n_loop * n_layer,
        r_channels)
    # Decoder: the WaveNet itself, operating on the quantized waveform
    self.decoder = WaveNet(
        n_loop,
        n_layer,
        a_channels,
        r_channels,
        s_channels,
        use_embed_tanh)
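This constructor pairs an UpsampleNet encoder with a WaveNet decoder; the save_hyperparameters() call suggests a PyTorch Lightning module. A minimal instantiation sketch follows, assuming the enclosing class is called EncoderDecoderModel and using placeholder hyperparameter values (both the class name and the values are assumptions, not taken from the listing):

# Hypothetical usage; class name and values are illustrative only.
model = EncoderDecoderModel(
    n_loop=4, n_layer=10,   # dilation loops and layers per loop (assumed)
    a_channels=256,         # quantized amplitude channels (assumed)
    r_channels=64,          # residual channels (assumed)
    s_channels=256,         # skip channels (assumed)
    use_embed_tanh=True)
# model.encoder upsamples conditioning features; model.decoder is the WaveNet proper.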
Code Example #2
File: generate.py  Project: tohmae/chainer
if args.gpu >= 0:
    use_gpu = True
    chainer.cuda.get_device_from_id(args.gpu).use()
else:
    use_gpu = False

# Preprocess
_, condition, _ = Preprocess(
    sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
    length=None, quantize=args.a_channels)(args.input)
x = numpy.zeros([1, args.a_channels, 1, 1], dtype=condition.dtype)
condition = numpy.expand_dims(condition, axis=0)

# Define networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
    args.n_loop, args.n_layer,
    args.a_channels, args.r_channels, args.s_channels,
    args.use_embed_tanh)

# Load trained parameters
chainer.serializers.load_npz(
    args.model, encoder, 'updater/model:main/predictor/encoder/')
chainer.serializers.load_npz(
    args.model, decoder, 'updater/model:main/predictor/decoder/')

# Non-autoregressive generate
if use_gpu:
    x = chainer.cuda.to_gpu(x, device=args.gpu)
    condition = chainer.cuda.to_gpu(condition, device=args.gpu)
    encoder.to_gpu(device=args.gpu)
    decoder.to_gpu(device=args.gpu)
x = chainer.Variable(x)
Code Example #3
File: train.py  Project: zghzdxs/chainer
# Datasets
if not os.path.isdir(args.dataset):
    raise RuntimeError('Dataset directory not found: {}'.format(args.dataset))
paths = sorted([
    str(path) for path in pathlib.Path(args.dataset).glob('wav48/*/*.wav')])
preprocess = Preprocess(
    sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
    length=args.length, quantize=args.a_channels)
dataset = chainer.datasets.TransformDataset(paths, preprocess)
train, valid = chainer.datasets.split_dataset_random(
    dataset, int(len(dataset) * 0.9), args.seed)

# Networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
    args.n_loop, args.n_layer,
    args.a_channels, args.r_channels, args.s_channels,
    args.use_embed_tanh)
model = chainer.links.Classifier(EncoderDecoderModel(encoder, decoder))

# Optimizer
optimizer = chainer.optimizers.Adam(1e-4)
optimizer.setup(model)

# Iterators
train_iter = chainer.iterators.MultiprocessIterator(
    train, args.batchsize,
    n_processes=args.process, n_prefetch=args.prefetch)
valid_iter = chainer.iterators.MultiprocessIterator(
    valid, args.batchsize, repeat=False, shuffle=False,
    n_processes=args.process, n_prefetch=args.prefetch)
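The excerpt stops at the iterators. A minimal sketch of how such pieces are typically wired into a Chainer Trainer follows; the device, epoch count, and output directory arguments are assumptions, not taken from the project:

# Sketch only: connect iterators and optimizer through an updater and trainer.
updater = chainer.training.updaters.StandardUpdater(
    train_iter, optimizer, device=args.gpu)           # device argument assumed
trainer = chainer.training.Trainer(
    updater, (args.epoch, 'epoch'), out='result')     # stop trigger and out dir assumed
trainer.extend(chainer.training.extensions.Evaluator(
    valid_iter, model, device=args.gpu))
trainer.run()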
Code Example #4
if device.xp is chainer.backends.cuda.cupy:
    chainer.global_config.autotune = True

# Preprocess
_, condition, _ = Preprocess(sr=16000,
                             n_fft=1024,
                             hop_length=256,
                             n_mels=128,
                             top_db=20,
                             length=None,
                             quantize=args.a_channels)(args.input)
x = numpy.zeros([1, args.a_channels, 1, 1], dtype=condition.dtype)
condition = numpy.expand_dims(condition, axis=0)

# Define networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(args.n_loop, args.n_layer, args.a_channels, args.r_channels,
                  args.s_channels, args.use_embed_tanh)

# Load trained parameters
chainer.serializers.load_npz(args.model, encoder,
                             'updater/model:main/predictor/encoder/')
chainer.serializers.load_npz(args.model, decoder,
                             'updater/model:main/predictor/decoder/')

# Non-autoregressive generate
x = device.send(x)
condition = device.send(condition)
encoder.to_device(device)
decoder.to_device(device)
x = chainer.Variable(x)
condition = chainer.Variable(condition)
conditions = encoder(condition)
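# 'conditions' holds the spectrogram features after the encoder has upsampled them
# (typically to the waveform's time resolution); they condition the WaveNet decoder.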
Code Example #5
File: generate.py  Project: asi1024/chainer
device.use()

if device.xp is chainer.backends.cuda.cupy:
    chainer.global_config.autotune = True

# Preprocess
_, condition, _ = Preprocess(
    sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
    length=None, quantize=args.a_channels)(args.input)
x = numpy.zeros([1, args.a_channels, 1, 1], dtype=condition.dtype)
condition = numpy.expand_dims(condition, axis=0)

# Define networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
    args.n_loop, args.n_layer,
    args.a_channels, args.r_channels, args.s_channels,
    args.use_embed_tanh)

# Load trained parameters
chainer.serializers.load_npz(
    args.model, encoder, 'updater/model:main/predictor/encoder/')
chainer.serializers.load_npz(
    args.model, decoder, 'updater/model:main/predictor/decoder/')

# Non-autoregressive generate
x = device.send(x)
condition = device.send(condition)
encoder.to_device(device)
decoder.to_device(device)
x = chainer.Variable(x)
condition = chainer.Variable(condition)
Code Example #6
        yield output, target


if __name__ == '__main__':
    args = parseInput()
    prefix = args.event + args.channel + str(args.freq)
    fileCheck(prefix)
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    print('number of gpus: ', len(gpus))

    if args.save_file:
        stdoutOrigin = sys.stdout
        sys.stdout = open(args.output + '.txt', 'w')

    model = WaveNet(args.num_residuals, args.num_filters)
    if args.model_path is not None:
        model.load_weights(args.model_path)
    model.build((None, 8192, 1))
    optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate)
    criteria = tf.keras.losses.MeanSquaredError()

    losses = []
    maxSNR = np.linspace(1.75, 1.0, args.epoch)
    for epoch in range(args.epoch):
        train_dataset = tf.data.Dataset.from_generator(
            generator, (tf.float64, tf.float64), ((8192, 1), 8192),
            (args.train_file, prefix, args.blank_ratio, (0.5, maxSNR[epoch])))
        train_dataset = train_dataset.shuffle(buffer_size=9861).batch(
            args.batch_size)
        for (batch_n, (input, target)) in enumerate(train_dataset):
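            # NOTE: the listing ends here. The lines below are an assumed, typical
            # TensorFlow 2 continuation (one MSE gradient step), not the project's code.
            with tf.GradientTape() as tape:
                output = model(input, training=True)
                loss = criteria(target, output)
            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            losses.append(float(loss))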