Example #1
0
 def __init__(self, n_loop, n_layer, a_channels, r_channels, s_channels, use_embed_tanh):
     """Wire up the conditioning encoder and the WaveNet decoder.

     Args:
         n_loop: number of dilation loops in the WaveNet stack.
         n_layer: layers per loop; the encoder receives n_loop * n_layer
             as its first argument (presumably the total depth used to
             size the conditioning network -- TODO confirm).
         a_channels: passed through to WaveNet unchanged.
         r_channels: shared between the encoder and the decoder.
         s_channels: passed through to WaveNet unchanged.
         use_embed_tanh: passed through to WaveNet unchanged.
     """
     super().__init__()
     # Records the constructor arguments on the instance
     # (pytorch-lightning-style API -- NOTE(review): the base class is
     # outside this fragment; confirm).
     self.save_hyperparameters()
     self.encoder = UpsampleNet(
         n_loop*n_layer,
         r_channels)
     self.decoder = WaveNet(
         n_loop,
         n_layer,
         a_channels,
         r_channels,
         s_channels,
         use_embed_tanh)
Example #2
0
File: train.py  Project: zghzdxs/chainer
    chainer.global_config.autotune = True

# Datasets
# Fail fast with a clear message when the dataset root is absent.
if not os.path.isdir(args.dataset):
    raise RuntimeError('Dataset directory not found: {}'.format(args.dataset))
# VCTK-style layout (wav48/<speaker>/<utterance>.wav); sorting makes the
# random split below reproducible for a fixed seed.
paths = sorted([
    str(path) for path in pathlib.Path(args.dataset).glob('wav48/*/*.wav')])
preprocess = Preprocess(
    sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
    length=args.length, quantize=args.a_channels)
dataset = chainer.datasets.TransformDataset(paths, preprocess)
# 90% train / 10% validation, deterministic under args.seed.
train, valid = chainer.datasets.split_dataset_random(
    dataset, int(len(dataset) * 0.9), args.seed)

# Networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
    args.n_loop, args.n_layer,
    args.a_channels, args.r_channels, args.s_channels,
    args.use_embed_tanh)
# Classifier wraps the predictor with its default loss/accuracy reporting
# -- NOTE(review): relies on the default lossfun; confirm that is intended.
model = chainer.links.Classifier(EncoderDecoderModel(encoder, decoder))

# Optimizer
optimizer = chainer.optimizers.Adam(1e-4)
optimizer.setup(model)

# Iterators
train_iter = chainer.iterators.MultiprocessIterator(
    train, args.batchsize,
    n_processes=args.process, n_prefetch=args.prefetch)
valid_iter = chainer.iterators.MultiprocessIterator(
Example #3
0
File: generate.py  Project: tohmae/chainer
# -1 conventionally means CPU; anything else selects a CUDA device.
if args.gpu != -1:
    # cuDNN autotune picks the fastest conv algorithms for fixed shapes.
    chainer.global_config.autotune = True
    use_gpu = True
    chainer.cuda.get_device_from_id(args.gpu).use()
else:
    use_gpu = False

# Preprocess
# Only the conditioning features are needed at generation time; the other
# two returned items are discarded.
_, condition, _ = Preprocess(
    sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
    length=None, quantize=args.a_channels)(args.input)
# Zero seed input of shape (batch=1, a_channels, 1, 1), matching the
# condition's dtype so both live on the same precision.
x = numpy.zeros([1, args.a_channels, 1, 1], dtype=condition.dtype)
condition = numpy.expand_dims(condition, axis=0)  # add batch axis

# Define networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
    args.n_loop, args.n_layer,
    args.a_channels, args.r_channels, args.s_channels,
    args.use_embed_tanh)

# Load trained parameters
# Paths address the sub-links inside a Chainer Trainer snapshot.
chainer.serializers.load_npz(
    args.model, encoder, 'updater/model:main/predictor/encoder/')
chainer.serializers.load_npz(
    args.model, decoder, 'updater/model:main/predictor/decoder/')

# Non-autoregressive generate
if use_gpu:
    x = chainer.cuda.to_gpu(x, device=args.gpu)
    condition = chainer.cuda.to_gpu(condition, device=args.gpu)
Example #4
0
# preprocess
n = 1  # batch size; currently supports only 1
inputs = Preprocess(params.sr, params.n_fft, params.hop_length, params.n_mels,
                    params.top_db, None, params.categorical_output_dim)(path)

_, condition, _ = inputs
# When categorical output is disabled (False or None), the network consumes
# a scalar waveform channel; otherwise it consumes one channel per category.
if params.categorical_output_dim is False or params.categorical_output_dim is None:
    input_dim = 1
else:
    # BUG FIX: the original read the bare name `categorical_output_dim`,
    # which is not defined in this scope (NameError at runtime). Every
    # other hyperparameter here is read from `params`, so do the same.
    input_dim = params.categorical_output_dim
# Zero seed input of shape (batch, input_dim, 1, 1).
x = numpy.zeros([n, input_dim, 1, 1], dtype=numpy.float32)
condition = numpy.expand_dims(condition, axis=0)  # add batch axis

# make model
encoder = UpsampleNet(params.upsample_factors)
decoder = WaveNet(params.n_loop, params.n_layer, params.filter_size,
                  params.residual_channels, params.dilated_channels,
                  params.skip_channels, params.output_dim, params.quantize,
                  params.log_scale_min, params.condition_dim,
                  params.dropout_zero_rate)

# load trained parameter
# The snapshot stores both networks under the updater's model hierarchy.
chainer.serializers.load_npz(args.model, encoder,
                             'updater/model:main/encoder/')
chainer.serializers.load_npz(args.model, decoder,
                             'updater/model:main/decoder/')

# Non-negative gpu id selects a CUDA device; otherwise stay on CPU.
if args.gpu >= 0:
    use_gpu = True
    chainer.cuda.get_device_from_id(args.gpu).use()
Example #5
0
# 90% train / 10% validation, deterministic under params.split_seed.
train, valid = chainer.datasets.split_dataset_random(dataset,
                                                     int(len(dataset) * 0.9),
                                                     params.split_seed)

# make directory of results
# Timestamped run directory; the sources are copied in so every run keeps
# an exact snapshot of the code and config that produced it.
result = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
os.mkdir(result)
shutil.copy(__file__, os.path.join(result, __file__))
shutil.copy('utils.py', os.path.join(result, 'utils.py'))
shutil.copy('params.py', os.path.join(result, 'params.py'))
shutil.copy('generate.py', os.path.join(result, 'generate.py'))
shutil.copy('net.py', os.path.join(result, 'net.py'))
shutil.copytree('WaveNet', os.path.join(result, 'WaveNet'))

# Model
encoder = UpsampleNet(params.upsample_factors)
decoder = WaveNet(params.n_loop, params.n_layer, params.filter_size,
                  params.residual_channels, params.dilated_channels,
                  params.skip_channels, params.output_dim, params.quantize,
                  params.log_scale_min, params.condition_dim,
                  params.dropout_zero_rate)

# Select the loss (and accuracy, softmax only) by output distribution.
# NOTE(review): no fallback branch is visible in this fragment; an unknown
# distribution_type would leave loss_fun unbound -- presumably validated
# elsewhere. Confirm.
if params.distribution_type == 'gaussian':
    loss_fun = decoder.calculate_gaussian_loss
    acc_fun = None
elif params.distribution_type == 'logistic':
    loss_fun = decoder.calculate_logistic_loss
    acc_fun = None
elif params.distribution_type == 'softmax':
    loss_fun = chainer.functions.softmax_cross_entropy
    acc_fun = chainer.functions.accuracy
Example #6
0
# autotune only helps the cuDNN backend; skip it on CPU/other devices.
if device.xp is chainer.backends.cuda.cupy:
    chainer.global_config.autotune = True

# Preprocess
# Only the conditioning features are needed at generation time; the other
# two returned items are discarded.
_, condition, _ = Preprocess(sr=16000,
                             n_fft=1024,
                             hop_length=256,
                             n_mels=128,
                             top_db=20,
                             length=None,
                             quantize=args.a_channels)(args.input)
# Zero seed input of shape (batch=1, a_channels, 1, 1), matching the
# condition's dtype.
x = numpy.zeros([1, args.a_channels, 1, 1], dtype=condition.dtype)
condition = numpy.expand_dims(condition, axis=0)  # add batch axis

# Define networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(args.n_loop, args.n_layer, args.a_channels, args.r_channels,
                  args.s_channels, args.use_embed_tanh)

# Load trained parameters
# Paths address the sub-links inside a Chainer Trainer snapshot.
chainer.serializers.load_npz(args.model, encoder,
                             'updater/model:main/predictor/encoder/')
chainer.serializers.load_npz(args.model, decoder,
                             'updater/model:main/predictor/decoder/')

# Non-autoregressive generate
# Move data and parameters to the selected backend (device-generic API).
x = device.send(x)
condition = device.send(condition)
encoder.to_device(device)
decoder.to_device(device)
x = chainer.Variable(x)
Example #7
0
                                                     int(len(dataset) * 0.9),
                                                     params.split_seed)

# make directory of results
# Timestamped run directory; the sources (including the teacher's params)
# are copied in so every run keeps a snapshot of its exact configuration.
result = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
os.mkdir(result)
shutil.copy(__file__, os.path.join(result, __file__))
shutil.copy('utils.py', os.path.join(result, 'utils.py'))
shutil.copy('params.py', os.path.join(result, 'params.py'))
shutil.copy('teacher_params.py', os.path.join(result, 'teacher_params.py'))
shutil.copy('generate.py', os.path.join(result, 'generate.py'))
shutil.copy('net.py', os.path.join(result, 'net.py'))
shutil.copytree('WaveNet', os.path.join(result, 'WaveNet'))

# Model
# Teacher/student pair: the teacher WaveNet is built from teacher_params
# and restored from a trained snapshot below; the student ParallelWaveNet
# is built from this run's params (presumably a distillation setup --
# TODO confirm against the training loop, which is outside this fragment).
encoder = UpsampleNet(teacher_params.upsample_factors)
teacher = WaveNet(teacher_params.n_loop, teacher_params.n_layer,
                  teacher_params.filter_size, teacher_params.residual_channels,
                  teacher_params.dilated_channels,
                  teacher_params.skip_channels, teacher_params.output_dim,
                  teacher_params.quantize, teacher_params.log_scale_min,
                  teacher_params.condition_dim,
                  teacher_params.dropout_zero_rate)
student = ParallelWaveNet(params.n_loops, params.n_layers, params.filter_size,
                          params.residual_channels, params.dilated_channels,
                          params.skip_channels, params.condition_dim,
                          params.dropout_zero_rate)

# Restore the trained encoder (and, below, the teacher) from the snapshot.
chainer.serializers.load_npz(params.model, encoder,
                             'updater/model:main/encoder/')
chainer.serializers.load_npz(params.model, teacher,
Example #8
0
File: generate.py  Project: asi1024/chainer
# Device-generic selection (accepts CPU/GPU specifiers alike).
device = chainer.get_device(args.device)
device.use()

# autotune only helps the cuDNN backend; skip it on CPU/other devices.
if device.xp is chainer.backends.cuda.cupy:
    chainer.global_config.autotune = True

# Preprocess
# Only the conditioning features are needed at generation time; the other
# two returned items are discarded.
_, condition, _ = Preprocess(
    sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
    length=None, quantize=args.a_channels)(args.input)
# Zero seed input of shape (batch=1, a_channels, 1, 1), matching the
# condition's dtype.
x = numpy.zeros([1, args.a_channels, 1, 1], dtype=condition.dtype)
condition = numpy.expand_dims(condition, axis=0)  # add batch axis

# Define networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
    args.n_loop, args.n_layer,
    args.a_channels, args.r_channels, args.s_channels,
    args.use_embed_tanh)

# Load trained parameters
# Paths address the sub-links inside a Chainer Trainer snapshot.
chainer.serializers.load_npz(
    args.model, encoder, 'updater/model:main/predictor/encoder/')
chainer.serializers.load_npz(
    args.model, decoder, 'updater/model:main/predictor/decoder/')

# Non-autoregressive generate
x = device.send(x)
condition = device.send(condition)
encoder.to_device(device)
Example #9
0
else:
    use_gpu = False

# Preprocess
# Only the conditioning features are needed at generation time; the other
# two returned items are discarded.
_, condition, _ = Preprocess(sr=16000,
                             n_fft=1024,
                             hop_length=256,
                             n_mels=128,
                             top_db=20,
                             length=None,
                             quantize=args.a_channels)(args.input)
# Zero seed input of shape (batch=1, a_channels, 1, 1), matching the
# condition's dtype.
x = numpy.zeros([1, args.a_channels, 1, 1], dtype=condition.dtype)
condition = numpy.expand_dims(condition, axis=0)  # add batch axis

# Define networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(args.n_loop, args.n_layer, args.a_channels, args.r_channels,
                  args.s_channels, args.use_embed_tanh)

# Load trained parameters
# Paths address the sub-links inside a Chainer Trainer snapshot.
chainer.serializers.load_npz(args.model, encoder,
                             'updater/model:main/predictor/encoder/')
chainer.serializers.load_npz(args.model, decoder,
                             'updater/model:main/predictor/decoder/')

# Non-autoregressive generate
# Legacy cuda API: move data and parameters to the selected GPU.
if use_gpu:
    x = chainer.cuda.to_gpu(x, device=args.gpu)
    condition = chainer.cuda.to_gpu(condition, device=args.gpu)
    encoder.to_gpu(device=args.gpu)
    decoder.to_gpu(device=args.gpu)
Example #10
0
# 90% train / 10% validation, deterministic under params.split_seed.
train, valid = chainer.datasets.split_dataset_random(dataset,
                                                     int(len(dataset) * 0.9),
                                                     params.split_seed)

# make directory of results
# Timestamped run directory; the sources are copied in so every run keeps
# an exact snapshot of the code and config that produced it.
result = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
os.mkdir(result)
shutil.copy(__file__, os.path.join(result, __file__))
shutil.copy('utils.py', os.path.join(result, 'utils.py'))
shutil.copy('params.py', os.path.join(result, 'params.py'))
shutil.copy('generate.py', os.path.join(result, 'generate.py'))
shutil.copy('net.py', os.path.join(result, 'net.py'))
shutil.copytree('WaveNet', os.path.join(result, 'WaveNet'))

# Model
encoder = UpsampleNet(params.channels, params.upsample_factors)
wavenet = WaveNet(params.n_loop, params.n_layer, params.filter_size,
                  params.input_dim, params.residual_channels,
                  params.dilated_channels, params.skip_channels,
                  params.quantize, params.use_logistic, params.n_mixture,
                  params.log_scale_min, params.condition_dim,
                  params.dropout_zero_rate)

# Wrap the network in an exponential-moving-average copy when enabled;
# ema_mu >= 1 disables averaging and uses the raw network directly.
if params.ema_mu < 1:
    decoder = ExponentialMovingAverage(wavenet, params.ema_mu)
else:
    decoder = wavenet

# NOTE(review): the matching non-logistic branch lies outside this
# fragment; only the logistic path is visible here.
if params.use_logistic:
    loss_fun = wavenet.calculate_logistic_loss
    acc_fun = None