Example #1
import lpcnet
import sys
import numpy as np
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from ulaw import ulaw2lin, lin2ulaw
import keras.backend as K
import h5py

import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
set_session(tf.Session(config=config))

model, enc, dec = lpcnet.new_lpcnet_model()

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
#model.summary()

feature_file = sys.argv[1]
out_file = sys.argv[2]
frame_size = 160
nb_features = 55
nb_used_features = model.nb_used_features

features = np.fromfile(feature_file, dtype='float32')
features = np.reshape(features, (-1, nb_features))  # reshape (not resize): -1 lets numpy infer the frame count
nb_frames = 1
feature_chunk_size = features.shape[0]
pcm_chunk_size = frame_size*feature_chunk_size
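The snippet is truncated after the chunk-size computation. A minimal sketch of a hypothetical continuation, using only names defined above (pcm and fout are illustrative, not from the original):

# Hypothetical continuation, not part of the original snippet:
pcm = np.zeros((nb_frames * pcm_chunk_size,), dtype='float32')  # synthesis output buffer
fout = open(out_file, 'wb')  # out_file came from sys.argv[2] above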
Example #2
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()

# use this option to reserve GPU memory, e.g. for running more than
# one thing at a time.  Best to disable for GPUs with small memory
config.gpu_options.per_process_gpu_memory_fraction = 0.44

set_session(tf.Session(config=config))

nb_epochs = 120

# Try reducing batch_size if you run out of memory on your GPU
batch_size = 64

model, _, _ = lpcnet.new_lpcnet_model(training=True)

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
model.summary()

feature_file = sys.argv[1]
pcm_file = sys.argv[2]  # 16-bit signed short PCM samples
frame_size = model.frame_size
nb_features = 55
nb_used_features = model.nb_used_features
feature_chunk_size = 15
pcm_chunk_size = frame_size * feature_chunk_size

# 'u' for unquantised: load 16-bit PCM samples and convert to mu-law
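A minimal sketch of the loading step that comment describes, assuming numpy as np and the lin2ulaw helper from the ulaw module imported in Example #1 (variable names are illustrative):

udata = np.fromfile(pcm_file, dtype='int16')  # linear 16-bit samples ("u" = unquantised)
data = lin2ulaw(udata)                        # companded 8-bit mu-law training targets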
Example #3
    max_conv_inputs = max(max_conv_inputs, weights[0].shape[1]*weights[0].shape[0])
    f.write(struct.pack('iiii', weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], Activations[activation]))
Conv1D.dump_layer = dump_conv1d_layer

def dump_embedding_layer_impl(name, weights, f):
    printVector(f, weights, name + '_weights')
    f.write(struct.pack('ii', weights.shape[0], weights.shape[1]))

def dump_embedding_layer(self, f):
    name = self.name
    print("printing layer " + name + " of type " + self.__class__.__name__)
    weights = self.get_weights()[0]
    dump_embedding_layer_impl(name, weights, f)
Embedding.dump_layer = dump_embedding_layer

model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=384, use_gpu=False)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])

model.load_weights(sys.argv[1])

bf = open('nnet_data.bin', 'wb')

embed_size = lpcnet.embed_size

E = model.get_layer('embed_sig').get_weights()[0]
W = model.get_layer('gru_a').get_weights()[0][:embed_size,:]
dump_embedding_layer_impl('gru_a_embed_sig', np.dot(E, W), bf)
W = model.get_layer('gru_a').get_weights()[0][embed_size:2*embed_size,:]
dump_embedding_layer_impl('gru_a_embed_pred', np.dot(E, W), bf)
W = model.get_layer('gru_a').get_weights()[0][2*embed_size:3*embed_size,:]
dump_embedding_layer_impl('gru_a_embed_exc', np.dot(E, W), bf)
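The np.dot(E, W) calls above fold the signal embedding matrix into each third of gru_a's input kernel, so the inference code can look up one precomputed row per symbol instead of multiplying by the embedding at run time. A toy check of that identity (shapes are illustrative):

import numpy as np
E = np.random.rand(256, 128)       # embedding: 256 u-law symbols -> 128 dims
W = np.random.rand(128, 3 * 384)   # one input-kernel slice of the GRU
table = np.dot(E, W)               # one precomputed row per symbol
assert np.allclose(table[17], E[17] @ W)  # lookup == embed-then-multiply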
Example #4
import h5py
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session

config = tf.compat.v1.ConfigProto()
# Enable GTX 1660 and Tesla T4 GPUs to be used for training (likely other GPU models too)
# https://devtalk.nvidia.com/default/topic/1048627/cuda-setup-and-installation/does-the-latest-gtx-1660-model-support-cuda-/
config.gpu_options.allow_growth = True
# use this option to reserve GPU memory, e.g. for running more than
# one thing at a time.  Best to disable for GPUs with small memory
config.gpu_options.per_process_gpu_memory_fraction = 0.44

set_session(tf.compat.v1.Session(config=config))

model, _, _ = lpcnet.new_lpcnet_model(training=True,
                                      use_gpu=defs['use_gpu'],
                                      rnn_units1=defs['rnn_units1'],
                                      rnn_units2=defs['rnn_units2'])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
model.summary()

frame_size = model.frame_size
nb_features = 55
nb_used_features = model.nb_used_features
feature_chunk_size = 15
pcm_chunk_size = frame_size * feature_chunk_size

# try to create the checkpoint directory
if not os.path.exists(defs['checkpoints_dir']):
    os.makedirs(defs['checkpoints_dir'])
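With the directory in place, a checkpoint callback can be attached to training. A hedged sketch, assuming keras.callbacks.ModelCheckpoint as imported in Example #1 (the filename pattern is an assumption, not from the original):

checkpoint = ModelCheckpoint(
    os.path.join(defs['checkpoints_dir'], 'lpcnet_{epoch:02d}.h5'))
# later: model.fit(..., callbacks=[checkpoint])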
Example #5
# use this option to reserve GPU memory, e.g. for running more than
# one thing at a time.  Best to disable for GPUs with small memory
config.gpu_options.per_process_gpu_memory_fraction = 0.83
config.gpu_options.allocator_type = 'BFC'  # "best-fit with coalescing", simplified from a version of dlmalloc
config.gpu_options.allow_growth = False
config.allow_soft_placement = True
set_session(tf.Session(config=config))

init_epoch = 0
nb_epochs = 100

# Try reducing batch_size if you run out of memory on your GPU
batch_size = 256

#with tf.device("/gpu:0"):
model, _, _ = lpcnet.new_lpcnet_model(training=True, use_gpu=True)

#model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
model.summary()

# with tf.device("/gpu:0"):
if True:  # stand-in for the commented-out tf.device scope above
    feature_file = sys.argv[1]
    pcm_file = sys.argv[2]     # 16-bit signed short PCM samples
    frame_size = model.frame_size
    nb_features = 55
    nb_used_features = model.nb_used_features
    feature_chunk_size = 15
    pcm_chunk_size = frame_size*feature_chunk_size

    # 'u' for unquantised: load 16-bit PCM samples and convert to mu-law
Example #6
import os
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()

# use this option to reserve GPU memory, e.g. for running more than
# one thing at a time.  Best to disable for GPUs with small memory
config.gpu_options.per_process_gpu_memory_fraction = 0.9

set_session(tf.Session(config=config))

nb_epochs = 120

# Try reducing batch_size if you run out of memory on your GPU
batch_size = 64
model, _, _ = lpcnet.new_lpcnet_model()

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
model.summary()

feature_file = sys.argv[1]
pcm_file = sys.argv[2]  # 16-bit signed short PCM samples
frame_size = 160
nb_features = 55
nb_used_features = model.nb_used_features
feature_chunk_size = 15
pcm_chunk_size = frame_size * feature_chunk_size

# 'u' for unquantised: load 16-bit PCM samples and convert to mu-law
Example #7
import lpcnet
import sys
import numpy as np
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from ulaw import ulaw2lin, lin2ulaw
import keras.backend as K
import h5py

import tensorflow as tf
from tensorflow.compat.v1.keras.backend import set_session
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
set_session(tf.compat.v1.Session(config=config))

model, enc, dec = lpcnet.new_lpcnet_model(use_gpu=False)

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
#model.summary()

feature_file = sys.argv[1]
out_file = sys.argv[2]
frame_size = model.frame_size
nb_features = 55
nb_used_features = model.nb_used_features

features = np.fromfile(feature_file, dtype='float32')
features = np.reshape(features, (-1, nb_features))  # reshape (not resize): -1 lets numpy infer the frame count
nb_frames = 1
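The fragment is truncated here; mirroring Example #1, the next lines would derive the chunk sizes from the reshaped feature array:

feature_chunk_size = features.shape[0]
pcm_chunk_size = frame_size * feature_chunk_size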
Example #8
# use this option to reserve GPU memory, e.g. for running more than
# one thing at a time.  Best to disable for GPUs with small memory
config.gpu_options.per_process_gpu_memory_fraction = 0.44

set_session(tf.Session(config=config))

nb_epochs = 120

# Try reducing batch_size if you run out of memory on your GPU
batch_size = 32

# model params
frame_process_format = sys.argv[3]
up_sampling_format = sys.argv[4]
model, _, _ = lpcnet.new_lpcnet_model(
    training=True,
    frame_process_format=frame_process_format,
    up_sampling_format=up_sampling_format)

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
model.summary()

feature_file = sys.argv[1]
pcm_file = sys.argv[2]  # 16-bit signed short PCM samples
frame_size = model.frame_size
nb_features = 55
nb_used_features = model.nb_used_features
feature_chunk_size = 15
pcm_chunk_size = frame_size * feature_chunk_size
Example #9
    printVector(f, weights, name + '_weights')
    f.write('const EmbeddingLayer {} = {{\n   {}_weights,\n   {}, {}\n}};\n\n'
            .format(name, name, weights.shape[0], weights.shape[1]))
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights.shape[1]))
    hf.write('extern const EmbeddingLayer {};\n\n'.format(name))

def dump_embedding_layer(self, f, hf):
    name = self.name
    print("printing layer " + name + " of type " + self.__class__.__name__)
    weights = self.get_weights()[0]
    dump_embedding_layer_impl(name, weights, f, hf)
    return False
Embedding.dump_layer = dump_embedding_layer


model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=defs['rnn_units1'], use_gpu=False)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
#model.summary()

model.load_weights(sys.argv[1])

if len(sys.argv) > 2:
    cfile = sys.argv[2]
    hfile = sys.argv[3]
else:
    cfile = 'model.c'
    hfile = 'model.h'


f = open(cfile, 'w')
hf = open(hfile, 'w')
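With the .c and .h files open, the patched dump_layer hooks would be invoked while walking the model. A minimal sketch of that loop (the hasattr guard is an added safety check, not from the original):

for layer in model.layers:
    if hasattr(layer, 'dump_layer'):
        layer.dump_layer(f, hf)
f.close()
hf.close()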
Example #10
    name = self.name
    print("printing layer " + name + " of type " + self.__class__.__name__)
    weights = self.get_weights()[0]
    dump_embedding_layer_impl(name, weights, f, hf)
    return False
Embedding.dump_layer = dump_embedding_layer
diff_Embed.dump_layer = dump_embedding_layer

filename = sys.argv[1]
with h5py.File(filename, "r") as f:
    units = min(f['model_weights']['gru_a']['gru_a']['recurrent_kernel:0'].shape)
    units2 = min(f['model_weights']['gru_b']['gru_b']['recurrent_kernel:0'].shape)
    cond_size = min(f['model_weights']['feature_dense1']['feature_dense1']['kernel:0'].shape)
    e2e = 'rc2lpc' in f['model_weights']

model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=units, rnn_units2=units2, flag_e2e=e2e, cond_size=cond_size)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
#model.summary()

model.load_weights(filename, by_name=True)

if len(sys.argv) > 2:
    cfile = sys.argv[2]
    hfile = sys.argv[3]
else:
    cfile = 'nnet_data.c'
    hfile = 'nnet_data.h'


f = open(cfile, 'w')
hf = open(hfile, 'w')
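The min(...) calls above recover the unit counts because a Keras GRU stores its recurrent kernel with shape (units, 3 * units), packing the update, reset, and candidate gates side by side, so the smaller dimension is always the unit count. A toy check:

import numpy as np
units = 384
recurrent_kernel = np.zeros((units, 3 * units))  # z, r, h gates packed together
assert min(recurrent_kernel.shape) == units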
Example #11
    hf.write('extern const EmbeddingLayer {};\n\n'.format(name))

def dump_embedding_layer(self, f, hf):
    name = self.name
    print("printing layer " + name + " of type " + self.__class__.__name__)
    weights = self.get_weights()[0]
    dump_embedding_layer_impl(name, weights, f, hf)
    return False
Embedding.dump_layer = dump_embedding_layer

filename = sys.argv[1]
with h5py.File(filename, "r") as f:
    units = min(f['model_weights']['gru_a']['gru_a']['recurrent_kernel:0'].shape)
    units2 = min(f['model_weights']['gru_b']['gru_b']['recurrent_kernel:0'].shape)

model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=units, rnn_units2=units2)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
#model.summary()

model.load_weights(filename)

if len(sys.argv) > 2:
    cfile = sys.argv[2]
    hfile = sys.argv[3]
else:
    cfile = 'nnet_data.c'
    hfile = 'nnet_data.h'


f = open(cfile, 'w')
hf = open(hfile, 'w')
Example #12
from keras.callbacks import ModelCheckpoint
from ulaw import ulaw2lin, lin2ulaw
import keras.backend as K
import h5py

import tensorflow as tf
from keras.backend.tensorflow_backend import set_session

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
set_session(tf.Session(config=config))

frame_process_format = sys.argv[3]
up_sampling_format = sys.argv[4]
model, enc, dec = lpcnet.new_lpcnet_model(
    use_gpu=False,
    frame_process_format=frame_process_format,
    up_sampling_format=up_sampling_format)

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
#model.summary()

feature_file = sys.argv[1]
out_file = sys.argv[2]
frame_size = model.frame_size
nb_features = 55
nb_used_features = model.nb_used_features

features = np.fromfile(feature_file, dtype='float32')
features = np.reshape(features, (-1, nb_features))  # reshape (not resize): -1 lets numpy infer the frame count
Example #13
                    type=int,
                    default=160,
                    help='frame size in samples')
parser.add_argument('--epochs',
                    type=int,
                    default=20,
                    help='Number of training epochs')
parser.add_argument('--no_pitch_embedding',
                    action='store_true',
                    help='disable pitch embedding')
parser.add_argument('--load_h5', help='initial model weights to load, in .h5 format')
args = parser.parse_args()

nb_epochs = args.epochs

model, _, _ = lpcnet.new_lpcnet_model(frame_size=args.frame_size,
                                      training=True)

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
model.summary()

if args.load_h5:
    print("loading: %s" % (args.load_h5))
    model.load_weights(args.load_h5)

feature_file = args.feature_file
pcm_file = args.packed_ulaw_file
prefix = args.prefix
frame_size = model.frame_size
nb_used_features = model.nb_used_features
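A hedged sketch of how the parsed prefix could feed a checkpoint callback, assuming keras.callbacks.ModelCheckpoint as imported in Example #1 (the filename pattern and fit wiring are assumptions):

checkpoint = ModelCheckpoint(prefix + '_{epoch:02d}.h5')
# later: model.fit(..., epochs=nb_epochs, callbacks=[checkpoint])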
Example #14
from ulaw import ulaw2lin, lin2ulaw
import h5py

filename = sys.argv[1]
with h5py.File(filename, "r") as f:
    units = min(
        f['model_weights']['gru_a']['gru_a']['recurrent_kernel:0'].shape)
    units2 = min(
        f['model_weights']['gru_b']['gru_b']['recurrent_kernel:0'].shape)
    cond_size = min(f['model_weights']['feature_dense1']['feature_dense1']
                    ['kernel:0'].shape)
    e2e = 'rc2lpc' in f['model_weights']

model, enc, dec = lpcnet.new_lpcnet_model(training=False,
                                          rnn_units1=units,
                                          rnn_units2=units2,
                                          flag_e2e=e2e,
                                          cond_size=cond_size,
                                          batch_size=1)

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
#model.summary()

feature_file = sys.argv[2]
out_file = sys.argv[3]
frame_size = model.frame_size
nb_features = 36
nb_used_features = model.nb_used_features

features = np.fromfile(feature_file, dtype='float32')
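The fragment ends mid-load; mirroring the earlier examples, the flat array would next be reshaped into rows of nb_features values:

features = np.reshape(features, (-1, nb_features))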