Code Example #1
File: dsp.py  Project: koson/stm32-mcu
def play(rate=None, *args, **kwargs):
    if rate is None:
        rate = FS
    display(Audio(rate=rate, *args, **kwargs))
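A usage sketch for the helper above (assumptions: FS is a module-level default sample rate and display/Audio come from IPython.display). Note that the waveform must be passed by keyword, otherwise it would be captured by the rate parameter:

import numpy as np

FS = 44100  # assumed default sample rate
t = np.linspace(0, 1, FS, endpoint=False)
play(data=np.sin(2 * np.pi * 440 * t))  # one second of a 440 Hz tone at the default rate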
Code Example #2
def ipy_audio(self):
    if self.sig is None:
        self._check_signal()
    return Audio(data=self.sig, rate=self.sr)
Code Example #3
File: signal.py  Project: mikful/fastai2_audio
def hear(self):
    display(Audio(self, rate=self.sr))
Code Example #4
def play_wav(wav_file):
    return Audio(wav_file)
Code Example #5
def main():
    # Configurations
    # Build a model
    os.environ["KERAS_BACKEND"] = "tensorflow"
    # so try to estimate the next sample after the given (maxlen) samples
    maxlen = 256  # 256/44100 = 0.012s AKA framesize
    #nb_output = 256  # resolution - 8bit encoding - output of hidden layers?
    nb_output = 2  # 2-dim mfcc data
    #latent_dim = 128 #dimensionality of the output space
    latent_dim = 2048  #hidden dimension I think

    #1. Preprocess Data
    samples, next_sample = convert_to_tensor(maxlen, nb_output, latent_dim)

    #2. Define network
    model = define_network(maxlen, nb_output, latent_dim)

    #3. Train network
    csv_logger = CSVLogger('training_audio.log')
    escb = EarlyStopping(monitor='val_loss', patience=2, verbose=1)
    checkpoint = ModelCheckpoint(
        "models/audio-{epoch:02d}-{val_loss:.2f}.hdf5",
        monitor='val_loss',
        save_best_only=True,
        verbose=1)  #, period=2)

    model.fit(
        samples,
        next_sample,
        shuffle=True,
        batch_size=256,
        verbose=1,  #initial_epoch=50,
        validation_split=0.3,
        nb_epoch=500,
        callbacks=[csv_logger, escb, checkpoint])

    #matplotlib inline
    print "Training history"
    fig = plt.figure(figsize=(10, 4))
    ax1 = fig.add_subplot(1, 2, 1)
    plt.plot(model.history.history['loss'])
    ax1.set_title('loss')
    ax2 = fig.add_subplot(1, 2, 2)
    plt.plot(model.history.history['val_loss'])
    ax2.set_title('validation loss')

    ###### BELOW IS REDUNDANT IN TRAINING PHASE ########
    #Below just for plotting train history
    seqA = []
    for start in range(5000, 220000, 10000):
        seq = y[start:start + maxlen]
        seq_matrix = np.zeros((maxlen, nb_output), dtype=bool)
        for i, s in enumerate(seq):
            sample_ = int(s * (nb_output - 1))  # 0-255
            seq_matrix[i, sample_] = True

        for i in tqdm(range(5000)):
            z = model.predict(seq_matrix.reshape((1, maxlen, nb_output)))
            s = sample(z[0], 1.0)
            seq = np.append(seq, s)

            sample_ = int(s * (nb_output - 1))
            seq_vec = np.zeros(nb_output, dtype=bool)
            seq_vec[sample_] = True

            seq_matrix = np.vstack(
                (seq_matrix, seq_vec))  # added generated note info
            seq_matrix = seq_matrix[1:]

        # scale back
        seq = seq * (max_y - min_y) + min_y

        # plot
        plt.figure(figsize=(30, 5))
        plt.plot(seq.transpose())
        plt.show()

        display(Audio(seq, rate=sr))
        print(seq)
        seqA.append(seq)
        #join seq data

    seqA2 = np.hstack(seqA)
    librosa.output.write_wav('data1crop4_predictwav', seqA2, sr)
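The generation loop above calls a sample(preds, temperature) helper that is not included in the excerpt. A minimal sketch of one common implementation (temperature sampling over the softmax output, mapped back to a value in [0, 1]), offered here as an assumption rather than the project's actual code:

import numpy as np

def sample(preds, temperature=1.0):
    # Assumed helper, not from the excerpt: re-weight the softmax output by
    # `temperature`, draw one bin index, and map it back into [0, 1].
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds + 1e-8) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    index = np.argmax(np.random.multinomial(1, preds, 1))
    return index / float(len(preds) - 1)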
Code Example #6
for i in range(rl1):  
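  # pad shorter clips with -1.0 on both sides so every clip ends up exactly 22500 samples long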
  if(rifle[i][0].shape[0]==22200):
    fx=np.concatenate((np.array([-1.0]*150),rifle[i][0],np.array([-1.0]*150)),axis=0)
    rifle_arr.append(fx)
  elif(rifle[i][0].shape[0]==21624):
    fx=np.concatenate((np.array([-1.0]*438),rifle[i][0],np.array([-1.0]*438)),axis=0)
    rifle_arr.append(fx)
        
  label.append(1)

from IPython.display import Audio

# got the gun and rifle
audio = gun_arr[0]

Audio(audio,rate=22500)

#librosa.feature.chroma_stft(audio,sr=22500),librosa.feature.chroma_cqt(audio,sr=22500),
def feautre_vc(audio):
  return np.concatenate((librosa.feature.chroma_cens(audio, sr=22500),
                         librosa.feature.mfcc(audio, sr=22500, n_mfcc=44),
                         librosa.feature.rms(audio),
                         librosa.feature.rmse(audio),
                         librosa.feature.spectral_centroid(audio, sr=22500),
                         librosa.feature.melspectrogram(y=audio, sr=22500),
                         librosa.feature.spectral_bandwidth(audio, sr=22500),
                         librosa.feature.spectral_contrast(audio, sr=22500),
                         librosa.feature.spectral_flatness(audio),
                         librosa.feature.spectral_rolloff(audio, sr=22500),
                         librosa.feature.poly_features(audio, sr=22500),
                         librosa.feature.tonnetz(audio, sr=22500),
                         librosa.feature.zero_crossing_rate(audio)),
                        axis=0)

def take_pca(train,test):
  
  # capture shape
  ts=test.shape
  tr=train.shape
  
  # reshape
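The take_pca function above is cut off in this excerpt. A minimal sketch of one plausible completion, assuming scikit-learn's PCA and 3-D (clip x feature x frame) input arrays; the component count is only illustrative:

from sklearn.decomposition import PCA

def take_pca(train, test, n_components=100):
  # capture shape
  tr = train.shape
  ts = test.shape

  # reshape: flatten each (feature x frame) matrix into one row per clip
  train_flat = train.reshape(tr[0], -1)
  test_flat = test.reshape(ts[0], -1)

  # fit PCA on the training data only, then project both sets
  pca = PCA(n_components=n_components)
  return pca.fit_transform(train_flat), pca.transform(test_flat)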
Code Example #7
def make_audio(self):
    """Makes an IPython Audio object."""
    audio = Audio(data=self.ys.real, rate=self.framerate)
    return audio
Code Example #8
def allDone():
    display(
        Audio(
            url=
            'https://sound.peal.io/ps/audios/000/000/537/original/woo_vu_luvub_dub_dub.wav',
            autoplay=True))
Code Example #9
File: ecf.py  Project: Crispy13/crispy
def sound_alert(audio_path=sound_path + "/sc2-psh-rc.mp3", **kwargs):
    display(Audio(audio_path, autoplay=True))
Code Example #10
def main():
    audio_filename = '369148__flying-deer-fx__music-box-the-flea-waltz.wav'

    sr = 8000
    y, _ = librosa.load(audio_filename, sr=sr, mono=True)
    print(y.shape)
    print(y)
    print(len(y))

    min_y = np.min(y)
    max_y = np.max(y)

    # normalize
    y = (y - min_y) / (max_y - min_y)
    print(y.dtype, min_y, max_y)

    Audio(y, rate=sr)

    #matplotlib inline
    plt.figure(figsize=(30,5))
    plt.plot(y[20000:20128].transpose())
    plt.show()

    # Build a model
    os.environ["KERAS_BACKEND"] = "tensorflow"

    # so try to estimate the next sample after the given (maxlen) samples
    maxlen     = 128 # 128 / sr = 0.016 sec
    nb_output = 256  # resolution - 8bit encoding
    latent_dim = 128 

    inputs = Input(shape=(maxlen, nb_output))
    x = LSTM(latent_dim, return_sequences=True)(inputs)
    x = Dropout(0.2)(x)
    x = LSTM(latent_dim)(x)
    x = Dropout(0.2)(x)
    output = Dense(nb_output, activation='softmax')(x)
    model = Model(inputs, output)

    #optimizer = Adam(lr=0.005)
    optimizer = RMSprop(lr=0.01) 
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)

    # try to estimate next_sample (0-255) based on the previous maxlen samples
    step = 5
    next_sample = []
    samples = []
    for j in tqdm(range(0, y.shape[0] - maxlen, step)):
        seq = y[j: j + maxlen + 1]  
        seq_matrix = np.zeros((maxlen, nb_output), dtype=bool) 
        for i,s in enumerate(seq):
            sample_ = int(s * (nb_output - 1)) # 0-255
            if i < maxlen:
                seq_matrix[i, sample_] = True
            else:
                seq_vec = np.zeros(nb_output, dtype=bool)
                seq_vec[sample_] = True
                next_sample.append(seq_vec)
        samples.append(seq_matrix)
    samples = np.array(samples, dtype=bool)
    next_sample = np.array(next_sample, dtype=bool)
    print(samples.shape, next_sample.shape)


    csv_logger = CSVLogger('training_audio.log')
    escb = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
    checkpoint = ModelCheckpoint("models/audio-{epoch:02d}-{val_loss:.2f}.hdf5", monitor='val_loss', verbose=1, period=2)

    model.fit(samples, next_sample, shuffle=True, batch_size=256, verbose=1, #initial_epoch=50,
              validation_split=0.1, nb_epoch=500, callbacks=[csv_logger, escb, checkpoint])

    #matplotlib inline
    print "Training history"
    fig = plt.figure(figsize=(10,4))
    ax1 = fig.add_subplot(1, 2, 1)
    plt.plot(model.history.history['loss'])
    ax1.set_title('loss')
    ax2 = fig.add_subplot(1, 2, 2)
    plt.plot(model.history.history['val_loss'])
    ax2.set_title('validation loss')
         
    seqA = []
    for start in range(5000,220000,10000):
        seq = y[start:start + maxlen]
        seq_matrix = np.zeros((maxlen, nb_output), dtype=bool) 
        for i,s in enumerate(seq):
            sample_ = int(s * (nb_output - 1)) # 0-255
            seq_matrix[i, sample_] = True

        for i in tqdm(range(5000)):
            z = model.predict(seq_matrix.reshape((1,maxlen,nb_output)))
            s = sample(z[0], 1.0)
            seq = np.append(seq, s)

            sample_ = int(s * (nb_output - 1))    
            seq_vec = np.zeros(nb_output, dtype=bool)
            seq_vec[sample_] = True

            seq_matrix = np.vstack((seq_matrix, seq_vec))  # added generated note info 
            seq_matrix = seq_matrix[1:]
            
        # scale back 
        seq = seq * (max_y - min_y) + min_y

        # plot
        plt.figure(figsize=(30,5))
        plt.plot(seq.transpose())
        plt.show()
        
        display(Audio(seq, rate=sr))
        print(seq)
        seqA.append(seq)
        #join seq data
    
    seqA2 = np.hstack(seqA)
    librosa.output.write_wav('data1_seq.wav', seqA2, sr)
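For reference, a small sketch of the 8-bit quantization used in this example (assumption: y has already been normalized to [0, 1]); each sample maps to one of nb_output = 256 bins, which is then one-hot encoded:

import numpy as np

nb_output = 256
s = 0.5                                       # a normalized sample in [0, 1]
bin_index = int(s * (nb_output - 1))          # integer bin in 0..255
one_hot = np.zeros(nb_output, dtype=bool)
one_hot[bin_index] = True
recovered = bin_index / float(nb_output - 1)  # inverse mapping; error is at most 1/255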
Code Example #11
    # now performs separation
    estimates = {}
    for name, source in newM.items():  # iterate over all sources and separate each one with its mask
        # compute soft mask as the ratio between source spectrogram and total
        Mask = newM[name] / model

        # multiply the mix by the mask
        Yj = Mask * X_origin

        # invert to time domain
        target_estimate = istft(Yj, nperseg=4096, noverlap=3072)[1].T

        # set this as the source estimate
        estimates[name] = target_estimate

    return estimates


estimates = estimateSpectro(X_origin, newM)

from IPython.display import Audio, display

for target, estimate in estimates.items():
    display(Audio(estimate.T, rate=track[0].rate))

display(Audio(track[0].audio.T, rate=track[0].rate))

import museval

track_scores = museval.eval_mus_track(track[0], estimates)
print(track_scores)
Code Example #12
def play_wav(wav_file):
    from IPython.display import Audio
    return Audio(wav_file)
Code Example #13
def _playsoundJupyter(sound, block=True):
    sound = Path(sound)
    sound = str(Path(get_ipython().home_dir, 'work', sound.name))
    audio = Audio(sound, autoplay=False)
    display(audio)
Code Example #14
def speak(my_text):
    with io.BytesIO() as f:
        gTTS(text=my_text, lang='en').write_to_fp(f)
        f.seek(0)
        return Audio(f.read(), autoplay=True)
Code Example #15
y_pred_class = np.argmax(y_pred,axis=1)
cnf_matrix = confusion_matrix(ytest, y_pred_class)
print(cnf_matrix)

print(classification_report(ytest, y_pred_class, target_names=classes))

#text = ["I am not happy with this movie"]
text = ['this looks stupid.', "I am unhappy with this movie", 'disgusting movie', 'f*****g bad movie', 'worst of all time', 'very emoional movie, i got tears', 'great movie']

from keras.preprocessing import sequence
sequences_test = tokenizer.texts_to_sequences(text)
#data_int_t = pad_sequences(sequences_test, padding='pre', maxlen=(max_length-5))
data_test = pad_sequences(sequences_test, padding='post', maxlen=(max_length))
y_prob = model.predict(data_test)
for n, prediction in enumerate(y_prob):
    pred = y_prob.argmax(axis=-1)[n]
    print(text[n],"\nPrediction:",classes[pred],"\n")

!pip install gTTS
from gtts import gTTS 
from IPython.display import Audio 
 
predtext = 'The emotion of sentence is ' + classes[pred]
language = 'en'

tts = gTTS(text=predtext, lang=language)
tts.save("emotion.mp3") 

Audio("emotion.mp3", autoplay=True)

Code Example #16
def main():
    audio_filename = 'rosbag_microwave.wav'
    #audio_filename = 'jsbach.wav'

    y, sr = librosa.load(audio_filename, mono=True)
    print(y.shape)
    print(y)
    print(len(y))

    min_y = np.min(y)
    max_y = np.max(y)

    # normalize
    y = (y - min_y) / (max_y - min_y)
    print(y.dtype, min_y, max_y)

    Audio(y, rate=sr)

    #matplotlib inline
    plt.figure(figsize=(30, 5))
    plt.plot(y[20000:20128].transpose())
    plt.show()

    # Build a model
    os.environ["KERAS_BACKEND"] = "tensorflow"

    # so try to estimate the next sample after the given (maxlen) samples
    maxlen = 128  # 128 / sr = 0.016 sec
    nb_output = 256  # resolution - 8bit encoding
    latent_dim = 128

    inputs = Input(shape=(maxlen, nb_output))
    x = LSTM(latent_dim, return_sequences=True)(inputs)
    x = Dropout(0.2)(x)
    x = LSTM(latent_dim)(x)
    x = Dropout(0.2)(x)
    output = Dense(nb_output, activation='softmax')(x)
    model = Model(inputs, output)

    #model.load_weights('/home/mpark/bagfiles/data_experiment_test/models/perhaps.hdf5')
    #optimizer = Adam(lr=0.005)
    optimizer = RMSprop(lr=0.01)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)

    # try to estimate next_sample (0-255) based on the previous maxlen samples
    step = 5
    next_sample = []
    samples = []
    for j in tqdm(range(0, y.shape[0] - maxlen, step)):
        seq = y[j:j + maxlen + 1]
        seq_matrix = np.zeros((maxlen, nb_output), dtype=bool)
        for i, s in enumerate(seq):
            sample_ = int(s * (nb_output - 1))  # 0-255
            if i < maxlen:
                seq_matrix[i, sample_] = True
            else:
                seq_vec = np.zeros(nb_output, dtype=bool)
                seq_vec[sample_] = True
                next_sample.append(seq_vec)
        samples.append(seq_matrix)
    #print type(samples), len(samples)
    #print type(next_sample), len(next_sample)
    samples = np.array(samples, dtype=bool)
    next_sample = np.array(next_sample, dtype=bool)
    print(samples.shape, next_sample.shape)

    csv_logger = CSVLogger('training_audio.log')
    escb = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
    checkpoint = ModelCheckpoint(
        "models/audio-{epoch:02d}-{val_loss:.2f}.hdf5",
        monitor='val_loss',
        verbose=1,
        period=2)

    model.fit(
        samples,
        next_sample,
        shuffle=True,
        batch_size=256,
        verbose=1,  #initial_epoch=50,
        validation_split=0.1,
        nb_epoch=500,
        callbacks=[csv_logger, escb, checkpoint])

    #matplotlib inline
    print "Training history"
    fig = plt.figure(figsize=(10, 4))
    ax1 = fig.add_subplot(1, 2, 1)
    plt.plot(model.history.history['loss'])
    ax1.set_title('loss')
    ax2 = fig.add_subplot(1, 2, 2)
    plt.plot(model.history.history['val_loss'])
    ax2.set_title('validation loss')
Code Example #17
def display(self):
    from IPython.display import Audio
    return Audio(data=self.array, rate=self.rate)
Code Example #18
import tensorflow as tf
from IPython.display import display, Audio
import numpy as np

# Load the graph
tf.reset_default_graph()
saver = tf.train.import_meta_graph('D:/VHD/infer/infer.meta')
graph = tf.get_default_graph()
sess = tf.InteractiveSession()
saver.restore(sess, 'D:/VHD/models/model.ckpt')

# Create 50 random latent vectors z
_z = (np.random.rand(50, 100) * 2.) - 1

# Synthesize G(z)
z = graph.get_tensor_by_name('z:0')
G_z = graph.get_tensor_by_name('G_z:0')
_G_z = sess.run(G_z, {z: _z})

# Play audio in notebook
display(Audio(_G_z[0, :, 0], rate=16000))
Code Example #19
File: utils2.py  Project: Corbin-A/fastai_part2
def beep():
    return Audio(filename='/home/jhoward/beep.mp3', autoplay=True)
Code Example #20
#
# args = hyperparameter()
# # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
# embedding = DeepVOX_GST_encoder(args.ref_audio_path, args.enc_model_fpath, sampling_rate=16000, n_channels=1, is_cmvn=True)
# embedding = fCNN_encoder(args.ref_audio_path, args.enc_model_fpath, sampling_rate=8000, n_channels=1, is_cmvn=True)
# embedding = DeepTalk_encoder(args.ref_audio_path, args.enc_model_fpath, args.enc_module_name, \
# preprocess=True, normalize=True, sampling_rate=args.sampling_rate, duration=None)
# # np.linalg.norm(embedding)
#
# synthesized_mel, breaks = DeepTalk_synthesizer(embedding, args.output_text, args.syn_model_dir, low_mem = args.low_mem)
# synthesized_wav = DeepTalk_vocoder(synthesized_mel, breaks, args.voc_model_fpath, normalize=True)

# output_text = "When the sunlight strikes raindrops in the air, they act as a prism and form a rainbow. The rainbow is a division of white light into many beautiful colors. These take the shape of a long round arch, with its path high above, and its two ends apparently beyond the horizon. There is , according to legend, a boiling pot of gold at one end. People look, but no one ever finds it. When a man looks for something beyond his reach, his friends say he is looking for the pot of gold at the end of the rainbow. Throughout the centuries people have explained the rainbow in various ways. Some have accepted it as a miracle without physical explanation. To the Hebrews it was a token that there would be no more universal floods. The Greeks used to imagine that it was a sign from the gods to foretell war or heavy rain. The Norsemen considered the rainbow as a bridge over which the gods passed from earth to their home in the sky. Others have tried to explain the phenomenon physically. Aristotle thought that the rainbow was caused by reflection of the sun’s rays by the rain. Since then physicists have found that it is not reflection, but refraction by the raindrops which causes the rainbows. Many complicated ideas about the rainbow have been formed. The difference in the rainbow depends considerably upon the size of the drops, and the width of the colored band increases as the size of the drops increases. The actual primary rainbow observed is said to be the effect of super-imposition of a number of bows. If the red of the second bow falls upon the green of the first, the result is to give a bow with an abnormally wide yellow band, since red and green light when mixed form yellow. This is a very common type of bow, one showing mainly red and yellow, with little or no green or blue. "

output_text = 'When the sunlight strikes raindrops in the air, they act as a prism and form a rainbow. \n The rainbow is a division of white light into many beautiful colors. \n These take the shape of a long round arch, with its path high above, and its two ends apparently beyond the horizon. \n There is , according to legend, a boiling pot of gold at one end. People look, but no one ever finds it. \n When a man looks for something beyond his reach, his friends say he is looking for the pot of gold at the end of the rainbow. \n Throughout the centuries people have explained the rainbow in various ways.'

synthesized_wav, sample_rate = run_DeepTalk_demo(
    ref_audio_path='samples/MorganFreeman_speech_ref.wav',
    output_text=output_text)

ref_audio_path = 'samples/ref_VCTKp240.wav'
output_text = 'The Norsemen considered the rainbow as a bridge over which the gods passed from earth to their home in the sky.'
synthesized_wav, sample_rate = run_DeepTalk_demo(ref_audio_path=ref_audio_path,
                                                 output_text=output_text)

#
# print('Synthesized Audio: ')
sample_rate = 16000
Audio(synthesized_wav, rate=sample_rate)
# librosa.output.write_wav('samples/synthe
Code Example #21
File: helpers.py  Project: wangyf/tectosaur
def synth(f, duration):
    t = np.linspace(0., duration, int(rate * duration))
    x = np.sin(f * 2. * np.pi * t)
    display(Audio(x, rate=rate, autoplay=True))
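A usage sketch for the helper above (assumption: rate is a module-level sample rate shared with synth):

rate = 44100       # assumed global sample rate
synth(440.0, 1.0)  # play a one-second 440 Hz sine tone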
Code Example #22
				print('Zero terms entered. Please enter a valid song term.')
				continue
			#print(calledSongs)

# Part 2.1: Pick a random song
# After creating a list of songs (part 1), the program picks a song at random from that list.

	randomSong = random.choice(calledSongs)
	#print(randomSong)

# Part 2.2: Play a song preview
# After picking a random song (2.1), the program plays an audio preview of the song using the display and Audio features of the IPython.display module, after looking up the song's preview URL in the iTunes search results.

	from IPython.display import display, Audio, clear_output
	audio_url = iTunesSearch(randomSong)['results'][0]['previewUrl']  # take the first search result
	display(Audio(audio_url, autoplay=True))

# Part 2.3: Print a "blanked out" version of the song
# After picking a random song (2.1) and playing a song preview (2.2), the code replaces every alphanumeric character (a-z, 0-9) in the track name with an underscore ('_').

	blankedSong = ''
	for ch in randomSong:
		if ch.isalnum() == True:
			blankedSong += '_'
		elif ch == " ":
			blankedSong += ' '
		else: 
			blankedSong += ch
	print(blankedSong)
	failedAttemptsCount = 0
Code Example #23
# ### Waves
#
# A Signal represents a mathematical function defined for all values of time.  If you evaluate a signal at a sequence of equally-spaced times, the result is a Wave.  `framerate` is the number of samples per second.

# In[ ]:

wave = mix.make_wave(duration=0.5, start=0, framerate=11025)
wave

# IPython provides an Audio widget that can play a wave.

# In[ ]:

from IPython.display import Audio
audio = Audio(data=wave.ys, rate=wave.framerate)
audio

# Wave also provides `make_audio()`, which does the same thing:

# In[ ]:

wave.make_audio()

# The `ys` attribute is a NumPy array that contains the values from the signal.  The interval between samples is the inverse of the framerate.

# In[ ]:

print('Number of samples', len(wave.ys))
print('Timestep in ms', 1 / wave.framerate * 1000)
Code Example #24
def open_output_path(self, output_path):
    display(Audio(filename=str(output_path)))
Code Example #25
SAMPLES_TO_DISPLAY = 10

test_ds = paths_and_labels_to_dataset(valid_audio_paths, valid_labels)
test_ds = test_ds.shuffle(buffer_size=BATCH_SIZE * 8,
                          seed=SHUFFLE_SEED).batch(BATCH_SIZE)

test_ds = test_ds.map(lambda x, y: (add_noise(x, noises, scale=SCALE), y))

for audios, labels in test_ds.take(1):
    # Get the signal FFT
    ffts = audio_to_fft(audios)
    # Predict
    y_pred = model.predict(ffts)
    # Take random samples
    rnd = np.random.randint(0, BATCH_SIZE, SAMPLES_TO_DISPLAY)
    audios = audios.numpy()[rnd, :, :]
    labels = labels.numpy()[rnd]
    y_pred = np.argmax(y_pred, axis=-1)[rnd]

    for index in range(SAMPLES_TO_DISPLAY):
        # For every sample, print the true and predicted label
        # as well as run the voice with the noise
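        # "\33" plus "[92m"/"[91m" forms an ANSI escape code: green when the
        # prediction matches the true label, red otherwise; "\33[0m" resets the color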
        print("Speaker:\33{} {}\33[0m\tPredicted:\33{} {}\33[0m".format(
            "[92m" if labels[index] == y_pred[index] else "[91m",
            class_names[labels[index]],
            "[92m" if labels[index] == y_pred[index] else "[91m",
            class_names[y_pred[index]],
        ))
        display(Audio(audios[index, :, :].squeeze(), rate=SAMPLING_RATE))
Code Example #26
def listen_to(sample_midi):
    """Create a audio player that renders a PrettyMidi object"""
    sample_wav, rate = midi2wav(sample_midi)
    display(Audio(data=sample_wav, rate=rate))
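The midi2wav helper is not part of the excerpt. A minimal sketch of what it might look like, assuming a pretty_midi.PrettyMIDI input and its built-in synthesize method:

import pretty_midi  # assumed dependency

def midi2wav(sample_midi, fs=22050):
    # Hypothetical helper: render the PrettyMIDI object to a float waveform.
    wav = sample_midi.synthesize(fs=fs)
    return wav, fs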
Code Example #27
def test(model,
         noise_type,
         SNR_type,
         test_sample,
         pca=None,
         elec_channel=(1, 124),
         dataset_path='.',
         use_S=True,
         use_E=True,
         elec_only=False,
         display_audio=False,
         show_graph=True,
         enhanced_path=None):
    print(f'{noise_type}, {SNR_type}, {test_sample}')

    device = get_device(model)
    if use_S:
        Sx, phasex, meanx, stdx = load_wave_data(sample_id=test_sample,
                                                 noise_type=noise_type,
                                                 SNR_type=SNR_type,
                                                 is_training=False,
                                                 dataset_path=dataset_path,
                                                 norm=model.use_norm)
        noisy = torch.Tensor([Sx.T]).to(device)
    else:
        Sx = None
        noisy = None

    Sy, phasey, _, _ = load_wave_data(sample_id=test_sample,
                                      is_training=False,
                                      dataset_path=dataset_path,
                                      norm=False)

    if use_E and model.is_use_E():
        elec_data = load_elec_data(test_sample, Sy.shape[1], elec_channel,
                                   dataset_path)
    else:
        elec_data = np.zeros((Sy.shape[1], 124))
    if pca:
        elec_data = pca.transform(elec_data)
    elec = torch.Tensor([elec_data]).to(device)
    elec_data = elec_data.T

    with torch.no_grad():
        Ss, Se, Sf, Sy_, e_ = model(noisy, elec, elec_only=elec_only)

    if Ss is not None:
        Ss = Ss[0].cpu().detach().numpy().T
    if Se is not None:
        Se = Se[0].cpu().detach().numpy().T
    if Sf is not None:
        Sf = Sf[0].cpu().detach().numpy().T
    if e_ is not None:
        e_ = e_[0].cpu().detach().numpy().T
    if Sy_ is not None:
        Sy_ = Sy_[0].cpu().detach().numpy().T
    else:
        return

    if noisy is not None:
        enhanced = spec2wave(Sy_, phasex)
    else:
        enhanced = librosa.core.griffinlim(10**(Sy_ / 2),
                                           n_iter=5,
                                           hop_length=Const.HOP_LENGTH,
                                           win_length=Const.WIN_LENGTH,
                                           window=Const.WINDOW)
    clean = spec2wave(Sy, phasey)

    if use_S:
        noisy = spec2wave(Sx, phasex, meanx, stdx)

    sr = 16000
    if _platform == 'Windows':
        print('PESQ: ',
              pesq_windows(clean, enhanced, test_sample, sr, dataset_path))
#     else:
    print('PESQ: ', pesq(clean, enhanced, sr))
    print('STOI: ', stoi(clean, enhanced, sr, False))
    print('ESTOI:', stoi(clean, enhanced, sr, True))

    saved_sr = 24000
    if enhanced_path is not None:
        test_wav_filename = os.path.join(enhanced_path,
                                         f'{to_TMHINT_name(test_sample)}.wav')
        enhanced = librosa.resample(enhanced, sr, saved_sr)
        wavfile.write(test_wav_filename, saved_sr, enhanced)
#         sf.write(test_wav_filename, enhanced, saved_sr, subtype='PCM_16')

# mel spectrogram
#         mel_basis = librosa.filters.mel(sr, n_fft, n_mels)  # (n_mels, 1+n_fft//2)
#         mel = np.dot(mel_basis, mag)  # (n_mels, t)

#         # to decibel
#         mel = 20 * np.log10(np.maximum(1e-5, mel))
#         mag = 20 * np.log10(np.maximum(1e-5, mag))

#         # normalize
#         mel = np.clip((mel - ref_db + max_db) / max_db, 1e-8, 1)
#         mag = np.clip((mag - ref_db + max_db) / max_db, 1e-8, 1)

#         # Transpose
#         mel = mel.T.astype(np.float32)  # (T, n_mels)
#         mag = mag.T.astype(np.float32)  # (T, 1+n_fft//2)

    if display_audio:
        display(Audio(clean, rate=sr, autoplay=False))
        if use_S:
            display(Audio(noisy, rate=sr, autoplay=False))

        if enhanced_path is None:
            display(Audio(enhanced, rate=sr, autoplay=False))
        else:
            display(Audio(enhanced, rate=saved_sr, autoplay=False))

    if show_graph:
        show_data = [
            (Sx, 'lower', 'jet'),
            (elec_data, 'lower', 'jet'),  #None, cm.Blues),
            (Ss, 'lower', 'jet'),
            (Se, 'lower', 'jet'),
            (Sf, 'lower', 'jet'),
            (Sy_, 'lower', 'jet'),
            (Sy, 'lower', 'jet'),
            #             (e_, None, cm.Blues),
        ]

        f, axes = plt.subplots(len(show_data),
                               1,
                               sharex=True,
                               figsize=(18, 12))
        axes[0].set_xlim(0, Sy.shape[1])

        for i, (data, origin, cmap) in enumerate(show_data):
            if data is not None:
                axes[i].imshow(data, origin=origin, aspect='auto', cmap=cmap)

        plt.tight_layout(pad=0.2)
        plt.show()
Code Example #28
r = sr.Recognizer()
with sr.AudioFile(AUDIO_FILE) as source:
  audio = r.record(source)

print("音声データの文字起こし結果:\n\n", r.recognize_google(audio,language = "ja"))

audio_text = r.recognize_google(audio, language = "ja")

with open('audio_text.txt', 'w') as f:
  print(audio_text, file=f)

print(len(audio_text))

# Limit the number of characters because the text could not be converted to speech when it was too long
if len(audio_text) > 100:
  audio_text = audio_text[0:99]

# From here on, convert the text to speech. This will not run unless the packages installed at the beginning are available.
input_text = audio_text

with torch.no_grad():
    start = time.time()
    x = frontend(input_text)
    c, _, _ = model.inference(x, inference_args)
    y = vocoder.inference(c)
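# real-time factor (RTF): synthesis time divided by the duration of the generated audio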
rtf = (time.time() - start) / (len(y) / fs)
print(f"RTF = {rtf:5f}")

from IPython.display import display, Audio
display(Audio(y.view(-1).cpu().numpy(), rate=fs))
Code Example #29
def beep(): return Audio(filename='/home/jhoward/beep.mp3', autoplay=True)
def dump(obj, fname): pickle.dump(obj, open(fname, 'wb'))
Code Example #30
File: sketch.py  Project: auspicious3000/fast-wavenet
from time import time

from wavenet.models import Model, Generator
from wavenet.utils import make_batch  # assumed location of make_batch in this project

from IPython.display import Audio

import numpy as np

inputs, targets = make_batch('assets/voice.wav')
num_time_samples = inputs.shape[1]
num_channels = 1
gpu_fraction = 1.0

model = Model(num_time_samples=num_time_samples,
              num_channels=num_channels,
              gpu_fraction=gpu_fraction)

Audio(inputs.reshape(inputs.shape[1]), rate=16000)

# In[ ]:

tic = time()
model.train(inputs, targets)
toc = time()

print('Training took {} seconds.'.format(toc - tic))

# In[ ]:

generator = Generator(model)

# Get first sample of input
input_ = inputs[:, 0:1, 0]