# NOTE(review): `sys` is used here but never imported in the visible lines --
# presumably `import sys` precedes this chunk; verify against the full file.
sys.path.append('/home/manu/workspace/meeg_denoise')
from tools import cochleo_tools
#from classes import sketch
import matplotlib.pyplot as plt
import matplotlib.colors as cc
import matplotlib.cm as cm
from PyMP import Signal
import stft
from scipy.signal import lfilter, hann
# Non-interactive backend: figures are rendered off-screen (saved, not shown).
plt.switch_backend('Agg')
# NOTE(review): this first path is immediately overridden by the next
# assignment -- only panzani.wav is actually used below.
audio_test_file = '/home/manu/workspace/recup_angelique/Sketches/NLS Toolbox/Hand-made Toolbox/forAngelique/61_sadness.wav'
audio_test_file = '/sons/jingles/panzani.wav'
figure_output_path = '/home/manu/workspace/audio-sketch/src/reporting/figures/'

# Load the test file as a mono, peak-normalized PyMP Signal, then resample.
sig = Signal(audio_test_file, mono=True, normalize=True)
sig.downsample(16000)
scale = 512  # analysis window size in samples
step = 32    # hop size in samples
 
# Plot the spectrogram (linear amplitude, coolwarm colormap).
# NOTE(review): semantics of order=1 come from PyMP's Signal.spectrogram --
# presumably the magnitude exponent; confirm against PyMP docs.
sig.spectrogram(scale, step, order=1, log=False, cmap=cm.coolwarm)


def plot_spectrogram(sig_stft, scale=512, step=128):
    """Display the log-magnitude spectrogram of an STFT coefficient array.

    Parameters
    ----------
    sig_stft : complex ndarray
        STFT coefficients, indexed as [channel, frequency, frame]; only
        channel 0 is displayed -- TODO confirm axis ordering vs. `stft` module.
    scale : int
        Window size in samples (not used in the visible portion of the body).
    step : int
        Hop size in samples (not used in the visible portion of the body).

    NOTE(review): this definition appears truncated by the source extraction --
    `x_tick_vec` is computed but never used in the visible lines.
    """
    plt.figure()
    # Convert magnitude to decibels: 20*log10(|X|) for channel 0.
    plt.imshow(20 * np.log10(np.abs(sig_stft[0, :, :])),
               aspect='auto',
               origin='lower',
               interpolation='nearest',
               cmap=cm.copper_r)

    # Ten evenly spaced frame indices intended as x-axis tick positions.
    x_tick_vec = (np.linspace(0, sig_stft.shape[2], 10)).astype(int)
# --- Esempio n. 2 (Example 2) -- separator left over from a scraped code
# listing; the stray "0" below it was an extraction artifact.
from src.tools import cochleo_tools
#from classes import sketch
import matplotlib.pyplot as plt
from PyMP import Signal
from scipy.signal import lfilter, hann
from scipy.io import loadmat
#from scipy.fftpack import fft, ifft
from numpy.fft import fft, ifft

# Non-interactive backend: figures are rendered off-screen (saved, not shown).
plt.switch_backend('Agg')
# NOTE(review): this first path is immediately overridden by the next
# assignment -- only panzani.wav is actually used below.
audio_test_file = '/home/manu/workspace/recup_angelique/Sketches/NLS Toolbox/nsltools/_done.au'
audio_test_file = '/sons/jingles/panzani.wav'

############################### Inversion
# Load the test file as a mono, peak-normalized PyMP Signal at 8 kHz.
sig = Signal(audio_test_file, mono=True, normalize=True)
sig.downsample(8000)
# convert to auditory
# Cochleogram parameters -- presumably frame length, shift, a compression
# factor and a band-pass flag; verify against cochleo_tools.Cochleogram.
params = {'frmlen': 8, 'shift': 0, 'fac': -2, 'BP': 1}

gram = cochleo_tools.Cochleogram(sig.data, **params)

import cProfile

# Profile the new vs. old auditory-spectrogram builders for comparison.
cProfile.runctx('gram.build_aud()', globals(), locals())
cProfile.runctx('gram.build_aud_old()', globals(), locals())

aud = gram.build_aud()
# Cortico-gram : 2D complex transform of y5
# we need to define y = gram.y5, para1 = vector of parameters, rv = rate vector, sv = scale vector
y = np.array(gram.y5)
# --- Esempio n. 3 (Example 3) -- separator left over from a scraped code
# listing; the stray "0" below it was an extraction artifact.
# For each time frame t, median-combine the magnitude spectra of its nearest
# neighbours (row t of `neighbs`) into the estimated spectrogram Y_hat.
# NOTE(review): `neighbs`, `Y_hat`, `learn_magspecs_all`, `wsize`, `transforms`
# and `learn_audiofilepath` are defined in an earlier, unseen part of this script.
T = neighbs.shape[0]
for t in range(T):
    Y_hat[t, :] = np.median(learn_magspecs_all[neighbs[t, :], :], 0)

# Random initial guess for Griffin-Lim phase reconstruction; 128 is
# presumably the hop size passed below -- TODO confirm vs. transforms.gl_recons.
init_vec = np.random.randn(128 * Y_hat.shape[0])
# 50 iterations of Griffin-Lim reconstruction from the estimated magnitudes.
x_recon = transforms.gl_recons(Y_hat.T,
                               init_vec,
                               50,
                               wsize,
                               128,
                               display=False)

import sti

# Reference signal (resampled to 16 kHz) for the Speech Transmission Index.
orig_sig = Signal(learn_audiofilepath, mono=True, normalize=True)
orig_sig.downsample(16000)
sig = Signal(x_recon, 16000, normalize=True)

# STI score of the reconstruction against the original recording.
score = sti.stiFromAudio(orig_sig.data,
                         x_recon,
                         16000,
                         calcref=False,
                         downsample=None,
                         name="unnamed")

# can we perform viterbi decoding ?
# Initialise uniform transition costs and zero cumulative scores over the
# candidate neighbours; `paths` will collect the decoded state sequences.
n_candidates = neighbs.shape[1]
n_states = neighbs.shape[0]
transition_cost = np.ones((n_candidates, ))
cum_scores = np.zeros((n_candidates, ))
paths = []