Example #1
import numpy as np
from scipy.io.wavfile import read, write  # WAV file I/O

import AudioLib


def create_effect(effect_type):
    """Apply the selected effect to the recorded voice file and save the result."""
    input_file_path = 'voice_wf.wav'
    output_name = 'voice_mod.wav'
    BLOCKLEN = 1024      # Number of frames per block (unused in this snippet)

    samp_freq, audio_data = read(input_file_path)

    if effect_type == "Darth-Vader":
        output_audio = AudioLib.darth_vader(audio_data)
        write(output_name, samp_freq, np.array(output_audio, dtype=np.int16))
    elif effect_type == "Echo":
        output_audio = AudioLib.echo(audio_data)
        write(output_name, samp_freq, np.array(output_audio, dtype=np.int16))

    print('* Finished')
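A minimal driver for the function above, assuming AudioLib and a recorded voice_wf.wav are available in the working directory; the effect names mirror the branches in create_effect.

if __name__ == '__main__':
    # Hypothetical usage: writes voice_mod.wav with the chosen effect applied.
    create_effect("Echo")
    # create_effect("Darth-Vader")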
Example #2
    def plot_obs(self):
        # Box size
        L = self.Lbox.get()
        # Source position
        X0 = self.xsource.get()
        Y0 = self.ysource.get()
        # Observer position
        XOb = self.xobs.get()
        YOb = self.yobs.get()
        # Frequency
        f = self.freq.get()
        # Wave speed
        vel = self.vel.get()
        # Wavelength
        lamb = vel / f
        # Flag for reflective boundaries
        reflex = self.frontera.get()

        # Time step
        dt = 1 / 44100.
        # Maximum time
        tmax = self.tmax.get()
        # Time array
        self.tiempo = np.arange(0, tmax, dt)
        # Field at the observer's position
        self.Z = Z_amplitud(XOb, YOb, X0, Y0, self.tiempo, f, lamb)
        if reflex == 1:
            # Subtract the contributions of the four image sources (reflections off the walls)
            self.Z -= (Z_amplitud(XOb, YOb, 0.0, -Y0, self.tiempo, f, lamb) +
                       Z_amplitud(XOb, YOb, 0.0, 2 * L - Y0, self.tiempo, f, lamb) +
                       Z_amplitud(XOb, YOb, -X0, 0.0, self.tiempo, f, lamb) +
                       Z_amplitud(XOb, YOb, 2 * L - X0, 0.0, self.tiempo, f, lamb))
            self.Z /= 5.   # Normalize by the number of sources
        # Plot
        self.ax.clear()
        self.ax.grid()
        self.ax.set_title('Amplitud Onda Sonora por Observador')
        self.ax.set_xlabel('tiempo [s]')
        self.ax.set_ylabel('Amplitud [$A_0$]')
        self.ax.plot(self.tiempo, self.Z / (1.0 * ad.Amplitude))
        self.ax.set_ylim((-1, 1))
        self.ax.set_xlim((0, tmax))
        self.canvas.draw()
        # Create the audio object
        sonido = ad.audio()
        # Load the waveform into the audio object
        sonido.load(self.Z)
        # Play the sound
        sonido.play()
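Z_amplitud is not defined in this snippet; the sketch below is a hypothetical stand-in, assuming a monochromatic point source with a 1/r geometric falloff and the AudioLib Amplitude constant, so the method above can be read on its own.

import numpy as np
import AudioLib as ad

# Hypothetical stand-in for Z_amplitud (the real helper is defined elsewhere):
# a monochromatic point source at (x0, y0) evaluated at the observer (x, y),
# with an assumed 1/r falloff.
def Z_amplitud(x, y, x0, y0, t, f, lamb):
    r = np.sqrt((x - x0)**2 + (y - y0)**2)   # source-observer distance
    k = 2 * np.pi / lamb                     # wavenumber
    return ad.Amplitude * np.sin(2 * np.pi * f * t - k * r) / max(r, 1e-6)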
Example #4
import numpy as np
import matplotlib.pylab as plt
import AudioLib as ad

# Vfem() (induced potential) and y() (string oscillation) are defined earlier in the script.
tau = 1.

# Maximum time [s]
tmax = 5
# Time step
dt = 1 / 44100.
# Time array
tiempo = np.arange(0, tmax, dt)

# Induced potential
V = Vfem(tiempo)
# Original string oscillation
y_cuerda = y(tiempo)

# Plot
plt.plot(tiempo, V, linewidth=0.5)
plt.grid()
plt.title("Potencial inducido por una bobina fonocaptora")
plt.xlabel("t [s]")
plt.ylabel("Potential [V_0]")
plt.show()

# Audio produced by the pickup coil
nota_bobina = ad.audio()
nota_bobina.load(V * ad.Amplitude / max(V))
nota_bobina.play()

# Original audio of the string
nota_cuerda = ad.audio()
nota_cuerda.load(y_cuerda * ad.Amplitude / max(y_cuerda))
nota_cuerda.play()
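Vfem and y are defined earlier in the original script; the sketch below shows plausible stand-ins, assuming a decaying sinusoidal string oscillation with time constant tau and an induced potential proportional to its time derivative (Faraday's law). The frequency value and the np.gradient derivative are illustrative choices, not the original definitions.

import numpy as np

tau = 1.      # decay time constant [s], as above
f0 = 110.     # illustrative fundamental frequency [Hz]

# Hypothetical string displacement: an exponentially decaying sine.
def y(t):
    return np.exp(-t / tau) * np.sin(2 * np.pi * f0 * t)

# Hypothetical induced potential: proportional to dy/dt (Faraday's law), up to a constant.
def Vfem(t):
    return np.gradient(y(t), t)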
Example #5
import struct
import wave
from math import pi, sin, cos

import numpy as np
import pandas as pd
import pyaudio
import matplotlib.pyplot as plt
from scipy import signal

import AudioLib


def fun_effect(stvar):

    instrument = stvar
    # Columns used below: [0] note name, [1] note frequency, [2]-[3] frequency range mapped to the note
    data = pd.read_csv("Notes_Freq_Match.csv")
    data = np.asarray(data)

    freq_notes = {}

    for r in range(data.shape[0]):
        for n in range(int(data[r, 2]), int(data[r, 3]) + 1):
            freq_notes.update({n: data[r, 0]})

    freq_map = {}
    for r in range(data.shape[0]):
        for n in range(int(data[r, 2]), int(data[r, 3]) + 1):
            freq_map.update({n: data[r, 1]})

    MAXVALUE = 2**15 - 1  # Maximum allowed output signal value (because WIDTH = 2)
    WIDTH = 2  # bytes per sample
    CHANNELS = 1  # mono
    RATE = 8000  # Sampling rate (samples/second)
    BLOCKSIZE = 8000  # length of block (samples)
    DURATION = 10  # Duration (seconds)

    voice_mod_wf = wave.open('voice_piano_wf.wav',
                             'w')  # Write a mono wave file
    voice_mod_wf.setnchannels(CHANNELS)  # mono
    voice_mod_wf.setsampwidth(WIDTH)  # two bytes per sample
    voice_mod_wf.setframerate(RATE)  # samples per second

    voice_wf = wave.open('voice_wf.wav', 'w')  # Write a mono wave file
    voice_wf.setnchannels(CHANNELS)  # mono
    voice_wf.setsampwidth(WIDTH)  # two bytes per sample
    voice_wf.setframerate(RATE)  # samples per second

    NumBlocks = int(DURATION * RATE / BLOCKSIZE)
    output_block = BLOCKSIZE * [0]

    Ta = 5
    r = 0.01**(1.0 / (Ta * RATE))

    # Filter coefficients
    ORDER = 2

    f = [[]] * data.shape[0]
    a = [[]] * data.shape[0]
    b = [[]] * data.shape[0]
    x = [[]] * data.shape[0]
    y = [[]] * data.shape[0]
    om = [[]] * data.shape[0]
    states = [[]] * data.shape[0]
    new_states = [[]] * data.shape[0]

    for i in range(data.shape[0]):
        f[i] = data[i, 1]
        om[i] = 2 * pi * f[i] / RATE
        a[i] = [1, -2 * r * cos(om[i]), r**2]
        b[i] = [r * sin(om[i])]
        states[i] = np.zeros(ORDER)
        new_states[i] = np.zeros(ORDER)
        x[i] = np.zeros(BLOCKSIZE)
        y[i] = np.zeros(BLOCKSIZE)
        yout = np.zeros(BLOCKSIZE)

    DBscale = True

    # Initialize plot window:
    plt.ion()  # Turn on interactive mode so plot gets updated
    fig = plt.figure(1)

    [g1] = plt.plot([], [], 'red')
    [g2] = plt.plot([], [], 'blue')

    g1.set_label('Input')
    g2.set_label('Output')
    plt.legend()

    g1.set_xdata(RATE / BLOCKSIZE * np.arange(0, BLOCKSIZE))
    g2.set_xdata(RATE / BLOCKSIZE * np.arange(0, BLOCKSIZE))
    plt.xlim(0, 0.5 * RATE)

    plt.xlabel('Frequency (Hz)')
    plt.ylim(0, 150)

    # Open audio device:
    p = pyaudio.PyAudio()
    PA_FORMAT = p.get_format_from_width(WIDTH)

    stream = p.open(format=PA_FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    output=True,
                    frames_per_buffer=256)

    for i in range(0, NumBlocks):
        input_bytes = stream.read(
            BLOCKSIZE, exception_on_overflow=False)  # Read audio input stream

        input_tuple = struct.unpack('h' * BLOCKSIZE, input_bytes)  # Convert
        X = np.fft.fft(input_tuple)

        freqs = np.fft.fftfreq(len(X))

        idx = np.argmax(np.abs(X))
        freq = freqs[idx]
        freq_in_hertz = int(abs(freq * RATE))

        f_mapped = freq_map[freq_in_hertz]
        idx = int(np.where(data == f_mapped)[0][0])  # row of the matched note

        if 200 <= freq_in_hertz <= data[71, 1]:
            x[idx][0] = 1000   # impulse that excites the matching resonator

        if (instrument == "Piano"):

            for j in range(0, data.shape[0]):
                [y[j], new_states[j]] = signal.lfilter(b[j],
                                                       a[j],
                                                       x[j],
                                                       zi=states[j])
                if (j < data.shape[0] - 1):
                    states[j + 1] = new_states[j]
                x[j][0] = 0.0
                yout += y[j]

        elif instrument == "Guitar" and freq_in_hertz >= 200:

            n = freq_in_hertz - 262
            # guitar_wf is a guitar waveform loaded elsewhere in the original script
            yout = AudioLib.set_stretch(guitar_wf, 10, 200, 50)

            if len(yout) != BLOCKSIZE:
                yout = np.pad(yout, (0, BLOCKSIZE - len(yout)), 'constant')

        else:
            print(
                "Frequency not suitable for guitar transformation. Try higher frequencies next time."
            )

        #for j in range (0,data.shape[0]):
        #    yout += y[j]
        #    y[j] = np.zeros(BLOCKSIZE)

        yout = AudioLib.bpf(yout, f_mapped - 10, f_mapped + 10)
        output_block = np.clip(yout.astype(int), -MAXVALUE, MAXVALUE)

        Y = np.fft.fft(output_block)

        # Convert values to binary data
        output_bytes = struct.pack('h' * BLOCKSIZE, *output_block)

        # Write binary data to audio output stream
        stream.write(output_bytes)

        voice_mod_wf.writeframes(output_bytes)
        voice_wf.writeframes(input_bytes)

        # Update y-data of plot
        g1.set_ydata(20 * np.log10(np.abs(X)))
        plt.pause(0.001)
        g2.set_ydata(20 * np.log10(np.abs(Y)))
        plt.pause(0.001)

        yout = np.zeros(BLOCKSIZE)
        output_block = BLOCKSIZE * [0]

    plt.close()
    stream.stop_stream()
    stream.close()
    p.terminate()
    print('* Finished')
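For reference, the per-note filters built in fun_effect are standard two-pole resonators with poles at radius r and angle om. The standalone sketch below plots the magnitude response of one such resonator for an illustrative note frequency; the constants mirror the ones used above.

import numpy as np
from math import pi, sin, cos
from scipy import signal
import matplotlib.pyplot as plt

RATE = 8000                       # sampling rate used above [samples/s]
f_note = 440.0                    # illustrative note frequency [Hz]
Ta = 5
r = 0.01**(1.0 / (Ta * RATE))     # pole radius, as computed above

om = 2 * pi * f_note / RATE
b = [r * sin(om)]                 # numerator
a = [1, -2 * r * cos(om), r**2]   # denominator: conjugate poles near f_note

w, h = signal.freqz(b, a, worN=4096, fs=RATE)
plt.plot(w, 20 * np.log10(np.abs(h) + 1e-12))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Magnitude (dB)')
plt.title('Two-pole resonator centered near %.0f Hz' % f_note)
plt.show()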
Example #7
# Doppler effect
#==========================================================

#**********************************************************
#	MODULES
#**********************************************************
import numpy as np
import os
import matplotlib.pylab as plt
import AudioLib as ad

# Speed of sound [m/s]
vs = 331.5

# Create the audio object
sonido = ad.audio()

# Time step
dt = 1 / 44100.
# Note frequency [Hz]
freq0 = 110.

# Maximum time [s]
tmax = 6.
# Time array
tiempo = np.arange(0, tmax, dt)
# Note to play
nota = ad.Amplitude * np.sin(2 * np.pi * freq0 * tiempo)

# Load the note into the audio object
sonido.load(nota)
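The snippet above stops after loading the unshifted note; the sketch below is one minimal way to add the Doppler shift itself, assuming a source that moves at a constant speed v straight toward the observer for the first half of the interval and away for the second half. The speed value and the two-segment approximation are illustrative, not part of the original script.

# Hypothetical Doppler shift: approach, then recede, at constant speed v.
v = 30.0                              # source speed [m/s] (illustrative)
f_approach = freq0 * vs / (vs - v)    # perceived frequency while approaching
f_recede = freq0 * vs / (vs + v)      # perceived frequency while receding

half = len(tiempo) // 2
nota_doppler = np.empty_like(tiempo)
nota_doppler[:half] = ad.Amplitude * np.sin(2 * np.pi * f_approach * tiempo[:half])
nota_doppler[half:] = ad.Amplitude * np.sin(2 * np.pi * f_recede * tiempo[half:])

# Load and play the shifted note (sonido.load / sonido.play as used in the examples above).
sonido.load(nota_doppler)
sonido.play()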