Example #1
    def __init__(self, temp):
        self.CHUNK = 5500
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 2
        self.RATE = 44100
        self.RECORD_SECONDS = temp
        self.mensagem = ''
        self.flag = 0
        self.ini = 0
        self.fim = 0
        self.tempo = temp

        # use a Blackman window
        self.window = np.blackman(self.CHUNK)

        # Create the pyaudio instance
        self.p = pyaudio.PyAudio()

        # Create the converter
        self.converter = Conversor()

        # Create the audio input stream
        self.stream = self.p.open(
            format=self.FORMAT,
            channels=self.CHANNELS,
            rate=self.RATE,
            input=True,
            frames_per_buffer=self.CHUNK,
        )

        # List to store the recorded audio frames
        self.frames = []
Example #2
    def listening(self):
        msg = ''
        conversor = Conversor()
        while (True):
            # Read one data byte (1 s window), then one confirmation bit (0.5 s window)
            receiver = Receiver(1)
            byteRecebido = receiver.listening()
            receiver = Receiver(0.5)
            confirmByte = receiver.listening()

            if (int(confirmByte)):
                msg += conversor.bin_to_str(byteRecebido)
                print("LISTENING: ", msg)
Example #3
class Enlace:
    def __init__(self):
        self.camadaFisica = GeraSom()
        self.sem = threading.Semaphore()
        self.conversor = Conversor()
        self.correctByte = False

    def transmission(self, pacote, flag):
        self.sem.acquire()
        dadoRecebido = ''
        try:
            for palavra in pacote.quantDados:
                for letra in palavra:
                    while not (self.correctByte):
                        # Listen in a worker thread while the character is emitted,
                        # then compare the decoded byte with what was sent
                        receiver = Receiver(1)
                        pool = ThreadPool(processes=1)
                        async_call = pool.apply_async(receiver.listening)
                        self.camadaFisica.emitir(letra, flag)
                        arquivoRecebido = async_call.get()
                        arquivoRecebido = self.conversor.bin_to_str(
                            arquivoRecebido)

                        if (arquivoRecebido == letra):
                            self.correctByte = True
                            print("arquivo Recebido: ", arquivoRecebido)
                            dadoRecebido += arquivoRecebido
                            # correct(True) plays the "bit 1" tone to confirm the byte
                            self.camadaFisica.correct(self.correctByte)
                        else:
                            # correct(False) plays the "bit 0" tone to request a retransmission
                            self.camadaFisica.correct(self.correctByte)
                    self.correctByte = False

            print("MENSAGEM FINAL RECEBIDA:", dadoRecebido)

        finally:
            self.sem.release()
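
The stop-and-wait loop above starts Receiver.listening in a worker thread before emitting each character. Below is a minimal standalone sketch of that pattern with stand-in listen/emit functions (hypothetical names, not the repository's classes):

from multiprocessing.pool import ThreadPool
import time

def listen():
    # Stand-in for Receiver.listening: block for a while, then return a "byte"
    time.sleep(0.5)
    return "1000001"

def emit(byte):
    # Stand-in for GeraSom.emitir: pretend to play one tone per bit
    time.sleep(0.2)

pool = ThreadPool(processes=1)
async_call = pool.apply_async(listen)   # start listening in a worker thread
emit("1000001")                         # emit while the listener is running
received = async_call.get()             # wait for the listener's result
print("received:", received)
pool.close()
pool.join()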
Example #4
class GeraSom(object):

    def __new__(cls, *args, **kwargs):
        # Singleton: create the instance once and return it on every later call
        if not hasattr(cls, '_instance'):
            cls._instance = super(GeraSom, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        self.wave_obj_1 = sa.WaveObject.from_wave_file("../assets/bit1.wav")  # Frequency: approx. 10000 Hz
        self.wave_obj_0 = sa.WaveObject.from_wave_file("../assets/bit0.wav")  # Frequency: approx. 2690 Hz
        self.converter = Conversor()


    def emitir(self, dados, flag):
        # Convert the text to its binary representation and play one tone per bit
        if not flag:
            dado = self.converter.str_to_bin(dados)
            
            for byte in dado:
                for bit in byte:
                    if bit == '0':
                        play_obj = self.wave_obj_0.play()
                        play_obj.wait_done()
                        time.sleep(0.1)
                    elif bit == '1':
                        play_obj = self.wave_obj_1.play()
                        play_obj.wait_done()
                        time.sleep(0.1)


    def correct(self, correctByte):
        # Play the bit-1 tone if the byte was received correctly, the bit-0 tone otherwise
        if (correctByte):
            play_obj = self.wave_obj_1.play()
            play_obj.wait_done()
        else:
            play_obj = self.wave_obj_0.play()
            play_obj.wait_done()
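
For comparison, a dependency-free sketch of the singleton idiom used by GeraSom.__new__ (hypothetical class name, not from the repository): the constructor always hands back the one cached instance.

class SingletonSketch(object):
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Create the instance once, then reuse it on every later call
        if cls._instance is None:
            cls._instance = super(SingletonSketch, cls).__new__(cls)
        return cls._instance

a = SingletonSketch()
b = SingletonSketch()
print(a is b)  # True: both names refer to the same object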
Example #5
    def __init__(self):
        self.camadaFisica = GeraSom()
        self.sem = threading.Semaphore()
        self.conversor = Conversor()
        self.correctByte = False
Example #6
    def __init__(self):
        self.wave_obj_1 = sa.WaveObject.from_wave_file("../assets/bit1.wav")  # Frequency: approx. 10000 Hz
        self.wave_obj_0 = sa.WaveObject.from_wave_file("../assets/bit0.wav")  # Frequency: approx. 2690 Hz
        self.converter = Conversor()
Example #7
    def perform_conversion(text,
                           filename,
                           in_format,
                           out_format,
                           aligner='HTK'):

        text1 = text
        converter = Conversor()
        lexer = PhonemeLexer()
        parser = PhonemeParser()

        if (in_format == 'graf'):
            text = converter.convert_sentence(text1)

        if (out_format == 'fonema'):
            # Convert to individual phonemes
            text = re.sub(
                ' +', ' ',
                " ".join(map(lambda x: x.value,
                             lexer.tokenize(text))).replace(",", ""))
        elif (out_format == 'silaba'):

            v = []
            # Mark tonic syllables with ','
            for palavra in text.split():
                for position, character in enumerate(reversed(palavra)):
                    if (character in ['a', 'i', 'u', 'e', 'o']):
                        tmp = list(palavra)
                        tmp.insert(len(palavra) - position, ',')
                        v.append("".join(tmp))
                        break

            # Convert to phonetic syllables
            text = re.sub(
                ' +', ' ',
                " ".join(map(lambda x: x.value,
                             lexer.tokenize(" ".join(v)))).replace(",", ""))
        elif (out_format == 'todos'):
            v = []
            # Mark tonic syllables with ','
            for palavra in text.split():
                for position, character in enumerate(reversed(palavra)):
                    if (character in ['a', 'i', 'u', 'e', 'o']):
                        tmp = list(palavra)
                        tmp.insert(len(palavra) - position, ',')
                        v.append("".join(tmp))
                        break
            # The phonetic-syllable conversion is saved to text1.out
            with open("text1.out", "w") as f:
                f.write(parser.parse(lexer.tokenize(" ".join(v))))
            # The word-level conversion is saved to text2.out
            with open("text2.out", "w") as tt:
                tt.write(text)

            text = re.sub(
                ' +', ' ',
                " ".join(map(lambda x: x.value,
                             lexer.tokenize(text))).replace(",", ""))

        if (aligner == 'HTK' or aligner == 'Kaldi'):
            text = text.split()
            text.insert(0, "sil")
            text.append("sil")
            text = " ".join(text)

        txt_cnv = TextConverter()
        txt_cnv.create_dict(text)

        if (aligner == 'HTK'):
            txt_cnv.mlf(text.split(), filename)
            txt_cnv.scp(filename)
        elif (aligner == 'Kaldi'):
            with open("train.lab", "w") as f:
                f.write(text)
        return text
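
A small standalone illustration of the tonic-marking step above (made-up input words, not repository data): a ',' is inserted right after the last vowel, counted from the end of the word, before the text goes to the phoneme lexer.

def mark_last_vowel(palavra):
    # Scan the word from the end and insert ',' right after the last vowel
    for position, character in enumerate(reversed(palavra)):
        if character in ['a', 'i', 'u', 'e', 'o']:
            tmp = list(palavra)
            tmp.insert(len(palavra) - position, ',')
            return "".join(tmp)
    return palavra

print(mark_last_vowel("converte"))  # converte,
print(mark_last_vowel("sol"))       # so,l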
Example #8
class Receiver:
    def __init__(self, temp):
        self.CHUNK = 5500
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 2
        self.RATE = 44100
        self.RECORD_SECONDS = temp
        self.mensagem = ''
        self.flag = 0
        self.ini = 0
        self.fim = 0
        self.tempo = temp

        # use a Blackman window
        self.window = np.blackman(self.CHUNK)

        # Create the pyaudio instance
        self.p = pyaudio.PyAudio()

        # Create the converter
        self.converter = Conversor()

        # Create the audio input stream
        self.stream = self.p.open(
            format=self.FORMAT,
            channels=self.CHANNELS,
            rate=self.RATE,
            input=True,
            frames_per_buffer=self.CHUNK,
        )

        # List to store the recorded audio frames
        self.frames = []

    def listening(self):
        for i in range(0, int(self.RATE / self.CHUNK * self.RECORD_SECONDS)):
            data = self.stream.read(self.CHUNK)
            self.frames.append(data)

            # write data out to the audio stream
            #self.stream.write(data)

            # unpack the raw bytes into an array of signed 16-bit samples
            indata = np.array(
                wave.struct.unpack("%dh" % (len(data) // 2), data))

            # Take the fft and square each value
            fftData = abs(np.fft.rfft(indata))**2

            # find the maximum
            which = fftData[1:].argmax() + 1

            # use quadratic interpolation around the max
            if which != len(fftData) - 1:
                y0, y1, y2 = np.log(fftData[which - 1:which + 2:])
                x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
                # find the frequency and output it
                thefreq = (which + x1) * self.RATE / self.CHUNK

                # Map the detected tone to a bit: ~2690 Hz -> '0', ~10 kHz -> '1'
                if (thefreq > 2000 and thefreq < 3500):
                    self.mensagem += '0'
                    if (self.tempo == 0.5):
                        return self.mensagem
                elif (thefreq > 8000 and thefreq < 12000):
                    self.mensagem += '1'
                    if (self.tempo == 0.5):
                        return self.mensagem
            else:
                thefreq = which * self.RATE / self.CHUNK

                if (thefreq > 2000 and thefreq < 3500):
                    self.mensagem += '0'
                    if (self.tempo == 0.5):
                        return self.mensagem
                elif (thefreq > 8000 and thefreq < 12000):
                    self.mensagem += '1'
                    if (self.tempo == 0.5):
                        return self.mensagem

        print("****Transmition Ended! ****")

        # Group the received bits into 7-bit words separated by spaces
        msg = ''
        count = 0
        for bit in self.mensagem:
            msg += bit
            count += 1
            if (count == 7):
                msg += ' '
                count = 0

        print("msg: ", msg)
        print("Saida em texto: ", self.converter.bin_to_str(msg))

        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()

        return msg
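
The core of listening() is the FFT-peak search with quadratic interpolation. The self-contained sketch below reproduces just that step on a synthetic 2690 Hz tone (the approximate frequency of bit0.wav), with no microphone or pyaudio needed; RATE, CHUNK, and the frequency bands mirror the values used above.

import numpy as np

RATE = 44100
CHUNK = 5500

# Simulate one chunk of a "bit 0" tone (~2690 Hz, as in bit0.wav)
t = np.arange(CHUNK) / RATE
indata = np.sin(2 * np.pi * 2690 * t)

# Squared magnitude spectrum; pick the strongest non-DC bin
fftData = np.abs(np.fft.rfft(indata)) ** 2
which = fftData[1:].argmax() + 1

# Quadratic interpolation around the peak refines the frequency estimate
if which != len(fftData) - 1:
    y0, y1, y2 = np.log(fftData[which - 1:which + 2])
    x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
    thefreq = (which + x1) * RATE / CHUNK
else:
    thefreq = which * RATE / CHUNK

bit = '0' if 2000 < thefreq < 3500 else '1' if 8000 < thefreq < 12000 else '?'
print("estimated frequency: %.1f Hz -> bit %s" % (thefreq, bit))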