Exemplo n.º 1
0
 def play_music(self):
     """Toggle/advance background music.

     When music is disabled, stop the current song (best effort) and
     return "MUSIC OFF".  Otherwise play the intro jingle while a menu
     screen is active, or pop the next track from the playlist,
     refilling the playlist from the played history once exhausted.
     """
     if not self.music_state:
         try:
             self.current_song.stop()
         except TypeError:
             pass
         except AttributeError as e:
             # current_song may still be None; swallow only that case.
             # (Original did `'NoneType' in e`, which raises TypeError on
             # an exception object, and then re-raised unconditionally.)
             if 'NoneType' not in str(e):
                 raise
         return "MUSIC OFF"
     # music for menus
     if self.__manager is not None and (self.__manager.current
                                        not in ['game']):
         try:
             self.current_song = load_audio(LOCAL_DEFAULTS['fx']['intro'])
             self.current_song.play()
             return
         except KeyError:
             # No intro effect configured: play nothing for menus.
             return
     else:
         try:
             s = self.music_playlist.pop()
         except IndexError:
             # Playlist exhausted: rebuild it from the played history.
             self.music_playlist = [str(_) for _ in self.music_played]
             del self.music_played[:]
             s = self.music_playlist.pop()
     self.current_song = load_audio(s)
     self.music_played.append(str(s))
     self.current_song.play()
Exemplo n.º 2
0
def denoise(sample_fname, backup_suffix, fnames):
    """Denoise every file in *fnames* using a noise profile taken from
    *sample_fname*, optionally backing up each original first.

    Returns 0 on success, 1 when no noise sample was given.
    """
    if sample_fname is None:
        print("error: noise sample not specified for denoise mode.",
              file=stderr)
        return 1

    samp_data, samp_rate = load_audio(sample_fname)
    # Cache one noise spectrum per sample rate so the expensive
    # resample + analysis runs at most once per distinct rate.
    noise_tab = {samp_rate: noise_spec(samp_data, samp_rate)}

    for fname in fnames:
        try:
            src_data, src_rate = load_audio(fname)
        except FileNotFoundError:
            print("Error File {} not found!".format(fname), file=stderr)
            continue

        with simple_progressbar(fname) as bar:
            noise = noise_tab.get(src_rate)
            if noise is None:
                samp = librosa.resample(samp_data, samp_rate, src_rate)
                noise = noise_spec(samp, src_rate)
                noise_tab[src_rate] = noise

            res_data = reduce_noise(src_data, src_rate, noise,
                                    progress=bar.update)

        if backup_suffix:
            # Insert the suffix before the extension.  When the file has
            # no extension, splitext leaves ext == "" and this degrades
            # to a plain append.  (The original referenced an undefined
            # `format_dot_place` here, raising NameError.)
            base, ext = os.path.splitext(fname)
            os.rename(fname, base + backup_suffix + ext)
        export_audio(fname, res_data, src_rate)

    return 0
Exemplo n.º 3
0
def main():
    global ents, sc, run
    run = True

    graphics.init(con.SCR_WIDTH, con.SCR_HEIGHT)
    sc = score.Score()

    try:
        pygame.mixer.pre_init(44100, -16, 2, 4096)
        pygame.mixer.init()
    except:
        print "You don't have audio support."
        con.audio_support = False

    if con.audio_support:
        audio.load_audio()
        pygame.mixer.music.load(os.path.join('audio', 'jl_music2.ogg'))
        pygame.mixer.music.set_volume(.9)

    while run:
        game_start()
        game_loop()
        game_end()

    pygame.quit()
Exemplo n.º 4
0
 def audio_callback(self, audio_type, audio_name, extra=None):
     """Play the sound effect named *audio_name* when *audio_type* is an
     FX request; unknown effect names are silently ignored.

     Finished/active effect handles accumulate in self.fx_bucket, which
     is flushed once it grows past 10 entries.
     """
     if len(self.fx_bucket) > 10:
         self.empty_fx_bucket()
     if audio_type in ['fx', 'FX']:
         try:
             # Per-level effect tables override the global defaults.
             # (`in soundfx` replaces the redundant `in soundfx.keys()`.)
             if self.level_name in soundfx:
                 fx = load_audio(LOCAL_DEFAULTS['fx'][soundfx[
                     self.level_name][audio_name]])
             else:
                 fx = load_audio(LOCAL_DEFAULTS['fx'][audio_name])
             fx.play()
             # Keep a reference so the effect is not garbage-collected
             # while it is still playing.
             self.fx_bucket.append(fx)
         except KeyError:
             # Unknown effect name: best effort, play nothing.
             pass
Exemplo n.º 5
0
def start(fnames):
    """Detect repeated audio passages across *fnames* and write a report
    of every repetition (with timestamps) to output.txt."""

    def timestr(seconds_fp):
        # Render a float second count as HH:MM:SS.mmm.
        total_ms = round(seconds_fp * 1e3)
        total_s, ms = divmod(total_ms, 1000)
        total_min, secs = divmod(total_s, 60)
        hours, mins = divmod(total_min, 60)
        return "{:02d}:{:02d}:{:02d}.{:03d}".format(hours, mins, secs, ms)

    signals, rate = resample_to_common(
        load_audio(fname, normalize=True) for fname in fnames)
    with simple_progressbar('Detecting repetitions') as bar, open(
            'output.txt', 'w') as report:
        for t1, t2, seg_len, ratio in get_repetitions(signals, rate,
                                                      progress=bar.update):
            idx_a, pos_a = t1
            idx_b, pos_b = t2
            print("repetition: {} {}--{} <=> {} {}--{} ({:.1f}%)".format(
                fnames[idx_a], timestr(pos_a), timestr(pos_a + seg_len),
                fnames[idx_b], timestr(pos_b), timestr(pos_b + seg_len),
                100 * ratio),
                  file=report)
def split_on_silence_with_librosa(audio_path,
                                  top_db=40,
                                  frame_length=1024,
                                  hop_length=256,
                                  skip_idx=0,
                                  out_ext="wav",
                                  min_segment_length=3,
                                  max_segment_length=8,
                                  pre_silence_length=0,
                                  post_silence_length=0):
    """Split an audio file on silence and save the kept segments.

    The audio is first de-breathed (every non-silent span is run through
    remove_breath, and the result saved with a "no_breath" postfix),
    then split again; segments strictly between min_segment_length and
    max_segment_length seconds are padded with silence and written as
    "<dir>/<name>.<idx:04d>.<out_ext>".

    Returns the list of written segment paths.
    """
    filename = os.path.basename(audio_path).split('.', 1)[0]
    # NOTE(review): the original computed
    # `in_ext = audio_path.rsplit(".")[1]` here; it was never used, and
    # rsplit without a maxsplit picks the wrong piece for names with
    # multiple dots, so the line was removed.

    audio = load_audio(audio_path)

    edges = librosa.effects.split(audio,
                                  top_db=top_db,
                                  frame_length=frame_length,
                                  hop_length=hop_length)

    # Pass 1: rebuild the signal with breaths removed from voiced spans.
    new_audio = np.zeros_like(audio)
    for idx, (start, end) in enumerate(edges[skip_idx:]):
        new_audio[start:end] = remove_breath(audio[start:end])

    save_audio(new_audio, add_postfix(audio_path, "no_breath"))
    audio = new_audio
    # Pass 2: re-split the cleaned signal before segmenting.
    edges = librosa.effects.split(audio,
                                  top_db=top_db,
                                  frame_length=frame_length,
                                  hop_length=hop_length)

    audio_paths = []
    for idx, (start, end) in enumerate(edges[skip_idx:]):
        segment = audio[start:end]
        duration = get_duration(segment)

        # Keep only mid-length segments (bounds are exclusive).
        if duration <= min_segment_length or duration >= max_segment_length:
            continue

        output_path = "{}/{}.{:04d}.{}".format(os.path.dirname(audio_path),
                                               filename, idx, out_ext)

        padded_segment = np.concatenate([
            get_silence(pre_silence_length),
            segment,
            get_silence(post_silence_length),
        ])

        save_audio(padded_segment, output_path)
        audio_paths.append(output_path)

    return audio_paths
Exemplo n.º 7
0
def _process_utterance(audio_path, data_dir, tokens, loss_coeff):
    audio_name = os.path.basename(audio_path)

    filename = audio_name.rsplit('.', 1)[0] + ".npz"
    numpy_path = os.path.join(data_dir, filename)

    if not os.path.exists(numpy_path):
        wav = load_audio(audio_path)

        try:
          linear_spectrogram = spectrogram(wav).astype(np.float32)
          mel_spectrogram = melspectrogram(wav).astype(np.float32)
        except:
          return 0

        data = {
            "linear": linear_spectrogram.T,
            "mel": mel_spectrogram.T,
            "tokens": tokens,
            "loss_coeff": loss_coeff,
        }

        n_frame = linear_spectrogram.shape[1]

        if hparams.skip_inadequate:
            min_n_frame = hparams.reduction_factor * hparams.min_iters
            max_n_frame = hparams.reduction_factor * hparams.max_iters - hparams.reduction_factor

            if min_n_frame <= n_frame <= max_n_frame and len(tokens) >= hparams.min_tokens:
                return None

        np.savez(numpy_path, **data, allow_pickle=False)
    else:
        try:
            data = np.load(numpy_path)
            n_frame = data["linear"].shape[0]
        except:
            remove_file(numpy_path)
            return _process_utterance(audio_path, data_dir, tokens, loss_coeff)

    return n_frame
Exemplo n.º 8
0
def text_recognition(path, config):
    """Transcribe the audio at *path* with Google Cloud Speech.

    Results are cached next to the audio as <root>.txt (JSON); a cached
    file is returned immediately without contacting the API.  Returns a
    dict {path: transcript}, or {} when the clip is too long.
    """
    root, ext = os.path.splitext(path)
    txt_path = root + ".txt"

    # Cached result from a previous run.
    if os.path.exists(txt_path):
        # (The original opened the file twice here, leaking one handle.)
        with open(txt_path) as f:
            return json.loads(f.read())

    from google.cloud import speech
    from google.cloud.speech import enums
    from google.cloud.speech import types

    out = {}
    error_count = 0

    tmp_path = os.path.splitext(path)[0] + ".tmp.wav"

    while True:
        try:
            client = speech.SpeechClient()

            content = load_audio(
                path,
                pre_silence_length=config.pre_silence_length,
                post_silence_length=config.post_silence_length)

            max_duration = config.max_duration - \
                    config.pre_silence_length - config.post_silence_length
            audio_duration = get_duration(content)

            if audio_duration >= max_duration:
                print(" [!] Skip {} because of duration: {} > {}". \
                        format(path, audio_duration, max_duration))
                return {}

            content = resample_audio(content, config.sample_rate)
            save_audio(content, tmp_path, config.sample_rate)

            with io.open(tmp_path, 'rb') as f:
                audio = types.RecognitionAudio(content=f.read())

            # NOTE(review): the original rebound `config` here, clobbering
            # the function parameter, so any retry would crash reading
            # config.pre_silence_length.  Use a distinct name.
            recognition_config = types.RecognitionConfig(
                encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
                sample_rate_hertz=config.sample_rate,
                language_code='ko-KR')

            response = client.recognize(recognition_config, audio)
            if len(response.results) > 0:
                alternatives = response.results[0].alternatives

                results = [
                    alternative.transcript for alternative in alternatives
                ]
                assert len(results) == 1, "More than 1 results: {}".format(
                    results)

                out = {path: "" if len(results) == 0 else results[0]}
                print(path, results[0])
            break
        except Exception as err:
            # Transient API failure: warn and retry up to 5 times.  (The
            # original re-raised here, leaving this retry logic dead.)
            error_count += 1
            print("OS error: {0}".format(err))
            print("Skip warning for {} for {} times". \
                    format(path, error_count))

            if error_count > 5:
                break

    remove_file(tmp_path)
    with open(txt_path, 'w') as f:
        json.dump(out, f, indent=2, ensure_ascii=False)

    return out
Exemplo n.º 9
0
    def handle_event(self, event):
        """Dispatch one SDL event: quit/escape stop the app, a dropped
        file resets the clock, F1 toggles the debug overlay."""
        etype = event.type
        if etype == SDL_QUIT:
            self.running = False
        elif etype == SDL_DROPFILE:
            self.clock.tick()
        elif etype == SDL_KEYDOWN and event.key.repeat == 0:
            # Only react to the initial key press, not auto-repeats.
            key = event.key.keysym.sym
            if key == SDLK_ESCAPE:
                self.running = False
            elif key == SDLK_F1:
                self.debug_info = not self.debug_info

    def draw(self):
        """Render this scene.  Intentionally a no-op for this screen."""
        pass

    def stop(self):
        """Request the main loop to exit on its next iteration."""
        self.running = False


if __name__ == "__main__":
    # Boot sequence: SDL/ext layer first, then the window surface.
    ext.init()
    graphics = Graphics(SCREEN_WIDTH, SCREEN_HEIGHT)
    # Pre-load all assets (sprites, audio, fonts) before showing the menu.
    AllSprite.load_sprites(graphics)
    AudioManager.init_audio()
    load_audio()
    FontManager.load_font("ressources/Rebellion.ttf", "rebel", 28)
    FontManager.load_font("ressources/DejaVuSansMono.ttf", "dejavu", 20)
    # Hand control to the menu loop; tear down SDL when it returns.
    game = Menu(graphics)
    game.run()
    ext.quit()
Exemplo n.º 10
0
def HP_Parser(transcript):
    """Map a Harry Potter spell transcript to easter-egg responses.

    Returns (aresp, vresp) where aresp is loaded spell audio and vresp
    an 8x8 LED frame (or frame sequence); either is None when the spell
    does not produce that kind of response.
    """
    global __easter
    global __adisab
    global __vdisab

    def _spell_audio(rel_path):
        # Load a clip from the installed audio directory.
        return audio.load_audio(BASE_PATH + rel_path)

    transcript = transcript.title()

    aresp = None
    vresp = None

    if transcript == "Sectumsempra":
        aresp = _spell_audio("/.audio/24000/Half-Blood_Prince.wav")
    elif transcript in ("Reveal Your Secret", "Review Your Secret"):
        # Two clips concatenated back to back.
        aresp = (_spell_audio("/.audio/24000/Out-of-Other_Business-I.wav") +
                 _spell_audio("/.audio/24000/Out-of-Other_Business-II.wav"))
    elif transcript == "Lumos Maxima":
        aresp = _spell_audio("/.audio/24000/Wand_Swing.wav")
        vresp = [colour["grey"] for _ in range(8 * 8)]
    elif transcript == "Lumos Solem":
        aresp = _spell_audio("/.audio/24000/Wand_Swing.wav")
        # One 8x8 frame per colour name: a brief flash sequence.
        vresp = [[colour[name] for _ in range(8 * 8)] for name in [
            "lgrey", "white", "white", "white", "white", "white", "white",
            "lgrey", "white"
        ]]
    elif transcript == "Incendio":
        aresp = _spell_audio("/.audio/24000/Wand_Swing.wav")
        vresp = [colour["red"] for _ in range(8 * 8)]
    elif transcript in ("Nox", "Knox"):
        aresp = _spell_audio("/.audio/24000/Wand_Swing.wav")
        __vdisab = True
    elif transcript == "Silencio":
        aresp = _spell_audio("/.audio/24000/Wand_Swing.wav")
        __adisab = True
    elif transcript in ("Obliviate", "Alleviate"):
        aresp = _spell_audio("/.audio/24000/Wand_Swing.wav")
        __easter = False

    return (aresp, vresp)
Exemplo n.º 11
0
def action_parser(transcript):
    """Turn a voice transcript into a (audio, spoken text, display) reply.

    The normal path asks the chatbot for a reply and synthesises speech
    for it.  When easter-egg mode is active, HP_Parser spell responses
    override the chatbot reply, and the magic phrases enable the mode.
    The __adisab/__vdisab flags mute the audio/visual channel of the
    returned triple respectively.
    """
    global colour
    global __senhat
    global __dcolor
    global __tcscal
    global __easter
    global __return
    global __adisab
    global __vdisab
    global prev_reply

    vtext = None
    dtext = None
    aresp = None
    vresp = None

    # NOTE(review): large disabled blocks of sensor / colour / reset
    # command parsing (dead string literals and commented-out code) were
    # removed here; recover them from version control if needed.

    # Default reply comes from the chatbot.
    vtext = str(chatbot.get_response(transcript))
    print(vtext)
    # dtext would carry a short display form; with the command parsing
    # disabled it is always None, so the full reply is displayed.
    if dtext is None:
        vresp = vtext
    else:
        vresp = dtext

    if vtext is not None:
        aresp = gtexttospeech.gtexttospeech(vtext)

    transcript = transcript.title()

    # Easter-egg spells override the chatbot reply entirely.
    if __easter is True:
        spell = HP_Parser(transcript)

        if spell[0] is not None or spell[1] is not None:
            vtext = "Easter Egg Mode Command Triggered"
            aresp = spell[0]
            vresp = spell[1]

    if transcript in ("I Am Half-Blood Prince", "I Am Half Blood Prince",
                      "Alohomora"):
        __easter = True

        vtext = "Enabling Easter Egg Mode"
        aresp = audio.load_audio(BASE_PATH + "/.audio/24000/Lead-In_Music.wav")

    # Record whether anything at all was produced this turn.
    if aresp is None and vtext is None and vresp is None:
        __return = False
    else:
        __return = True

    # Apply the mute flags: a blacked-out 8x8 frame replaces visuals.
    if __adisab is True and __vdisab is True:
        return (None, vtext, [colour["black"] for _ in range(8 * 8)])
    elif __adisab is True:
        return (None, vtext, vresp)
    elif __vdisab is True:
        return (aresp, vtext, [colour["black"] for _ in range(8 * 8)])
    else:
        return (aresp, vtext, vresp)