def process_file(audio_path, output, spkr_to_spkr, lstnr_to_spkr, ear_to_ear):
    """
    Remove speaker crosstalk from a stereo binaural file and write the
    cleaned signal to *output* as a wav file.
    """
    logger.info('Loading file into memory: {}'.format(audio_path))
    signal, sr = audio.load(audio_path, mono=False, sr=44100)
    left, right = signal[0], signal[1]

    logger.info('Computing distance from speaker to each ear')
    d1, d2, theta = compute_geometry(spkr_to_spkr, lstnr_to_spkr, ear_to_ear)
    for name, value in (('d1', d1), ('d2', d2), ('theta', theta)):
        logger.debug('{}: {}'.format(name, value))

    headshadow = headshadow_filter_coefficients(theta, ear_to_ear / 2, sr)
    logger.debug('headshadow b: {} a: {}'.format(*headshadow))

    logger.info('Computing recursive crosstalk cancellation for left channel')
    l_left, l_right = cancel_crosstalk(left, d1, d2, headshadow, sr)
    logger.info('Computing recursive crosstalk cancellation for right channel')
    r_right, r_left = cancel_crosstalk(right, d1, d2, headshadow, sr)

    # Mix the cancellation signals back into the original channels.
    left = audio.sum_signals([l_left, r_left, left])
    right = audio.sum_signals([l_right, r_right, right])

    merged = audio.channel_merge([left, right])
    logger.info('Writing output to: {}'.format(output))
    audio.write_wav(output, merged, sr, norm=True)
# --- Example #2 ---
def loadSamplesFolder(path, fftShape=(1024, 32), fileLimit=200):
    """
    Walk *path* for .wav files, compute their spectrograms and return
    (X, Y) training arrays.

    X: one flipped, transposed frequency matrix per file.
    Y: labels parsed from each file's parent directory name.
    """
    X = []
    Y = []
    paths = []
    # Collect (label, filepath) pairs; the label is the parent directory name.
    for root, dirs, files in os.walk(path):
        for fname in files:  # the enumerate() index was unused
            if fname.endswith(".wav"):
                paths.append((root.split("/")[-1], os.path.join(root, fname)))

    # Cap the number of samples kept per label.
    paths = tensorFlowUtils.limitMultilabelSamples(paths, fileLimit)
    # Load each file and compute its FFT spectrogram.
    for label, samplePath in paths:  # renamed: loop var shadowed the 'path' parameter
        wave = audio.load(samplePath)
        spectrogram, samplingRate = audio.performFFTs(wave)

        xn = np.fliplr(
            np.array([fft["frequencies"] for fft in spectrogram])).transpose()

        X.append(xn)
        # SECURITY NOTE: eval() on a directory name can execute arbitrary code;
        # prefer ast.literal_eval if labels are plain Python literals.
        Y.append(eval(label))

    return np.array(X), np.array(Y)
# --- Example #3 ---
def process_file(audio_path, output, spkr_to_spkr, lstnr_to_spkr, ear_to_ear):
    """
    Read a stereo binaural recording, cancel the crosstalk between its
    channels, and write the result to *output* as a wav file.
    """
    logger.info('Loading file into memory: {}'.format(audio_path))
    stereo, rate = audio.load(audio_path, mono=False, sr=44100)
    left_in = stereo[0]
    right_in = stereo[1]

    logger.info('Computing distance from speaker to each ear')
    d1, d2, theta = compute_geometry(spkr_to_spkr, lstnr_to_spkr, ear_to_ear)
    logger.debug('d1: {}'.format(d1))
    logger.debug('d2: {}'.format(d2))
    logger.debug('theta: {}'.format(theta))

    # Head-shadow filter: attenuation of sound bending around the head.
    headshadow = headshadow_filter_coefficients(theta, ear_to_ear / 2, rate)
    logger.debug('headshadow b: {} a: {}'.format(*headshadow))

    logger.info('Computing recursive crosstalk cancellation for left channel')
    l_left, l_right = cancel_crosstalk(left_in, d1, d2, headshadow, rate)
    logger.info('Computing recursive crosstalk cancellation for right channel')
    r_right, r_left = cancel_crosstalk(right_in, d1, d2, headshadow, rate)

    out_left = audio.sum_signals([l_left, r_left, left_in])
    out_right = audio.sum_signals([l_right, r_right, right_in])

    combined = audio.channel_merge([out_left, out_right])
    logger.info('Writing output to: {}'.format(output))
    audio.write_wav(output, combined, rate, norm=True)
# --- Example #4 ---
    def __init__(self):
        """Wire up the global game singleton: clocks, sprites, events, audio."""
        # Register this instance as the globally accessible game object.
        core.GAME = self
        # _Data is used as a shared namespace (the class itself, not an instance).
        self.data = core._Data

        self.data.screen_size = (800, 600)
        self.data.master_spr_group = sprite._MasterGroup()
        self.data.painter = screen.Painter()

        # Independent clocks for overall timing, rendering and game logic.
        self.data.master_clock = fltime.Clock()
        self.data.render_clock = fltime.Clock()
        self.data.logic_clock = fltime.Clock()
        fltime._set_master_clock(self.data.master_clock)

        self.data.master_eventlistener = event.EventListener()
        self.data.eventlistener = self.data.master_eventlistener

        ## Open ini file(s) here to load settings.
        # Select the pygame mixer as the audio backend.
        audio.load("flamingo.backends.audio.pygame_mixer")
# --- Example #5 ---
def hrtf_file(audio_path, azimuth, elevation=0, distance=1, ear_distance=0.215, output=None):
    """
    Load a mono audio file, apply the HRTF binaural transform, and return
    the processed signal; optionally write it to *output* as a wav file.
    """
    logger.info('Loading signal into memory: {}'.format(audio_path))
    signal, rate = audio.load(audio_path)
    signal = hrtf(signal, rate, azimuth, elevation, distance, ear_distance)
    if output:
        audio.write_wav(output, signal, rate, norm=True)
    return signal
# --- Example #6 ---
def initialize_overlay_tracks():
    """Pre-load every .mp3 under _OVERLAY_PATH into _OVERLAY_TRACKS."""
    if not os.path.exists(_OVERLAY_PATH):
        return
    mp3_names = (n for n in os.listdir(_OVERLAY_PATH) if n.endswith('.mp3'))
    for name in mp3_names:
        _OVERLAY_TRACKS.append(audio.load(os.path.join(_OVERLAY_PATH, name)))
# --- Example #7 ---
def initialize_panzer_tracks():
    """Pre-load every .mp3 under _PANZER_PATH into _PANZER_TRACKS."""
    if not os.path.exists(_PANZER_PATH):
        return
    for entry in os.listdir(_PANZER_PATH):
        if entry.endswith('.mp3'):
            full_path = os.path.join(_PANZER_PATH, entry)
            _PANZER_TRACKS.append(audio.load(full_path))
# --- Example #8 ---
 def load_tracks(self, filenames):
     """Load each filename with pydub and return the resulting tracks."""
     app = self.parent.parentApp
     loaded = []
     for name in filenames:
         app.notify('Loading {title}...'.format(title=name))
         track = audio.load(name)
         # Trim to twice the target length unless the app already cut them.
         if not app._already_cut:
             track = audio.cut(track, app._track_length * 2)
         loaded.append(track)
     return loaded
# --- Example #9 ---
def writeSamples(midiPath, audioPath, outputPath):
    """
    Slice the audio file into short samples around each MIDI drum event and
    write each slice to outputPath/<event notes>/<audio name><time>.wav.
    """
    midi = midiProxy.loadMidiDrums(midiPath)
    audioLoad = audio.load(audioPath)
    wave = audioLoad[1]
    rate = audioLoad[0]

    # Sanity check: visualize the spectrogram against the midi events.
    spectrogram, samplingRate = audio.performFFTs(audioLoad)
    audio.visualizeSpectrogram(wave=None,
                               spectrogram=spectrogram,
                               midi=midi,
                               name=midiPath)

    #lowest frequency = 10Hz = 0.1s per wave
    #time between 16th notes : 200bpm = 300 b/ms = 0.3 b/s = 0.075 16th/s
    step = 0.5  # window of sound saved, in seconds
    samples = int(step * rate)  # number of audio frames per sample
    preDelay = 0.05  # seconds of audio kept before the event onset
    for midiEvent in midi:
        # Name and onset time of the midi event.
        eventName = midiEvent['notes']
        onset = midiEvent["startTime"]

        # Skip events whose sample window falls outside the waveform.
        # ('start'/'end' replace the original 'min'/'max', which shadowed builtins)
        start = int(rate * (onset - preDelay))
        end = start + samples
        if start < 0 or end > len(wave):
            continue

        # One folder per event name.
        directory = outputPath + "/" + str(eventName)
        if not os.path.exists(directory):
            os.makedirs(directory)

        # TODO: fade in/out to prevent aliasing in the fft?

        # Write the isolated slice of the waveform.
        # (print(expr) works under both Python 2 and 3, unlike the
        # original Python-2-only print statement)
        outName = directory + "/" + audioPath.split("/")[-1] + str(onset) + ".wav"
        audio.write(outName, rate, wave[start:end])
        print(outName)
# --- Example #10 ---
def writeSamples(midiPath, audioPath, outputPath):
    """
    Cut a short sample around every MIDI drum event in audioPath and write
    each one to outputPath/<event notes>/<audio name><time>.wav.
    """
    midi = midiProxy.loadMidiDrums(midiPath)
    audioLoad = audio.load(audioPath)
    wave = audioLoad[1]
    rate = audioLoad[0]

    #lowest frequency = 10Hz = 0.1s per wave
    #time between 16th notes : 200bpm = 300 b/ms = 0.3 b/s = 0.075 16th/s
    step = 0.525  # window of the sound saved in seconds
    samples = int(step * rate)  # number of samples to save
    preDelay = 0.05  # seconds of audio kept before the event onset
    for midiEvent in midi:
        # Name and onset time of the midi event.
        eventName = midiEvent['notes']
        onset = midiEvent["startTime"]

        # Skip events whose sample window falls outside the waveform.
        # ('start'/'end' replace the original 'min'/'max', which shadowed builtins)
        start = int(rate * (onset - preDelay))
        end = start + samples
        if start < 0 or end > len(wave):
            continue

        # One folder per event name.
        directory = outputPath + "/" + str(eventName)
        if not os.path.exists(directory):
            os.makedirs(directory)

        # TODO: fade in/out to prevent aliasing in the fft?

        # Write the isolated slice of the waveform.
        # (print(expr) works under both Python 2 and 3, unlike the
        # original Python-2-only print statement)
        outName = directory + "/" + audioPath.split("/")[-1] + str(onset) + ".wav"
        audio.write(outName, rate, wave[start:end])
        print(outName)
# --- Example #11 ---
def loadSamplesFolder(path, fileLimit = 200):
    """
    Walk *path* for .wav files and return (X, Y): per-file spectrogram
    matrices and labels parsed from each file's parent directory name.
    """
    X = []
    Y = []
    paths = []
    # Collect (label, filepath) pairs; the label is the parent directory name.
    for root, dirs, files in os.walk(path):
        for fname in files:  # the enumerate() index was unused
            if fname.endswith(".wav"):
                paths.append((root.split("/")[-1], os.path.join(root, fname)))

    # Cap the number of samples kept per label.
    paths = tensorFlowUtils.limitMultilabelSamples(paths, fileLimit)
    # Load each file and compute its FFT spectrogram.
    for label, samplePath in paths:  # renamed: loop var shadowed the 'path' parameter
        wave = audio.load(samplePath)
        spectrogram, samplingRate = audio.performFFTs(wave)

        xn = np.fliplr(np.array([fft["frequencies"] for fft in spectrogram])).transpose()
        X.append(xn)
        # SECURITY NOTE: eval() on a directory name can execute arbitrary code;
        # prefer ast.literal_eval if labels are plain Python literals.
        Y.append(eval(label))

    return np.array(X), np.array(Y)
# --- Example #12 ---
# Script bootstrap: parse CLI args, then bring up config, logging, audio and
# MIDI in order; exit with a nonzero status as soon as any prerequisite fails.
ARGS = PARSER.parse_args()
print(ARGS)

# Load settings from config.CONFIG FILE
if not config.load(ARGS.config_file):
    print("Couldn't load config file [%s]" % ARGS.config_file)
    exit(1)

# Load logger
if not logger.load():
    print("Couldn't create logger file")
    exit(2)

# Set SOUND DEVICE
if audio.load():
    logger.LOGGER.info("SOUND-DEVICE: {}".format(audio.DEVICE.default.device))
else:
    # No usable sound device: log the candidate list and abort.
    logger.LOGGER.error(audio.DEVICES_LIST)
    exit(1)

# Set MIDI DEVICE
if midi.load():
    logger.LOGGER.info("MIDI DEVICE: {}".format(midi.DEVICE))
else:
    # No usable MIDI device: log the candidate list and abort.
    logger.LOGGER.error(midi.DEVICES_LIST)
    exit(1)

# Send PANIC to MIDI and wait for silence, JIC
logger.LOGGER.info("Waiting for MIDI silence at %s" % midi.DEVICE)
midi.DEVICE.panic()
# --- Example #13 ---
    def __init__(self, path):
        """Set up the game: fonts, meshes, sounds, world objects and camera.

        path -- base directory containing the game's data/ folder.
        """
        self.path = path
        # Publish this instance through the module-level _game global.
        global _game
        _game = self

        self.show_title(True)

        self.input = controls.Input()

        # Fonts; a missing font file means the data dir is absent -> abort.
        self.font = al_load_font(self.path + "/data/JosefinSans-Regular.ttf",
                                 -12, 0)
        if not self.font:
            print("Cannot find data")
            sys.exit(1)
        self.font_big = al_load_font(
            self.path + "/data/JosefinSans-Regular.ttf", -48, 0)

        # Per-color raft and mage meshes, one of each per color.
        colors = ["red", "green", "blue", "black", "white", "yellow", "purple"]
        self.mage_p = []
        self.raft_p = []
        for i in range(7):
            self.raft_p.append(
                mesh.read_frames(self.path + "/data/raft%d.mesh.gz" % (1 + i)))
            self.mage_p.append(
                mesh.read_frames(self.path +
                                 "/data/%s mage_fire_outfit.mesh.gz" %
                                 colors[i]))
        # Shared scenery / creature meshes.
        self.river = mesh.read_frames(self.path + "/data/perlin.mesh.gz")
        self.dragon_p = mesh.read_frames(self.path + "/data/dragon.mesh.gz")
        self.pine_p = mesh.read_frames(self.path + "/data/pine.mesh.gz")
        self.finish_p = mesh.read_frames(self.path + "/data/finish.mesh.gz")
        self.wolf_p = mesh.read_frames(self.path + "/data/wolf.mesh.gz")

        # Sound effects.
        self.roar = audio.load(self.path + "/data/wumpus dines.ogg")
        self.swoosh = audio.load(self.path + "/data/swoosh.ogg")
        self.yelp = audio.load(self.path + "/data/yelp.ogg")
        self.jingle = audio.load(self.path + "/data/jingle.ogg")
        self.rubber = audio.load(self.path + "/data/rubber.ogg")
        self.growl = audio.load(self.path + "/data/growl.ogg")
        self.chew = audio.load(self.path + "/data/chew.ogg")
        self.dogyelp = audio.load(self.path + "/data/dogyelp.ogg")

        # Camera / view state.
        self.zoom = 0
        self.rotation = pi / 4
        self.scroll = 20
        self.camera = camera.Camera()
        self.rotate_camera(0)
        self.paused = False
        self.title = title.Title()
        self.silver = al_color_name("silver")

        # Spawn points as (x, y) pairs laid out along 128-unit tiles.
        self.t = 0
        self.spawn = [(128 * 9 - 30, 10), (128 * 10 + 40, 10),
                      (128 * 11 + 30, 110), (128 * 12 + 0, 10),
                      (128 * 13 + 0, 10), (128 * 13 + 64, 110),
                      (128 * 14 + 0, 10), (128 * 14 + 64, 110),
                      (128 * 15 + 0, 10)]

        # FPS ring buffer and its timestamp.
        self.fps = [0, 0, 0, 0, 0, 0]
        self.fps_t = 0

        self.landscape = landscape.Landscape(self.river)

        self.actors = actor.Actors(self.landscape)

        # Static scenery (pines) placed from the objects.json manifest,
        # each with a random yaw and a fixed tilt.
        with open(self.path + "/data/objects.json", "r") as j:
            self.objects = json.load(j)
            for o in self.objects:
                t = self.actors.new(self.pine_p,
                                    o["x"],
                                    o["y"],
                                    radius=8,
                                    scale=8,
                                    static=True)
                t.cam.rotate(vector.z, random.uniform(-pi, pi))
                t.cam.rotate(vector.y, pi / 8)

        # Finish-line marker near the end of the course.
        t = self.actors.new(self.finish_p,
                            128 * 16 - 32,
                            48,
                            z=20,
                            scale=20,
                            static=True)
        t.cam.rotate(vector.z, -pi / 2)
        t.cam.rotate(vector.y, pi / 8)

        self.picked = None

        self.resize(1280, 720)

        # One raft per color; rafts double as wolf carriers.
        # NOTE(review): 'skip' is not defined in this method — presumably a
        # module-level constant; confirm where it is set.
        self.raft = []
        self.raft_and_wolf = []
        for i in range(7):
            x = 16 + skip
            y = 64
            r = self.actors.new(self.raft_p[i], x, y)
            r.color = al_color_name(colors[i])
            r.color_index = i
            self.raft.append(r)
            self.raft_and_wolf.append(r)

        # The dragon starts off-screen to the left, airborne.
        self.dragon = self.actors.new(self.dragon_p,
                                      -100,
                                      64,
                                      flying=True,
                                      scale=5,
                                      z=20)

        self.scroll_camera(self.scroll)
        self.red = al_color_name("crimson")
# --- Example #14 ---
    'Output directory (will be created if not present): [./tracks]'
) or './tracks'
# Prompted track length with a default of 70 seconds.
length = input('Track length, in seconds: [70] ') or 70
length = int(length)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# Convert every input file: trim to 'length', re-export as 256k mp3,
# and copy the ID3 metadata across.
filenames = utils.get_filenames(input_dir)
filenames_length = len(filenames)
for i, filename in enumerate(filenames.values(), start=1):
    # NOTE(review): the 'filename' argument below is never referenced by the
    # format string — the '[(unknown)]' placeholder looks like it should be
    # '[{filename}]'; confirm against the original script.
    pretty_print(
        'Converting file {i}/{total}... [(unknown)]'.format(
            i=i,
            total=filenames_length,
            filename=os.path.basename(filename),
        ),
    )
    track = audio.load(filename)
    # audio.cut presumably takes milliseconds (length is in seconds) — TODO confirm.
    track = audio.cut(track, length * 1000)
    new_filename = os.path.join(
        output_dir,
        os.path.basename(filename),
    )
    track.export(new_filename, format='mp3', bitrate='256k')
    # Copy metadata, too
    data = EasyID3(filename)
    data.save(new_filename, v1=2)
print()
print('Done.')