Example #1
def mainloop():
    listener = SoundListener()
    listener.orientation = (0, 0, -1, 0, 1, 0)

    sink = SoundSink()
    sink.activate()

    rain = place_sound([0, -7, -1], load_sound("rfrog.wav"), True)
    sink.play(rain)

    birds = place_sound([5, 5, 5], load_sound("chirps1.wav"), True)
    sink.play(birds)

    creek = place_sound([-7, -7, 7], load_sound("creek.wav"), True)
    sink.play(creek)

    while True:
        birds.position = move_source_random(birds, -8, 8, 0, 7, -10, 10)
        char = ord(getch.getch())

        if char == 97:  # 'a' key
            listener.orientation = orient_listener(listener.orientation, False)

        if char == 100:  # 'd' key
            listener.orientation = orient_listener(listener.orientation, True)

        if char == 112:  # 'p' key: quit
            break

        sink.update()
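
The helpers used in Example #1 (load_sound, place_sound, move_source_random, orient_listener) are not part of pyal and are not shown in the snippet. A minimal sketch of what the first two could look like, assuming load_sound simply wraps load_wav_file and place_sound builds a looping positional SoundSource:

from openal.audio import SoundSource
from openal.loaders import load_wav_file

def load_sound(file_name):
    # load_wav_file returns a SoundData object that a SoundSource can queue
    return load_wav_file(file_name)

def place_sound(position, sound_data, looping):
    # Hypothetical helper: create an emitter at the given position, queue the
    # sample, and loop it if requested
    source = SoundSource(position=position)
    source.looping = looping
    source.queue(sound_data)
    return source

Example #2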
def setup(retinal_encoded_image_cv2_format):
    global sound_folder_location, is_setup, sound_sources, unit_vector_map

    retinal_encoded_image_width = len(retinal_encoded_image_cv2_format[0])
    retinal_encoded_image_height = len(retinal_encoded_image_cv2_format)

    # Generate unit vector map
    distance_to_near_plane = 0.5  # arbitrary distance
    pixel_width, pixel_height = ssf_core.calc_pixel_size(
        distance_to_near_plane, retinal_encoded_image_h_fov_rad,
        retinal_encoded_image_v_fov_rad, retinal_encoded_image_width,
        retinal_encoded_image_height)
    unit_vector_map = ssf_core.generate_unit_vector_map(
        pixel_width, pixel_height, retinal_encoded_image_width,
        retinal_encoded_image_height, distance_to_near_plane)

    sound_folder_location = get_current_path() + "/sound_files/"
    soundsink.activate()

    # Setting up the listener using the defaults specified
    # here: https://media.readthedocs.org/pdf/pyal/latest/pyal.pdf
    listener = SoundListener()
    listener.position = (0, 0, 0)
    listener.velocity = (0, 0, 0)
    # ("at" vector x, y, z, then "up" vector x, y, z)
    listener.orientation = (0, 0, -1, 0, 1, 0)

    # Load the audio
    sample_audio = load_wav_file(sound_folder_location + "sample_audio.wav")

    # Setting up the sound sources for each receptive field (i.e. each
    # pixel in the retinal encoded image)
    for row in xrange(retinal_encoded_image_height):
        sound_sources.append([])
        for column in xrange(retinal_encoded_image_width):
            # A sound source is an object that emits sounds
            sound_sources[row].append(SoundSource(position=[0, 0, -1]))

            # Specifying if the source should loop the sound
            sound_sources[row][column].looping = True

            # Queueing appends the sound to the source for
            # processing and playback
            sound_sources[row][column].queue(sample_audio)

            # Setting the gain for each source:
            #   Assuming the total gain should sum to at most that of a single
            #   sample_audio file, each sound source's gain is divided by the
            #   number of sound emitters.
            sound_sources[row][column].gain = (sound_sources[row][column].gain
                                               / (retinal_encoded_image_height
                                                  * retinal_encoded_image_width))

            soundsink.play(sound_sources[row][column])

    soundsink.update()

    is_setup = True
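
ssf_core.calc_pixel_size and ssf_core.generate_unit_vector_map are project-specific and their implementations are not shown. A minimal sketch of the underlying idea, assuming a flat near plane centred on the optical axis (the listener looks down -z, matching the orientation above): the field of view fixes the size of the near plane, each pixel gets an equal slice of it, and the unit vector for a pixel points from the origin through that pixel's centre.

import math

def calc_pixel_size_sketch(near, h_fov_rad, v_fov_rad, width, height):
    # Hypothetical stand-in: near-plane extent derived from the FOV,
    # split evenly across the pixels
    plane_width = 2.0 * near * math.tan(h_fov_rad / 2.0)
    plane_height = 2.0 * near * math.tan(v_fov_rad / 2.0)
    return plane_width / width, plane_height / height

def generate_unit_vector_map_sketch(pixel_width, pixel_height,
                                    width, height, near):
    # Hypothetical stand-in: one unit vector per pixel, pointing from the
    # origin through the centre of that pixel on the near plane
    unit_vector_map = []
    for row in range(height):
        unit_vector_map.append([])
        for column in range(width):
            x = (column - (width - 1) / 2.0) * pixel_width
            y = ((height - 1) / 2.0 - row) * pixel_height
            z = -near
            length = math.sqrt(x * x + y * y + z * z)
            unit_vector_map[row].append((x / length, y / length, z / length))
    return unit_vector_map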
Example #3
 def __init__(self):
     self.sink = SoundSink()
     self.sink.activate()
     self.listener = SoundListener()
     self.listener.orientation = (0,0,1,0,0,1)
     self.sources = [SoundSource(position = [i, 0, 0], pitch = 1) for i in range(-1, 2, 2)]
     self.intense_sources = [SoundSource(position = [i, 0, 0], pitch = 1) for i in range(-1, 2, 2)]
     #pitch: 5,4,3,2,1
     self.data = load_wav_file("./beep.wav")
     self.intense_data = load_wav_file("./high_beep.wav")
     '''
     for source in self.sources:
         #we want the sources to be looping
         source.looping = True
         source.queue(self.data)
     '''
     for source in self.intense_sources:
         source.looping = True
         source.queue(self.intense_data)
     self.threading_pool = []
     for mild_source in self.sources:
         t = sound_source(self.sink, mild_source)
         self.threading_pool.append(t)
     for intense_source in self.intense_sources:
         t = sound_source(self.sink, intense_source)
         self.threading_pool.append(t)
     #threading pool: mild left; mild right; i left; i right
     #self.sink.play(source1)
     #self.sink.play(source2)
     self.cutoff = [i * (config.MAX_DISTANCE / len(self.sources)) for i in range(len(self.sources))]
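
The sound_source thread wrapper and config.MAX_DISTANCE come from the surrounding project and are not shown in the snippet. A heavily assumed sketch of what such a wrapper might do (start playback on the shared sink and keep updating it from its own thread):

import threading
import time

class sound_source(threading.Thread):
    # Hypothetical reconstruction; the real class may differ
    def __init__(self, sink, source):
        threading.Thread.__init__(self)
        self.daemon = True
        self.sink = sink
        self.source = source

    def run(self):
        self.sink.play(self.source)
        while True:
            self.sink.update()
            time.sleep(0.01)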
Example #4
 def __init__(self, parent, wav_file_paths):
     self.parent = parent
     self.wav_file_paths = wav_file_paths
     self.snd_src = None
     self.sink = SoundSink()
     self.sink.activate()
     self.listener = SoundListener()
     self.sink.listener = self.listener
     self.snd_data = []
     for fp in wav_file_paths:
         self.snd_data.append(load_wav_file(fp))
     writeFile(self.parent.log_file_path, '%s, [audioOut], audioOut mod init.\n'%(get_time_stamp()))
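
writeFile and get_time_stamp are the author's own logging helpers and are not included in the snippet. Minimal assumed stand-ins (names kept, formats are assumptions):

from datetime import datetime

def get_time_stamp():
    # Timestamp string for log lines
    return datetime.now().strftime("%Y_%m_%d_%H_%M_%S")

def writeFile(file_path, txt):
    # Append a line of text to the log file
    with open(file_path, 'a') as f:
        f.write(txt)

Example #5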
import pygame
import random
from openal.audio import SoundSink, SoundSource, SoundListener
from openal.loaders import load_wav_file
from pygame.locals import QUIT, K_LEFT, K_RIGHT, KEYDOWN
import sys

__author__ = 'vamc'

#Initialize OpenAL related components
sound_sink = SoundSink()
sound_source = SoundSource()
listener = SoundListener()
sound_sink.activate()
sound_sink._listener = listener

source_sound_file = "asw.wav"
sound_data = load_wav_file(source_sound_file)
sound_source.queue(sound_data)
sound_source.looping = True

#initialize pygame and screen
pygame.init()
screen_width = 600
screen_height = 600
screen = pygame.display.set_mode((screen_width, screen_height))
screen.fill((0, 255, 0))
pygame.display.set_caption('Snake to the sound')

#Create Snake
snake_xpos = [300, 300, 300]
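
The snippet is cut off here. For context only, a minimal event loop in the same spirit (an assumption, not the original continuation): the arrow keys shift the source along the x axis and the sink is updated every frame.

sound_sink.play(sound_source)
running = True
while running:
    for event in pygame.event.get():
        if event.type == QUIT:
            running = False
        elif event.type == KEYDOWN:
            x, y, z = sound_source.position
            if event.key == K_LEFT:
                sound_source.position = [x - 1, y, z]
            elif event.key == K_RIGHT:
                sound_source.position = [x + 1, y, z]
    sound_sink.update()
pygame.quit()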
Example #6
def callback(retinal_encoded_data):
    retinal_encoded_image = bridge.imgmsg_to_cv2(retinal_encoded_data,
                                                 desired_encoding="32FC1")

    retinal_encoded_image_width = len(retinal_encoded_image[0])
    retinal_encoded_image_height = len(retinal_encoded_image)

    if (retinal_encoded_image_width != 8 or retinal_encoded_image_height != 8):
        rospy.logerr("The retinal_encoded_image must be an 8 x 8 image!!!")

    # Loading the audio data
    row_one_audio = load_wav_file(sound_folder_location +
                                  generate_sound_file_name(C_5))  # top
    row_two_audio = load_wav_file(sound_folder_location +
                                  generate_sound_file_name(B))
    row_three_audio = load_wav_file(sound_folder_location +
                                    generate_sound_file_name(A))
    row_four_audio = load_wav_file(sound_folder_location +
                                   generate_sound_file_name(G))
    row_five_audio = load_wav_file(sound_folder_location +
                                   generate_sound_file_name(F))
    row_six_audio = load_wav_file(sound_folder_location +
                                  generate_sound_file_name(E))
    row_seven_audio = load_wav_file(sound_folder_location +
                                    generate_sound_file_name(D))
    row_eight_audio = load_wav_file(sound_folder_location +
                                    generate_sound_file_name(C_4))  # bottom

    global soundSources
    global soundSourcesSetup
    if not soundSourcesSetup:
        soundsink.activate()

        # Setting up the listener (can actually comment this all out)
        listener = SoundListener()
        listener.position = (0, 0, 0)  # default = (0, 0, 0)
        listener.velocity = (0, 0, 0)  # default = (0, 0, 0)
        # ("at" vector x, y, z, then "up" vector x, y, z)
        # default = (0, 0, -1, 0, 1, 0)
        listener.orientation = (0, 0, -1, 0, 1, 0)

        # Setup sound sources for each "receptive field"
        # Create array of sound sources
        for y in xrange(retinal_encoded_image_height):
            soundSources.append([])
            for x in xrange(retinal_encoded_image_width):
                # A SoundSource is an object that emits sounds
                soundSources[y].append(SoundSource(position=[0, 0, 0]))
                # Specifying if the source should loop the sound
                soundSources[y][x].looping = True
                # Queueing appends the sound to the source for processing and playback
                if y == 0:
                    soundSources[y][x].queue(row_one_audio)
                elif y == 1:
                    soundSources[y][x].queue(row_two_audio)
                elif y == 2:
                    soundSources[y][x].queue(row_three_audio)
                elif y == 3:
                    soundSources[y][x].queue(row_four_audio)
                elif y == 4:
                    soundSources[y][x].queue(row_five_audio)
                elif y == 5:
                    soundSources[y][x].queue(row_six_audio)
                elif y == 6:
                    soundSources[y][x].queue(row_seven_audio)
                elif y == 7:
                    soundSources[y][x].queue(row_eight_audio)
                # Informing the SoundSink about the SoundSource so it knows a new sound emitter is available
                soundsink.play(soundSources[y][x])

                # TODO: fix start position
                soundSources[y][x].position = [
                    x - (retinal_encoded_image_width / 2),
                    y - (retinal_encoded_image_height / 2),
                    -random.randint(1, 9)
                ]

        soundsink.update()
        print('soundSources have been setup')

        soundSourcesSetup = True

    # TODO: update positions of sound sources
    x_scale_factor = 0.5
    z_power_scale_factor = 2.0

    gain_scaled = 1.0 / (retinal_encoded_image_width *
                         retinal_encoded_image_height)

    x_pos = 0
    for row in xrange(retinal_encoded_image_height):
        for column in xrange(retinal_encoded_image_width):
            # center x
            x_pos = column - ((retinal_encoded_image_width - 1.0) / 2.0)
            # scale x
            x_pos = x_pos * x_scale_factor  # right is positive

            # set to zero, since MeloSee doesn't use height
            y_pos = 0.0

            # distance
            z_pos = retinal_encoded_image[row][column]

            # Gain settings, dependent on z
            if math.isnan(z_pos) or                     \
               (z_pos == 0.0) or                        \
               (z_pos >= depth_camera_max_depth):
                soundSources[row][column].gain = 0.0
            else:
                soundSources[row][column].gain = gain_scaled

                # NB: z scaling is done after gain settings
                z_pos = depth_camera_min_depth + (
                    (z_pos - depth_camera_min_depth)
                    **(z_power_scale_factor * 1.0))

                soundSources[row][column].position = [x_pos, y_pos, -z_pos]

    soundsink.update()
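
The depth handling above is the core of this callback: depths that are NaN, zero, or beyond the camera range mute the source, and valid depths are pushed through a power curve before being used as the (negative) z coordinate. A small standalone illustration, with assumed values for depth_camera_min_depth and depth_camera_max_depth:

import math

depth_camera_min_depth = 0.5   # assumed value, metres
depth_camera_max_depth = 10.0  # assumed value, metres
z_power_scale_factor = 2.0

def depth_to_z(z):
    # Returns None when the source should be muted (gain = 0.0), otherwise
    # the power-scaled distance that becomes -z in the source position
    if math.isnan(z) or z == 0.0 or z >= depth_camera_max_depth:
        return None
    return depth_camera_min_depth + (z - depth_camera_min_depth) ** z_power_scale_factor

print(depth_to_z(0.5))   # 0.5: at the minimum depth the value is unchanged
print(depth_to_z(2.0))   # 2.75 = 0.5 + 1.5 ** 2: farther pixels recede faster
print(depth_to_z(12.0))  # None: beyond the camera range, the source is muted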
Example #7
#oh yeah
"""Utility functions for loading sounds."""
import os
import sys
import wave
from openal.audio import SoundData
from openal.loaders import load_wav_file
from openal.audio import SoundSink, SoundSource, SoundListener
import time
import math

if __name__ == "__main__":
    sink = SoundSink()
    sink.activate()
    listener = SoundListener()
    listener.orientation = (0,0,1,0,0,1)
    source1 = SoundSource(position=[0, 0, 3])
    source1.looping = True
    source2 = SoundSource(position=[0, 0, 3],pitch=2.0)
    source2.looping = True
    data2 = load_wav_file("./hey.wav")
    source1.queue(data2)
    source2.queue(data2)
    sink.play(source1)
    sink.play(source2)
    t = 0
    while True:
        # Sweep source1 back and forth along the x axis; source2 stays centred
        x_pos = 5 * math.sin(math.radians(t))
        source1.position = [x_pos, source1.position[1], source1.position[2]]
        source2.position = [0, source2.position[1], source2.position[2]]
        sink.update()
        t += 1            # advance the angle so the source actually moves
        time.sleep(0.01)  # avoid a busy loop
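Example #8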
def setup(retinal_encoded_image_cv2_format):
    global sound_folder_location, is_setup, sound_sources, unit_vector_map, gain_scaled, alert_sound_sources

    retinal_encoded_image_width = len(retinal_encoded_image_cv2_format[0])
    retinal_encoded_image_height = len(retinal_encoded_image_cv2_format)

    # Generate unit vector map
    distance_to_near_plane = 0.5 # arbitrary distance
    pixel_width, pixel_height = ssf_core.calc_pixel_size(distance_to_near_plane,
                                                         retinal_encoded_image_h_fov_rad,
                                                         retinal_encoded_image_v_fov_rad,
                                                         retinal_encoded_image_width,
                                                         retinal_encoded_image_height)
    unit_vector_map = ssf_core.generate_unit_vector_map(pixel_width,
                                                        pixel_height,
                                                        retinal_encoded_image_width,
                                                        retinal_encoded_image_height,
                                                        distance_to_near_plane)

    sound_folder_location = get_current_path() + "/sound_files/"
    soundsink.activate()

    # Setting up the listener using the defaults specified
    # here: https://media.readthedocs.org/pdf/pyal/latest/pyal.pdf
    listener = SoundListener()
    listener.position = (0, 0, 0)
    listener.velocity = (0, 0, 0)
    # ("at" vector x, y, z, then "up" vector x, y, z)
    listener.orientation = (0, 0, -1, 0, 1, 0)

    # Load the audio
    large_water_sample = load_wav_file(sound_folder_location + "large_water_sample.wav")
    water_lapping_wind_sample = load_wav_file(sound_folder_location + "water_lapping_wind_sample.wav")
    top = load_wav_file(sound_folder_location + "top.wav")
    top_middle = load_wav_file(sound_folder_location + "top_middle.wav")
    middle = load_wav_file(sound_folder_location + "middle.wav")
    bottom_middle = load_wav_file(sound_folder_location + "bottom_middle.wav")
    bottom = load_wav_file(sound_folder_location + "bottom.wav")
    beep_short = load_wav_file(sound_folder_location + "beep_short.wav")

    # To avoid clipping, the gain for each sound source needs to be
    # scaled down relative to the number of sound emitters
    gain_scaled = 1.0 / (retinal_encoded_image_width * retinal_encoded_image_height)
    gain_scaled = gain_scaled + 0.02

    # Setting up the sound sources for each receptive field (i.e. each
    # pixel in the retinal encoded image)
    for row in xrange(retinal_encoded_image_height):
        sound_sources.append([])
        for column in xrange(retinal_encoded_image_width):
            # A sound source is an object that emits sounds
            sound_sources[row].append(SoundSource(position=[0, 0, 0]))
            
            # Specifying if the source should loop the sound
            sound_sources[row][column].looping = True
            
            # Queueing appends the sound to the source for 
            # processing and playback
            if row == 0:
                sound_sources[row][column].queue(top)
            elif row == 1:
                sound_sources[row][column].queue(top_middle)
            elif row == 2:
                sound_sources[row][column].queue(middle)
            elif row == 3:
                sound_sources[row][column].queue(water_lapping_wind_sample)
            elif row == 4:
                sound_sources[row][column].queue(large_water_sample)

            # Scale gain
            sound_sources[row][column].gain = gain_scaled

            # Play the sound
            soundsink.play(sound_sources[row][column])

    # Setting up the sound sources for the minimum distance alert
    # 0 is left, 1 is right
    # alert_sound_sources.append(SoundSource(position=[0, 0, 0]))
    # alert_sound_sources[0].looping = True
    # alert_sound_sources[0].queue(beep_short)
    # alert_sound_sources[0].gain = 0.0
    # soundsink.play(alert_sound_sources[0])
    # alert_sound_sources.append(SoundSource(position=[0, 0, 0]))
    # alert_sound_sources[1].looping = True
    # alert_sound_sources[1].queue(beep_short)
    # alert_sound_sources[1].gain = 0.0
    # soundsink.play(alert_sound_sources[1])

    soundsink.update()

    is_setup = True