def setup(retinal_encoded_image_cv2_format):
    global sound_folder_location, is_setup, sound_sources, unit_vector_map

    retinal_encoded_image_width = len(retinal_encoded_image_cv2_format[0])
    retinal_encoded_image_height = len(retinal_encoded_image_cv2_format)

    # Generate unit vector map
    distance_to_near_plane = 0.5  # arbitrary distance
    pixel_width, pixel_height = ssf_core.calc_pixel_size(
        distance_to_near_plane, retinal_encoded_image_h_fov_rad,
        retinal_encoded_image_v_fov_rad, retinal_encoded_image_width,
        retinal_encoded_image_height)
    unit_vector_map = ssf_core.generate_unit_vector_map(
        pixel_width, pixel_height, retinal_encoded_image_width,
        retinal_encoded_image_height, distance_to_near_plane)
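    # (ssf_core is this project's own helper module; the pixel size presumably
    #  follows the standard near-plane geometry, roughly:
    #      plane_width = 2 * distance_to_near_plane * tan(h_fov / 2)
    #      pixel_width = plane_width / retinal_encoded_image_width
    #  and similarly for pixel_height from the vertical field of view.)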

    sound_folder_location = get_current_path() + "/sound_files/"
    soundsink.activate()

    # Setting up the listener using the defaults specified
    # here: https://media.readthedocs.org/pdf/pyal/latest/pyal.pdf
    listener = SoundListener()
    listener.position = (0, 0, 0)
    listener.velocity = (0, 0, 0)
    # (at_x, at_y, at_z, up_x, up_y, up_z): the "at" and "up" vectors
    listener.orientation = (0, 0, -1, 0, 1, 0)

    # Load the audio
    sample_audio = load_wav_file(sound_folder_location + "sample_audio.wav")

    # Setting up the sound sources for each receptive field (i.e. each
    # pixel in the retinal encoded image)
    for row in xrange(retinal_encoded_image_height):
        sound_sources.append([])
        for column in xrange(retinal_encoded_image_width):
            # A sound source is an object that emits sounds
            sound_sources[row].append(SoundSource(position=[0, 0, -1]))

            # Specifying if the source should loop the sound
            sound_sources[row][column].looping = True

            # Queueing appends the sound to the source for
            # processing and playback
            sound_sources[row][column].queue(sample_audio)

            # Setting the gain for each source:
            #   assuming the total gain should sum to at most that of a single
            #   sample_audio file, each sound source's gain is divided by the
            #   number of sound emitters.
            sound_sources[row][column].gain = (
                sound_sources[row][column].gain
                / (retinal_encoded_image_height * retinal_encoded_image_width))
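            # (With the OpenAL default gain of 1.0, each source therefore ends
            #  up at 1.0 / (height * width), e.g. 1/64 for an 8 x 8 image.)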

            soundsink.play(sound_sources[row][column])

    soundsink.update()

    is_setup = True
    # Update position of snake's head.
    if snake_direction == 0:
        snake_ypos[0] -= snake_cell_size
    if snake_direction == 1:
        snake_xpos[0] += snake_cell_size
    if snake_direction == 2:
        snake_ypos[0] += snake_cell_size
    if snake_direction == 3:
        snake_xpos[0] -= snake_cell_size

    if snake_xpos[0] < 0:
        snake_xpos[0] = screen_width
    if snake_xpos[0] > screen_width:
        snake_xpos[0] = 0

    if snake_ypos[0] < 0:
        snake_ypos[0] = screen_height
    if snake_ypos[0] > screen_height:
        snake_ypos[0] = 0

    listener.position = (new_xposition(snake_xpos[0]), 0, new_yposition(snake_ypos[0]))

    # Render image of apple and snake on screen
    screen.fill((0, 255, 0))
    screen.blit(apple_image, apple_pos)
    for (x, y) in zip(snake_xpos, snake_ypos):
        screen.blit(snake_image, (x, y))

    print sound_sources[0][0].position, listener.position
    soundsink.update()
    pygame.display.update()
def callback(retinal_encoded_data):
    retinal_encoded_image = bridge.imgmsg_to_cv2(retinal_encoded_data,
                                                 desired_encoding="32FC1")

    retinal_encoded_image_width = len(retinal_encoded_image[0])
    retinal_encoded_image_height = len(retinal_encoded_image)

    if retinal_encoded_image_width != 8 or retinal_encoded_image_height != 8:
        rospy.logerr("The retinal_encoded_image must be an 8 x 8 image!")
        return

    # Loading the audio data
    row_one_audio = load_wav_file(sound_folder_location +
                                  generate_sound_file_name(C_5))  # top
    row_two_audio = load_wav_file(sound_folder_location +
                                  generate_sound_file_name(B))
    row_three_audio = load_wav_file(sound_folder_location +
                                    generate_sound_file_name(A))
    row_four_audio = load_wav_file(sound_folder_location +
                                   generate_sound_file_name(G))
    row_five_audio = load_wav_file(sound_folder_location +
                                   generate_sound_file_name(F))
    row_six_audio = load_wav_file(sound_folder_location +
                                  generate_sound_file_name(E))
    row_seven_audio = load_wav_file(sound_folder_location +
                                    generate_sound_file_name(D))
    row_eight_audio = load_wav_file(sound_folder_location +
                                    generate_sound_file_name(C_4))  # bottom

    global soundSources
    global soundSourcesSetup
    if not soundSourcesSetup:
        soundsink.activate()

        # Setting up the listener (these are the defaults, so this block is optional)
        listener = SoundListener()
        listener.position = (0, 0, 0)  # default = (0, 0, 0)
        listener.velocity = (0, 0, 0)  # default = (0, 0, 0)
        # (at_x, at_y, at_z, up_x, up_y, up_z): the "at" and "up" vectors
        # default = (0, 0, -1, 0, 1, 0)
        listener.orientation = (0, 0, -1, 0, 1, 0)

        # Setup sound sources for each "receptive field"
        # Create array of sound sources
        for y in xrange(retinal_encoded_image_height):
            soundSources.append([])
            for x in xrange(retinal_encoded_image_width):
                # A SoundSource is an object that emits sounds
                soundSources[y].append(SoundSource(position=[0, 0, 0]))
                # Specifying if the source should loop the sound
                soundSources[y][x].looping = True
                # Queueing appends the sound to the source for processing and
                # playback (a lookup-table alternative to the if/elif chain
                # below is sketched after this callback)
                if y == 0:
                    soundSources[y][x].queue(row_one_audio)
                elif y == 1:
                    soundSources[y][x].queue(row_two_audio)
                elif y == 2:
                    soundSources[y][x].queue(row_three_audio)
                elif y == 3:
                    soundSources[y][x].queue(row_four_audio)
                elif y == 4:
                    soundSources[y][x].queue(row_five_audio)
                elif y == 5:
                    soundSources[y][x].queue(row_six_audio)
                elif y == 6:
                    soundSources[y][x].queue(row_seven_audio)
                elif y == 7:
                    soundSources[y][x].queue(row_eight_audio)
                # Informing the SoundSink about the SoundSource so it knows a new sound emitter is available
                soundsink.play(soundSources[y][x])

                # TODO: fix start position
                soundSources[y][x].position = [
                    x - (retinal_encoded_image_width / 2),
                    y - (retinal_encoded_image_height / 2),
                    -random.randint(1, 9)
                ]

        soundsink.update()
        print('soundSources have been set up')

        soundSourcesSetup = True

    # TODO: update positions of sound sources
    x_scale_factor = 0.5
    z_power_scale_factor = 2.0

    gain_scaled = 1.0 / (retinal_encoded_image_width *
                         retinal_encoded_image_height)

    x_pos = 0
    for row in xrange(retinal_encoded_image_height):
        for column in xrange(retinal_encoded_image_width):
            # center x
            x_pos = column - ((retinal_encoded_image_width - 1.0) / 2.0)
            # scale x
            x_pos = x_pos * x_scale_factor  # right is positive

            # set to zero, since MeloSee doesn't use height
            y_pos = 0.0

            # distance
            z_pos = retinal_encoded_image[row][column]

            # Gain settings, dependent on z
            if (math.isnan(z_pos)
                    or z_pos == 0.0
                    or z_pos >= depth_camera_max_depth):
                soundSources[row][column].gain = 0.0
            else:
                soundSources[row][column].gain = gain_scaled

                # NB: z scaling is done after gain settings
                z_pos = depth_camera_min_depth + (
                    (z_pos - depth_camera_min_depth)
                    **(z_power_scale_factor * 1.0))
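                # For example, assuming depth_camera_min_depth were 0.5 m and
                # z_power_scale_factor = 2.0, a measured depth of 2.0 m would
                # map to 0.5 + (2.0 - 0.5)**2 = 2.75 m, pushing far surfaces
                # away faster than a linear mapping would.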

                soundSources[row][column].position = [x_pos, y_pos, -z_pos]

    soundsink.update()
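
# A minimal alternative sketch, not part of the original example: the
# row-to-note mapping in callback() above can also be expressed as a lookup
# table instead of the long if/elif chain. The helper name is hypothetical;
# everything else reuses names from callback() (SoundSource, soundsink,
# soundSources, the row_*_audio samples and xrange).
def queue_row_audio_sketch(soundSources, row_audio_table,
                           image_width, image_height):
    # row_audio_table holds one loaded sample per image row, top to bottom,
    # e.g. [row_one_audio, row_two_audio, ..., row_eight_audio]
    for y in xrange(image_height):
        soundSources.append([])
        for x in xrange(image_width):
            source = SoundSource(position=[0, 0, 0])
            source.looping = True
            source.queue(row_audio_table[y])  # one note per image row
            soundsink.play(source)            # register the new sound emitter
            soundSources[y].append(source)
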
def setup(retinal_encoded_image_cv2_format):
    global sound_folder_location, is_setup, sound_sources, unit_vector_map, gain_scaled, alert_sound_sources

    retinal_encoded_image_width = len(retinal_encoded_image_cv2_format[0])
    retinal_encoded_image_height = len(retinal_encoded_image_cv2_format)

    # Generate unit vector map
    distance_to_near_plane = 0.5 # arbitrary distance
    pixel_width, pixel_height = ssf_core.calc_pixel_size(distance_to_near_plane,
                                                         retinal_encoded_image_h_fov_rad,
                                                         retinal_encoded_image_v_fov_rad,
                                                         retinal_encoded_image_width,
                                                         retinal_encoded_image_height)
    unit_vector_map = ssf_core.generate_unit_vector_map(pixel_width,
                                                        pixel_height,
                                                        retinal_encoded_image_width,
                                                        retinal_encoded_image_height,
                                                        distance_to_near_plane)

    sound_folder_location = get_current_path() + "/sound_files/"
    soundsink.activate()

    # Setting up the listener using the defaults specified
    # here: https://media.readthedocs.org/pdf/pyal/latest/pyal.pdf
    listener = SoundListener()
    listener.position = (0, 0, 0)
    listener.velocity = (0, 0, 0)
    # (at_x, at_y, at_z, up_x, up_y, up_z): the "at" and "up" vectors
    listener.orientation = (0, 0, -1, 0, 1, 0)

    # Load the audio
    large_water_sample = load_wav_file(sound_folder_location + "large_water_sample.wav")
    water_lapping_wind_sample = load_wav_file(sound_folder_location + "water_lapping_wind_sample.wav")
    top = load_wav_file(sound_folder_location + "top.wav")
    top_middle = load_wav_file(sound_folder_location + "top_middle.wav")
    middle = load_wav_file(sound_folder_location + "middle.wav")
    bottom_middle = load_wav_file(sound_folder_location + "bottom_middle.wav")
    bottom = load_wav_file(sound_folder_location + "bottom.wav")
    beep_short = load_wav_file(sound_folder_location + "beep_short.wav")

    # To avoid clipping, the gain for each sound source is scaled down
    # relative to the number of sound emitters, then nudged up by a small
    # fixed offset.
    gain_scaled = 1.0 / (retinal_encoded_image_width * retinal_encoded_image_height)
    gain_scaled = gain_scaled + 0.02
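    # e.g. for an 8 x 8 retinal image: 1.0 / 64 = 0.015625, plus the 0.02
    # offset, gives roughly 0.036 gain per source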

    # Setting up the sound sources for each receptive field (i.e. each
    # pixel in the retinal encoded image)
    for row in xrange(retinal_encoded_image_height):
        sound_sources.append([])
        for column in xrange(retinal_encoded_image_width):
            # A sound source is an object that emits sounds
            sound_sources[row].append(SoundSource(position=[0, 0, 0]))
            
            # Specifying if the source should loop the sound
            sound_sources[row][column].looping = True
            
            # Queueing appends the sound to the source for 
            # processing and playback
            if row == 0:
                sound_sources[row][column].queue(top)
            elif row == 1:
                sound_sources[row][column].queue(top_middle)
            elif row == 2:
                sound_sources[row][column].queue(middle)
            elif row == 3:
                sound_sources[row][column].queue(water_lapping_wind_sample)
            elif row == 4:
                sound_sources[row][column].queue(large_water_sample)

            # Scale gain
            sound_sources[row][column].gain = gain_scaled

            # Play the sound
            soundsink.play(sound_sources[row][column])

    # Setting up the sound sources for the minimum distance alert
    # 0 is left, 1 is right
    # alert_sound_sources.append(SoundSource(position=[0, 0, 0]))
    # alert_sound_sources[0].looping = True
    # alert_sound_sources[0].queue(beep_short)
    # alert_sound_sources[0].gain = 0.0
    # soundsink.play(alert_sound_sources[0])
    # alert_sound_sources.append(SoundSource(position=[0, 0, 0]))
    # alert_sound_sources[1].looping = True
    # alert_sound_sources[1].queue(beep_short)
    # alert_sound_sources[1].gain = 0.0
    # soundsink.play(alert_sound_sources[1])

    soundsink.update()

    is_setup = True
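
# A minimal usage sketch, assuming unit_vector_map is indexed as [row][column]
# with each entry a 3-component unit direction vector (the per-frame callback
# that pairs with this setup() is not shown here). Such a callback would
# typically place each source by scaling its receptive field's unit vector by
# the depth sampled from the retinal encoded image:
#
#     depth = retinal_encoded_image[row][column]
#     ux, uy, uz = unit_vector_map[row][column]
#     sound_sources[row][column].position = [ux * depth, uy * depth, uz * depth]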