Example #1
 def MixAndPlay(self, index1, index2):
     for x in self.list:
         if index1 == (self.list.index(x)+1):
             for y in self.list:
                 if index2 == (self.list.index(y)+1):
                     output = x.overlay(y)
                     playback.play(output)
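The nested loops above do a linear scan to recover 1-based indices. A hedged equivalent using direct indexing, assuming self.list holds AudioSegment objects and the indices are 1-based as the +1 implies (compare Example #31 below):

 def MixAndPlay(self, index1, index2):
     # index1/index2 are 1-based positions into self.list (assumption)
     output = self.list[index1 - 1].overlay(self.list[index2 - 1])
     playback.play(output)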
Example #2
def checkGrades(html_doc):
    soup = BeautifulSoup(html_doc, 'html.parser')
    # find tables
    tables = soup.find_all("table")
    # courses table is at index 1
    course_table_index = 1
    rows = tables[course_table_index].find_all("tr")

    # traverse courses except header row
    for i in range(1, len(rows)):
        
        # get course row
        row = rows[i]
        
        # find all values belonging to course
        cols = row.find_all("td")
        cols = [ele.text.strip() for ele in cols]
        
        # create course object
        course = Course(cols)
        
        # check if the course is already in the dictionary;
        # if it is, check whether the grade has changed and play a sound
        if course.code in courses:
            old_course = courses[course.code]
            if old_course.grade != course.grade:
                play(song)
                print("New grade has been given")
                print("Course Name: %s" % course.name)
                print("Grade: %s" % course.grade)
        # add new course to dictionary
        else:
            print("New course has been added.")
            print(course.to_string())
            courses[course.code] = course
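The snippet relies on several names defined elsewhere in its project (BeautifulSoup, play, the courses dictionary, the song alert sound, and the Course class). A minimal sketch of that assumed setup, with hypothetical field names and file name:

# Hypothetical setup for the snippet above; the real project defines these
# elsewhere, so the file name and Course fields are assumptions.
from bs4 import BeautifulSoup
from pydub import AudioSegment
from pydub.playback import play

song = AudioSegment.from_mp3("notification.mp3")  # alert sound (hypothetical file)
courses = {}  # course code -> Course, kept between polls


class Course:
    def __init__(self, cols):
        # assumed column order in the grades table: code, name, grade
        self.code, self.name, self.grade = cols[0], cols[1], cols[2]

    def to_string(self):
        return "%s %s (grade: %s)" % (self.code, self.name, self.grade)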
Example #3
 def play(self, skip_silence=True):
     self.tree_view()
     if skip_silence:
         seg = self.audio_segment[self.speech_start:]
     else:
         seg = self.audio_segment
     print('-' * 40, self.language(), self.confidence, '[%s]' % self.transcription)
     play(seg)
Example #4
def play(path):
    """Play the mp3 file at the given path.

    :param path: should be a relative path
    :return: None
    """
    playback.play(AudioSegment.from_mp3(__dir__ + path))
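This helper depends on a module-level __dir__ prefix and the pydub imports; a minimal sketch of that assumed setup (the real project defines __dir__ itself):

import os
from pydub import AudioSegment, playback

__dir__ = os.path.dirname(os.path.abspath(__file__)) + os.sep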
Example #5
 def say(self, sentence):
     print("say method")
     fullSentence = AudioSegment.from_mp3(sentence[0])
     for i in range(1, len(sentence)):
         print("sentence[i] ", sentence[i])
         wordToPlay = AudioSegment.from_mp3(sentence[i])
         fullSentence += wordToPlay
     print("Ready to play")
     play(fullSentence)
Example #6
def play_audio(signal):
	print("Playing audio...")
	audio_segment = AudioSegment(
		signal.tobytes(),  # raw samples; expected to be 32-bit ints (sample_width=4)
		frame_rate=44100,
		sample_width=4,
		channels=1,
	)
	play(audio_segment)
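Because sample_width=4, the function expects 32-bit integer samples. A hedged usage sketch that synthesizes one second of a 440 Hz tone (the NumPy import and amplitude are assumptions):

import numpy as np

t = np.linspace(0, 1, 44100, endpoint=False)
tone = (0.3 * np.sin(2 * np.pi * 440 * t) * (2**31 - 1)).astype(np.int32)
play_audio(tone)  # one second of a 440 Hz sine wave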
Example #7
def playSample(sample):
	""" 
	Input: a .wav file
	Plays the current song in the REPL interface
	"""
	try:
		play(sample)
	except Exception:
		print("File not found")
Example #8
def play_thread():
	global do_play
	while True:
		if do_play:
			play(song)
			do_play = False
		sleep(0.5)
		if abort:
			break
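A minimal sketch of the surrounding setup this worker assumes (song, do_play and abort are module-level names in the snippet; the threading wiring and file name here are assumptions):

import threading
from time import sleep
from pydub import AudioSegment
from pydub.playback import play

song = AudioSegment.from_mp3("song.mp3")  # hypothetical file
do_play = False
abort = False

threading.Thread(target=play_thread, daemon=True).start()
do_play = True   # ask the worker to play the song once
# ... later ...
abort = True     # let the worker loop exit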
Example #9
    def process_event(self, event):
        sound = None

        # new game event
        if "newgame" in event:
            sound = self.get_sound_byte(self.get_sound_from_dir("new_game"))
            self.previous_goal = ""
            self.consecutive_goals = 0
            self.total_goals = 0

        # goal event
        elif event[0] == "A" or event[0] == "B":  # A or B is the 1st character
            self.total_goals += 1
            current_goal = event[0]      # extract which goal "A" or "B"
            a_num_goals = int(event[2])  # extract number of A and B goals
            b_num_goals = int(event[4])

            # first goal
            if self.previous_goal == "":
                self.consecutive_goals = 1
                sound = self.get_sound_byte(self.get_sound_from_dir("opener_goal"))

            # goal streak
            elif self.previous_goal == current_goal:
                self.consecutive_goals += 1
                sound = self.get_sound_byte(self.consecutive_goal_sound[self.consecutive_goals]())

            # comeback and all other goals
            else:
                if self.consecutive_goals == 4:
                    sound = self.get_sound_byte(self.get_sound_from_dir("comeback"))
                else:
                    sound = self.get_sound_byte(self.get_sound_from_dir("goal"))
                self.consecutive_goals = 1

            # announce score
            if sound:
                if 0 < a_num_goals < 5 and 0 < b_num_goals < 5:
                    sound += self.get_sound_byte(self.get_sound_from_number_dir(a_num_goals))
                    sound += self.get_sound_byte(self.get_sound_from_number_dir(b_num_goals))
                elif a_num_goals == 5:
                    sound += self.get_sound_byte(self.get_sound_from_dir("bluewin"))
                elif b_num_goals == 5:
                    sound += self.get_sound_byte(self.get_sound_from_dir("redwin"))

            self.previous_goal = current_goal

        # unknown event
        else:
            pass

        if sound:
            play(sound)
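From the indexing above (event[0] is "A" or "B", int(event[2]) and int(event[4]) are the two scores), the goal events appear to be short strings such as "A 2 1". A hedged usage sketch (the instance name is hypothetical):

announcer.process_event("newgame")  # reset counters, play the new-game sound
announcer.process_event("A 1 0")    # first goal for A: opener sound
announcer.process_event("A 2 0")    # A scores again: goal streak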
Example #10
def request_reviews(token):
    song = AudioSegment.from_mp3("doorbell.mp3")
    headers = {'Authorization': token, 'Content-Length': '0'}

    logger.info("Requesting certifications...")
    certs_resp = requests.get(CERTS_URL, headers=headers)
    certs_resp.raise_for_status()

    certs = certs_resp.json()
    project_ids = [cert['project']['id'] for cert in certs if cert['status'] == 'certified']

    logger.info("Found certifications for project IDs: {}".format(str(project_ids)))
    logger.info("Polling for new submissions...")

    for pid in itertools.cycle(project_ids):
        try:
            resp = requests.post(ASSIGN_URL.format(pid = pid), headers=headers)
        except requests.exceptions.ConnectionError as e:
            logger.info("Failed to connect, retrying")
            continue

        if resp.status_code == 201:
            submission = resp.json()
            play(song)
            logger.info("")
            logger.info("=================================================")
            logger.info("You have been assigned to grade a new submission!")
            logger.info("Project:" + str(project[pid]))            
            logger.info("View it here: " + REVIEW_URL.format(sid = submission['id']))
            logger.info("=================================================")
            logger.info("Continuing to poll...")
            p.pushNote(devices[0]["iden"], str(project[pid]),'Project to Review') #Push to iPhone/apple Watch
            
        elif resp.status_code == 404:
            logger.debug("{} returned {}: No submissions available."
                .format(resp.url, resp.status_code))
        elif resp.status_code in [400, 422]:
            logger.debug("{} returned {}: Assigned submission limit reached."
                .format(resp.url, resp.status_code))

        else:
            print("FAILURE!!: ",resp.status_code)
            time.sleep(5.0)
            #resp.raise_for_status()

        time.sleep(1.0 / REQUESTS_PER_SECOND)
Example #11
File: main.py  Project: bengouma/fedora
def press():
    global score
    global scoreLabel
    # increment the score by one and update the label
    newScore = score + 1
    score = newScore
    scoreLabel.config(text="Score: " + str(newScore))

    # call rotateToFifteen to rotate the image fifteen degrees
    rotateToFifteen()
    root.update()

    # once rotateToFifteen is finished, rotateToZero is scheduled
    root.after(1000, rotateToZero)

    # play the m'lady audio segment
    lady = AudioSegment.from_mp3("m'lady.mp3")
    play(lady)
Example #12
            if (t_word == w_corr[j]):
                word[i] = w_corr[j]
                t = t + 1
                for k in range(w_size - i - 2):
                    word[i + 1 + k] = word[i + 2 + k]

    word = word[0:w_size - t]

    quote = "ˈ"
    y = 0
    for i in range(len(word)):
        if (word[i] == quote):
            y = y + 1
            for k in range(len(word) - i - 1):
                word[i + k] = word[i + 1 + k]

    word = word[0:len(word) - y]
    print(word)
    dur = (len(word) / 5) * 1200
    sound = AudioSegment.silent(duration=dur)
    pos = 0
    for i in range(len(word)):
        pos = pos + 120
        if ((word[i] == " ") | (word[i] == "*")):
            word[i] = "_"
        sound1 = AudioSegment.from_mp3(
            '/home/david/Escritorio/Proyectos/lou/' + str(word[i]) + '.wav')
        sound = sound.overlay(sound1, position=pos)
    play(sound)
    sound.export("test.mp3")
Example #13
# import required libraries
from pydub import AudioSegment
from pydub.playback import play

# Import an audio file; the format parameter
# is given only for readability
wav_file = AudioSegment.from_file(
    file="/Users/dan/Documents/ws/robot-media/wav-files/R2D2-yeah.wav",
    format="wav")

# Play the audio file
play(wav_file)
Example #14
 def song3_start(self):
     play(song3)
Example #15
 def song1_start(self):
     play(song1)
Example #16
 def read_poem_site(self):
     message = "The poetry foundation poem of the day is {} by {}".format(self.title,self.author)
     tts(message)
     mp3 = request.urlopen(self.audio_url).read()
     poem = AudioSegment.from_mp3(BytesIO(mp3))
     playback.play(poem)
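A sketch of the imports this method appears to rely on (tts and the self.title, self.author and self.audio_url attributes come from the surrounding class, which is not shown):

from io import BytesIO
from urllib import request
from pydub import AudioSegment, playback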
Example #17
File: piclock.py  Project: cwk9/piclock
    def checkinput():
        global alarmstatus
        if (GPIO.input(7)):
            alarmstatus = "OFF"
        else:
            alarmstatus = "ON"
            time.sleep(1)  # slow down checking to one second

# Load the alarm wave sounds listed in the config
alarmwavs_temp = [row[1] for row in config.items('ALARMWAVS')]
alarmwavs = [AudioSegment.from_wav(alwv) for alwv in alarmwavs_temp]

# test sound; remove when not needed
play(alarmwavs[0])

#What we do when an alarm is triggered
def ring_ring():
    sys.stdout.write('ring ring\n')
    play(alarmwavs[0])
    sys.stdout.flush()

#Create an alarm clock object for every alarm in the config file.
def createclocks():
    # Create our clock objects
    clocks = [Clock() for i in range(len(config.items('ALARMS')))]
    # Set the alarm and start the clock.
    alcount = 0
    for cl in clocks:
        alarm_time = config['ALARMS'][str(alcount)].split(":")
        cl.set_alarm(alarm_time[0], alarm_time[1])
        alcount += 1
Example #18
 def _play_stop(self, *args):
     play(STOP_SOUND)
     GLib.timeout_add_seconds(0.1, self._next)
Example #19
    if status == MIFAREReader.MI_OK:
        if rstCount == 0:
            call(["espeak", "-s140 -ven+18 -z", "Module 1"])
        elif rstCount == 1:
            call(["espeak", "-s140 -ven+18 -z", "Module 2"])
        elif rstCount == 2:
            call(["espeak", "-s140 -ven+18 -z", "Module 3"])
        elif rstCount == 3:
            call(["espeak", "-s140 -ven+18 -z", "Module 4"])

        # Print UID
        cardID = str(uid[3])
        print("card ID is", cardID)

        if int(cardID) == 185:
            play(audio1)
        elif int(cardID) == 41:
            play(audio2)
        elif int(cardID) == 89:
            play(audio3)
        elif int(cardID) == 163:
            play(audio4)

        # This is the default key for authentication
        key = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]

        # Select the scanned tag
        MIFAREReader.MFRC522_SelectTag(uid)

        # Authenticate
        status = MIFAREReader.MFRC522_Auth(MIFAREReader.PICC_AUTHENT1A, 8, key,
Example #20
def play_error():
    # Error sound:
    system_error = AudioSegment.from_file_using_temporary_files(
        r'Z:\stream\gg\Windows_error.mp3')
    play(system_error)
Example #21
keyboard.add_hotkey(101, open_spotify)
# F15
keyboard.add_hotkey(102, add_win)
# F16
keyboard.add_hotkey(103, add_loss)
# F17
keyboard.add_hotkey(104, play_error)
# F18

# F19
keyboard.add_hotkey(106, play_gg_sound)
# F20

# ABBREVIATIONS FOR SIMPLE QUICK TYPING
# GMAIL
keyboard.add_abbreviation('@@', '*****@*****.**')
# PROTONMAIL
keyboard.add_abbreviation('@@@', '*****@*****.**')
#Twitch
keyboard.add_abbreviation('TTV', 'https://twitch.tv/namastez')
# MISCELLANEOUS
keyboard.add_abbreviation('tm', u'™')
keyboard.add_abbreviation('cr', u'©')
keyboard.add_abbreviation('^^2', u'²')

play(system_online)  # Play a sound when the program starts
start_scoring()  # Make sure the .txt files for scoring are reset
keyboard.hook(
    print_pressed_keys)  # Show the value of the currently pressed key in the console
keyboard.wait()  # Block forever, keeping the hotkeys active
Example #22
File: piclock.py  Project: cwk9/piclock
def ring_ring():
    sys.stdout.write('ring ring\n')
    play(alarmwavs[0])
    sys.stdout.flush()
Example #23
    next_obs
    time += time_per_gym_step

    data_list = []  # this is the list we add to the csv file

    for n, j in enumerate(env_instance.env.robot.ordered_joints):
        action_torque = env_instance.env.robot.power * j.power_coef * float(
            np.clip(a.reshape(a.size, )[0], -1, +1))  # compute the joint torque
        data_list.append(action_torque)  # add joint torques

    for n, j in enumerate(env_instance.env.robot.ordered_joints):
        data_list.append(j.get_position())  #add joint positions

    for n, j in enumerate(env_instance.env.robot.ordered_joints):
        data_list.append(j.get_position())  #add joint velocities

    data_list.append(time)  #append timestamp

    filewriter_for_link_7_walker.writerow(data_list)
    if env_instance.env.robot.walk_target_dist < 1e3 - distance_reset_increments:
        env_instance.env.robot.walk_target_x += distance_reset_increments
        distance_traveled += distance_reset_increments
    print("Distance Traveled: ", distance_traveled)
    print("Distance to target : ", env_instance.env.robot.walk_target_dist)
    if env_instance.env.robot.body_xyz[
            2] < 0.7:  # the robot's torso is low enough that it may have fallen to its knees
        play(song)  # ding when fallen
        break
    #if done == True:
    #	break
Example #24
    return r


if __name__ == '__main__':

    appId = getappId()

    while True:
        text = input(">>")

        response_text = requests_dialogtext(appId, text)

        r = requests_voicedata(response_text)

        # Write the audio to a temporary file
        if r.status_code == 200:
            with open(tmp_filename, 'wb') as fd:
                for chunk in r.iter_content(chunk_size=256):
                    fd.write(chunk)

        # Display the response text
        print("AIちゃん: {0}".format(response_text))

        # Play back from the temporary file
        aac_version = AudioSegment.from_file(tmp_filename, "aac")
        play(aac_version)

        # Delete the temporary file
        os.remove(tmp_filename)
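A sketch of the module-level names the loop above assumes (the temporary file name is hypothetical; getappId, requests_dialogtext and requests_voicedata are project helpers not shown here):

import os
import requests
from pydub import AudioSegment
from pydub.playback import play

tmp_filename = "response.aac"  # hypothetical temporary file name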
Example #25
from clock import Clock
from pydub import AudioSegment
from pydub.playback import play
import time

with Clock(seconds=30):
    alarm_file = "/home/pi/AI-Smart-Mirror/sample-audio-files/martian-gun.mp3"
    song = AudioSegment.from_mp3(alarm_file)
    while True:
        play(song)
        time.sleep(2)

print("Hello Test")
Example #26
def construct_and_play(p):
    '''Takes a ParameterValues object and constructs and plays the relevant AudioSegment. 
    '''

    # Construct the background.
    if p.first_bg == 'vague':
        first_bg_note = random.choice(vague_background)
    elif p.first_bg == 'pointed':
        first_bg_note = random.choice(pointed_background)

    bg = get_new_layer(current_layer=first_bg_note,
                       note_type=0,
                       num_of_note_type=p.num_bg,
                       num_overlaps_allowed=p.num_of_overlaps_allowed,
                       pointed_sounds_min=p.pointed_sounds_allowed_min,
                       pointed_sounds_max=p.pointed_sounds_allowed_max,
                       pan_min=p.pan_min_allowed,
                       pan_max=p.pan_max_allowed,
                       num_reversals_allowed=p.num_of_reversals_allowed,
                       fade_in_time=100,
                       fade_out_time=100,
                       low=500,
                       high=1000,
                       inc_high_overlap=500,
                       inc_low=500,
                       inc_high=500)

    # Construct the background plus the notes.
    bg_plus_notes = get_new_layer(
        current_layer=bg,
        note_type=1,
        num_of_note_type=p.num_of_notes,
        num_overlaps_allowed=p.num_of_overlaps_allowed,
        pointed_sounds_min=p.pointed_sounds_allowed_min,
        pointed_sounds_max=p.pointed_sounds_allowed_max,
        pan_min=p.pan_min_allowed,
        pan_max=p.pan_max_allowed,
        num_reversals_allowed=p.num_of_reversals_allowed,
        fade_in_time=p.fade_time_notes,
        fade_out_time=p.fade_time_notes,
        low=4000,
        high=6000,
        inc_high_overlap=500,
        inc_low=3000,
        inc_high=3000)

    # Construct the background plus notes plus effects.
    bg_plus_notes_plus_effects = get_new_layer(
        current_layer=bg_plus_notes,
        note_type=2,
        num_of_note_type=p.num_of_effects,
        num_overlaps_allowed=p.num_of_overlaps_allowed,
        pointed_sounds_min=p.pointed_sounds_allowed_min,
        pointed_sounds_max=p.pointed_sounds_allowed_max,
        pan_min=p.pan_min_allowed,
        pan_max=p.pan_max_allowed,
        num_reversals_allowed=p.num_of_reversals_allowed,
        fade_in_time=p.fade_time_effects,
        fade_out_time=p.fade_time_effects,
        low=6000,
        high=9000,
        inc_high_overlap=500,
        inc_low=3000,
        inc_high=3000).fade_in(500).fade_out(3000)

    # Play the new AudioSegment.
    play(bg_plus_notes_plus_effects)
Example #27
 def play(self, index):
     sound = AudioSegment.from_mp3(self.list[index - 1])
     playback.play(sound)
Example #28
def player(filename):
	song = getAudio(filename)
	playback.play(song)
	return filename
Example #29
 def play(self, index):
     song = AudioSegment.from_mp3(self.list[index])
     playback.play(song)
Example #30
from pydub import AudioSegment, playback

AudioSegment.converter = "ffmpeg\\ffmpeg.exe"
sound1 = AudioSegment.from_mp3("1.mp3")
sound2 = AudioSegment.from_mp3("2.mp3")
output = sound1.overlay(sound2)
playback.play(output)
Example #31
 def mixAndPlay(self, index1, index2):
     sound1 = AudioSegment.from_mp3(self.list[index1 - 1])
     sound2 = AudioSegment.from_mp3(self.list[index2 - 1])
     output = sound1.overlay(sound2)
     playback.play(output)
Example #32
from pydub import AudioSegment
from pydub.playback import play

sound = AudioSegment.from_file("./words_us/cavern_us.mp3", format="mp3")
sound2 = AudioSegment.from_file("./words_us/aardvark_us.mp3", format="mp3")
bgm = AudioSegment.from_file("bgm.mp4", format="mp4")
#source:
#https://stackoverflow.com/questions/51434897/how-to-change-audio-playback-speed-using-pydub
def speed_change(sound, speed=1.0):
    sound_with_altered_frame_rate = sound._spawn(sound.raw_data, overrides={
         "frame_rate": int(sound.frame_rate * speed)
      })
    return sound_with_altered_frame_rate.set_frame_rate(sound.frame_rate)

# background
play(bgm[:10000] - 5)
# concatenate
sound3 = sound + sound2
play(sound3)
#overlay sound3 with bgm
play(sound3.overlay(bgm[:10000]-7))
#pan to left channel
play(sound3.pan(-.99))
# speed one word up and slow the other down
slow_sound = speed_change(sound2, 0.75)
fast_sound = speed_change(sound, 2.0)
sound4 = fast_sound+slow_sound
play(sound4)
#reverse
reverse_sound = sound3.reverse()
play(reverse_sound)
Example #33
 def song2_start(self):
     play(song2)
Example #34
        import matplotlib.pyplot as plt
        import librosa
        import numpy as np
        wav, source_sr = librosa.load(args.input, sr=None)
        wav = librosa.resample(wav, source_sr, sampling_rate)
        wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True)

        speaker_wav = wav[int(args.speaker_segment[0] *
                              sampling_rate):int(args.speaker_segment[1] *
                                                 sampling_rate)]

        print(
            "Playing the selected audio segment at given offsets to check it is alright"
        )
        audio = AudioSegment.from_wav(args.input)
        play(audio[int(args.speaker_segment[0] *
                       1000):int(args.speaker_segment[1] * 1000)])
        if input("Is this correct? (y/n)\n") != "y":
            exit(0)

        encoder = VoiceEncoder("cpu")
        speaker_embed = encoder.embed_utterance(speaker_wav)
        similarities = []

    # Loop through the silences and greedily try to find cut points shorter than
    # args.max_duration seconds and longer than one second, preferring to cut on the
    # biggest silences. This skips the first and last audio sample, which is fine.
    sent_index, i, lost_seconds = 0, 0, 0
    to_save = []  # (audio, file_name)
    prog_bar = tqdm(total=len(silences))
    while i < len(silences):
        start_period = silences[i][0] + args.min_duration
        end_period = silences[i][0] + args.max_duration
        j, last_med_silence, last_short_silence, last_long_silence = 1, None, None, None
Example #35
from pydub import AudioSegment  #imports library to access audio files
from pydub.playback import play  #imports library to play audio file

sound = AudioSegment.from_wav(
    "left_intro_wav.wav")  # load the audio file
play(sound)  # control: the sound at its regular volume

two_second = 2 * 1000  # two seconds, in milliseconds
firstone = sound[:two_second]  # first two seconds of the clip
louder = firstone + 10  # increase the volume by 10 dB
play(louder)  # play the louder version

quieter = firstone - 10  # decrease the volume by 10 dB
play(quieter)  # play the quieter version
Example #36
File: test.py  Project: EoinDavey/Muse
import Calliope.parser as parser
import Erato.audio_engine as audio_engine
from pydub.playback import play
import sys

lines = sys.stdin.read()
p = parser.parseToAudio(lines)
seg = audio_engine.generate_audio(p)
play(seg)
Example #37
 def play_sounds_thread(paths, volumes, delay):
     if delay:
         time.sleep(delay)
     for path, volume in zip(paths, volumes):
         song = AudioSegment.from_mp3(path)
         play(song + volume)
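A hedged sketch of how this helper might be launched so playback does not block the caller (assuming play_sounds_thread is reachable at module scope; the threading wiring, file names and volumes are assumptions, not part of the snippet):

import threading

paths = ["chime.mp3", "voice.mp3"]  # hypothetical files
volumes = [0, -6]                   # per-clip gain in dB, applied via song + volume
threading.Thread(
    target=play_sounds_thread, args=(paths, volumes, 0.5), daemon=True
).start()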
Example #38
    def Play(self):
        pb.play(self.Sound)
Example #39
File: hw9.py  Project: DauMinhHoa/C4E4
 def play(self, index):
     playback.play(AudioSegment.from_mp3(self.song_list[index]))
Example #40
 def mixandplay(self,index1,index2):
     sound1=AudioSegment.from_mp3(self.songlist[index1])
     sound2=AudioSegment.from_mp3(self.songlist[index2])
     output = sound1.overlay(sound2)
     playback.play(output)
Example #41
output1 = output.overlay(sound3, position=100) + 5
output2 = output.overlay(sound4, position=170) + 2
output2 = speed_change(output2, 1)
#output2.export("hello.mp3",format='mp3')
#play(output2)

sound11 = AudioSegment.from_mp3(
    '/home/david/Escritorio/Proyectos/sounds/w.wav') + 10
sound22 = AudioSegment.from_mp3(
    '/home/david/Escritorio/Proyectos/sounds/ɜ.wav') + 11
sound33 = AudioSegment.from_mp3(
    '/home/david/Escritorio/Proyectos/sounds/r.wav') + 10
sound44 = AudioSegment.from_mp3(
    '/home/david/Escritorio/Proyectos/sounds/l.wav')
sound55 = AudioSegment.from_mp3(
    '/home/david/Escritorio/Proyectos/sounds/d.wav')
sound55 = speed_change(sound55, 0.88)

sound11 = soundd.overlay(sound11, position=0)
outputt = sound11.overlay(sound22, position=200)  #400
outputt = outputt.overlay(sound33, position=350)  #450
outputt = outputt.overlay(sound44, position=380)  #500
outputt = outputt.overlay(sound55, position=400)  #550

outputt = speed_change(outputt, 1)

outputt = output2.overlay(outputt, position=1000)

output2.export("hello.mp3", format='mp3')
play(outputt)
Example #42
                    part="id",
                    maxResults=1,
                    # Limit search to Music category
                    videoCategoryId="sGDdEsjSJ_SnACpEvVQ6MtTzkrI/nqRIq97-xe5XRZTxbknKFVe5Lmg",
                    safeSearch="none",
                    type="video",
                    videoLicense="any",
                    videoEmbeddable="any",
                )
                .execute()
            )

            videos = search["items"]
            if len(videos) != 1:
                print("No videos found. Skipping.")
                continue
            else:
                yt_id = search["items"][0]["id"]["videoId"]

        ydl_opts = {"format": "bestaudio", "logtostderr": True, "outtmpl": "/tmp/%(id)s"}
        ydl = youtube_dl.YoutubeDL(ydl_opts)
        try:
            ydl.download(["https://www.youtube.com/watch?v=" + yt_id])
        except youtube_dl.utils.DownloadError:
            print("Song download failed. Skipping.")
            continue

        song = AudioSegment.from_file("/tmp/" + yt_id)
        play(song)
        remove("/tmp/" + yt_id)
Example #43
# TODO: The sounds should be recorded and played on a frontend

def play_audio(sr, wav):
    # I bet you've seen this duct-tape before
    wav = np.multiply(wav, (2**15)).astype(np.int16)
    wavfile.write("output.wav", rate=sr, data=wav)
    sound = AudioSegment.from_wav('output.wav')
    play(sound)
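
As the comment admits, this round-trips through a file on disk. A hedged alternative that builds the segment directly from the samples (same int16 conversion, no temporary file; assumes the numpy and pydub imports already used above):

def play_audio_direct(sr, wav):
    samples = np.multiply(wav, (2**15)).astype(np.int16)
    segment = AudioSegment(
        samples.tobytes(), frame_rate=sr, sample_width=2, channels=1)
    play(segment)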


if __name__ == "__main__":
    bot = SmallTalkAgent()

    for _ in range(10):
        play(tink)
        user_input = recognize(7)  # 7 is the recording duration, in seconds
        play(morse)

        print(f"you: {user_input}")
        write_result(user_input, 'you')

        response = bot.talk(user_input)
        write_result(response, 'bot')

        print(f"bot: {response}")

        sr, wav = pronounce(response)
        play_audio(sr, wav)

Example #44
 def play(self):
     """
     Plays the Audio Object's audio segment, if it isn't empty.
     """
     if len(self._audioseg) > 0:
         play(self._audioseg)
Example #45
def play_audio(sr, wav):
    # I bet you've seen this duct-tape before
    wav = np.multiply(wav, (2**15)).astype(np.int16)
    wavfile.write("output.wav", rate=sr, data=wav)
    sound = AudioSegment.from_wav('output.wav')
    play(sound)
Example #46
 def clash_callback(self, msg):
     if msg.data < 0.07:
         play(self.clash_sound)
Example #47
def playSound(fname, volAmplifier=0):
    timesound = AudioSegment.from_mp3(fname)
    ts = timesound + volAmplifier
    play(ts)
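A hedged usage example (the file name is hypothetical; pydub's + operator applies a gain in decibels, so volAmplifier is a dB boost):

playSound("chime.mp3", volAmplifier=6)  # play chime.mp3 boosted by 6 dB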
Example #48
 def play_alert(self):
     play(self.alert_sound)
Example #49
from pydub import AudioSegment
from pydub.playback import play
import time

_STOP = True

song = AudioSegment.from_mp3("song.mp4")
song.export("song2.mp3", format="mp3")

sixty_seconds = 60 * 1000
first_60_seconds = song[:sixty_seconds]
play(first_60_seconds)
Example #50
 def Play(self, index):
     for x in self.list:
         if index == (self.list.index(x)+1):
             playback.play(x)
Example #51
from pydub import AudioSegment, playback

song1 = AudioSegment.from_mp3("05 Red Sun.mp3")
song2 = AudioSegment.from_mp3("Happy Background Music.mp3")

output = song1.overlay(song2)

playback.play(output)


class MediaPlayer:
    def __init__(self, list):
        self.list = list
    def play(self, index):
        song = AudioSegment.from_mp3(self.list[index])
        playback.play(song)

    def mixAndPlay(self, index1, index2):
        song1 = AudioSegment.from_mp3(self.list[index1])
        song2 = AudioSegment.from_mp3(self.list[index2])
        output = song1.overlay(song2)
        playback.play(output)

list = ["05 Red Sun.mp3","Happy Background Music.mp3"]

ly = MediaPlayer(list)
ly.play(1)
ly.mixAndPlay(0,1)
Example #52
 def play(self):
     for chunk in self.composition:
         pb.play(chunk)
Example #53
 def mixAndPlay(self, index1, index2):
     song1 = AudioSegment.from_mp3(self.list[index1])
     song2 = AudioSegment.from_mp3(self.list[index2])
     output = song1.overlay(song2)
     playback.play(output)
Example #54
def raise_alarm():
    """Play the alarm sound in a loop while ALARM_ON is set."""
    alert_sound = AudioSegment.from_wav("audio/beep-06.wav")
    while ALARM_ON:
        play(alert_sound)
Example #55
def play(filename):
    playback.play(AudioSegment.from_mp3(filename))
Example #56
 def playAudio(self, vector, frame_rate, channels):
     audio = self.toAudio(frame_rate, vector, channels)
     play(audio)
Example #57
 def MixandPlay(self,index1,index2):
     song1 = AudioSegment.from_mp3(self.song_list[index1])
     song2 = AudioSegment.from_mp3(self.song_list[index2])
     output = song1.overlay(song2)
     play(output)
Example #58
def mousePressed(event):
    global percentChange
    global currentlyPlaying
    global volumePressed
    global dB
    global track
    global panPressed
    global percent
    global startLoad
    myfont = pygame.font.SysFont("monospace", 25)
    (event.x, event.y) = event.pos 
    listOfLines = getRectangleSizes(track)
    for i in range(len(buttonList)):
        (x1,y1,width,height) = buttonList[i]
        (xPos, yPos) = event.pos
        if (x1 < xPos < x1 + width) and (y1 < yPos < y1 + height):
            pygame.draw.rect(screen, green, (x1+2,y1+2,width-4,height-4))
            label = myfont.render(buttonNames[i], 1, (255,0,0))
            screen.blit(label, (x1+25, y1))
            if i == 0:
                track = recursiveEcho(track)
                listOfLines = getRectangleSizes(track)
                if startLoad == False:
                    drawSong(listOfLines)
            if i == 1:
                track = repeatTrack(track)
                listOfLines = getRectangleSizes(track)
                pygame.draw.rect(screen,white,(startX-(width//len(listOfLines)),15,
                    560,105))
                if startLoad == False:
                    drawSong(listOfLines)
            if i == 2:
                track = reverseTrack(track)
                listOfLines = getRectangleSizes(track)
                pygame.draw.rect(screen,white,(startX-(width//len(listOfLines)),15,
                    560,105))
                if startLoad == False:
                    drawSong(listOfLines)
            if i == 3:
                volumePressed = True
                increaseVol = myfont.render("+",1,(0,0,0))
                screen.blit(increaseVol,(x1+150,y1+25))
                pygame.draw.rect(screen,black,(x1+148,y1+28,20,20),2)
                decreaseVol = myfont.render("-",1,(0,0,0))
                screen.blit(decreaseVol,(x1+150,y1+50))
                pygame.draw.rect(screen,black,(x1+148,y1+53,20,20),2)
                decibels = myfont.render("dB:"+str(dB),1,(0,0,0))
                screen.blit(decibels,(x1+50,y1+35))
            if i == 4:
                num = input('How many seconds do you want to interleave? ---> ')
                track = interLeaveSongs(int(num))
                startLoad = False
            if i == 5:
                panPressed = True
                rightSide = myfont.render("R",1,(0,0,0))
                screen.blit(rightSide,(x1+150,y1+45))
                pygame.draw.rect(screen,black,(x1+148,y1+48,20,20),2)
                leftSide = myfont.render("L",1,(0,0,0))
                screen.blit(leftSide,(x1+25,y1+45))
                pygame.draw.rect(screen,black,(x1+23,y1+48,20,20),2)
                percentage = myfont.render(str(percent)+"%",1,(0,0,0))
                screen.blit(percentage,(x1+65,y1+45))
            if volumePressed:
                if x1+148 < event.x < x1+168:
                    if y1+28 < event.y < y1+48:
                        if dB < 10:
                            dB += 1
                            track = changeVolume(track, 1)
                    elif y1+53 < event.y < y1+73:
                        if dB > -10:
                            dB -= 1
                            track = changeVolume(track, -1)
            if panPressed:
                if y1+48 < event.y < y1+68:
                    if x1+148 < event.x < x1+168:
                        if percent < 100:
                            percent += 5
                            percentChange += 0.05
                            if almostEqual(percentChange,1.0):
                                percentChange = 1.0
                    elif x1+23 < event.x < x1+43:
                        if percent > -100:
                            percent -= 5
                            percentChange += -0.05
                            if almostEqual(percentChange,-1.0):
                                percentChange = -1.0
        else:
            pygame.draw.rect(screen, white, (x1+2,y1+2,width-4,height-4))
            label = myfont.render(buttonNames[i], 1, (255,0,0))
            screen.blit(label, (x1+25, y1))
            if i == 3:
                increaseVol = myfont.render("+",1,(0,0,0))
                screen.blit(increaseVol,(x1+150,y1+25))
                pygame.draw.rect(screen,black,(x1+148,y1+28,20,20),2)
                decreaseVol = myfont.render("-",1,(0,0,0))
                screen.blit(decreaseVol,(x1+150,y1+50))
                pygame.draw.rect(screen,black,(x1+148,y1+53,20,20),2)
                decibels = myfont.render("dB:"+str(dB),1,(0,0,0))
                screen.blit(decibels,(x1+50,y1+35))
            if i == 5:
                rightSide = myfont.render("R",1,(0,0,0))
                screen.blit(rightSide,(x1+150,y1+45))
                pygame.draw.rect(screen,black,(x1+148,y1+48,20,20),2)
                leftSide = myfont.render("L",1,(0,0,0))
                screen.blit(leftSide,(x1+25,y1+45))
                pygame.draw.rect(screen,black,(x1+23,y1+48,20,20),2)
                percentage = myfont.render(str(percent)+"%",1,(0,0,0))
                screen.blit(percentage,(x1+65,y1+45))
    if 200 < event.x < 400 and 300 < event.y < 375:
        drawSong(listOfLines)
        currentlyPlaying = not(currentlyPlaying)
        drawPlayButton(currentlyPlaying)
        track = panTrack(track, percentChange)
        pygame.display.update()
        play(track)
        currentlyPlaying = not(currentlyPlaying)
        drawPlayButton(currentlyPlaying)
        pygame.display.update()
    if 140 < event.y < 180:
        if 25 < event.x < 105:
            track = loadFile()
            startLoad = False
        elif 115 < event.x < 195:
            saveFile(track)
            print('File Successfully Saved!')
        else:
            drawLoadSave(0)
    pygame.display.update()
Example #59
	def play(self):
		play(self.pod)
Example #60
 def playLineWait(self, path):
     sound = AudioSegment.from_mp3(path)
     play(sound)