Example #1
def playSound(path, ifLoop):
    # Requires: import sys, import pydub, and
    # from pydub.playback import _play_with_simpleaudio
    global all_processes
    if "-mute" not in sys.argv:
        if DGMain.playMusic:
            if ifLoop:
                # pydub repeats a segment with *, so this plays it 20 times over
                playback = _play_with_simpleaudio(pydub.AudioSegment.from_ogg(path) * 20)
            else:
                playback = _play_with_simpleaudio(pydub.AudioSegment.from_ogg(path))

            all_processes.append(playback)
        else:
            return "No music played"
Example #2
def music_player():
    music_refresh()
    if request.method == "POST":
        play = request.form.get("play_music")
        if play:
            play = Music.query.filter_by(id=play).first()
            play = "music/" + play.title
            return render_template("music_player.html", user=current_user,
                                   entries=Music.query.all(), play=play)

        music = request.form.get("music")
        print("\nmusic_tell", music, "\n")
        stop = request.form.get("stop")
        music = Music.query.filter_by(id=music).first()
        if stop and len(safe) > 0:
            try:
                safe[0].stop()
                flash("Song stopped!", category="success")
            except Exception:
                flash("ERROR!", category="error")
        if music:
            if len(safe) > 0:
                try:
                    safe[0].stop()
                    flash("New song: " + music.title, category="success")
                except Exception:
                    flash("ERROR!", category="error")
            playback = AudioSegment.from_file(music.link)
            playback = _play_with_simpleaudio(playback)
            safe_change([playback])
    return render_template("music_player.html", user=current_user,
                           entries=Music.query.all(), play=None)
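safe and safe_change come from elsewhere in this project; a plausible minimal version, inferred from how they are used above (a guess, not the project's actual code):

safe = []  # holds the currently active playback handle, if any

def safe_change(new_list):
    # Swap in the new playback handle(s) while keeping the same list
    # object, so other module-level references to safe stay valid.
    safe.clear()
    safe.extend(new_list)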
Example #3
def play(playlist_changes):
    t = threading.current_thread()
    player = None
    crossfade = False

    while getattr(t, "keep_running", True):
        try:
            changes = playlist_changes.get(block=False)
        except queue.Empty:
            time.sleep(0.05)  # avoid busy-waiting on an empty queue
            continue

        # do something with the changes
        if changes.orderType == OrderType.SONG_CHANGE:
            if changes.songChangeType == SongChangeType.FAST:
                print(changes.path)
                audio = AudioSegment.from_file(changes.path)
                if player:  # another song is already playing
                    crossfade = True
                    for i in range(10):  # turn down the volume
                        call([
                            "amixer", "-D", "pulse", "-q", "sset", "Master",
                            "10%-"
                        ])
                        time.sleep(0.05)
                    player.stop()  # stop playing the old song
                player = _play_with_simpleaudio(audio)  # start the new song
                if crossfade:
                    for i in range(10):  # turn the volume back up
                        call([
                            "amixer", "-D", "pulse", "-q", "sset", "Master",
                            "5%+"
                        ])
                        time.sleep(0.05)
                    crossfade = False  # reset for the next song change
            else:
                time.sleep(3)
                audio = AudioSegment.from_file(changes.path)
                if player:
                    player.stop()
                player = _play_with_simpleaudio(audio)

        if changes.orderType == OrderType.VOLUME_CHANGE:
            if changes.volume == 1:
                call(["amixer", "-D", "pulse", "-q", "sset", "Master", "5%+"])
            else:
                call(["amixer", "-D", "pulse", "-q", "sset", "Master", "5%-"])

    print("stopped music playback")
Example #4
def startVideoAtFrame(currentFrame):
    global isPlaying, audioPlayback, playTime
    isPlaying = True
    # play audio from the position that matches the requested frame
    sliced = audio[int(currentFrame / totalFrames * len(audio)):]
    beforeTime = time.time()
    audioPlayback = _play_with_simpleaudio(sliced)
    # wall-clock instant at which frame 0 would have started playing
    playTime = beforeTime - currentFrame / frameRate
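The playTime bookkeeping lets the caller recover which frame should currently be on screen: playTime is the wall-clock instant at which frame 0 would have started, so elapsed time multiplied by the frame rate gives the frame index. A minimal sketch assuming the same globals (the function name is made up):

def currentVideoFrame():
    # (now - playTime) * frameRate inverts the formula above:
    # playTime = beforeTime - currentFrame / frameRate
    return int((time.time() - playTime) * frameRate)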
Example #5
def Watson_Text2Speech(Text_toConvert):
    # Requires: from ibm_watson import TextToSpeechV1 and
    # from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
    # Authentication
    ################### use your API key and URL ###################
    t2s_authenticator = IAMAuthenticator('')
    text_to_speech = TextToSpeechV1(authenticator=t2s_authenticator)
    text_to_speech.set_service_url('')

    # Convert the text to speech and play it; the audio is saved in
    # AudioPath under the name "Watson_Answer_Speech.wav".
    with open(AudioPath + 'Watson_Answer_Speech.wav', 'wb') as audio_file:
        audio_file.write(
            text_to_speech.synthesize(Text_toConvert,
                                      voice='en-US_AllisonV3Voice',
                                      accept='audio/wav').get_result().content)

    # Read the file back only after the with-block has closed and flushed
    # it; reading while it is still open for writing can yield a partial file.
    audio_to_play = AudioSegment.from_wav(AudioPath + 'Watson_Answer_Speech.wav')
    _play_with_simpleaudio(audio_to_play)
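A usage sketch, assuming the API key and service URL placeholders above have been filled in; the AudioPath value here is an assumption, since the project defines it elsewhere:

AudioPath = "./audio/"  # assumed value for illustration
Watson_Text2Speech("Hello, this is a synthesized test sentence.")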
Example #6
    def actual_loop(self):
        # ----------------------------------------------------------------------
        # Inspired from https://stackoverflow.com/a/34497639/6862058
        # ----------------------------------------------------------------------
        print("<z>: Start/stop the audio")
        print("<m>: Mark bar")
        print("Press <z> to start!:\n\n")

        orig_settings = termios.tcgetattr(sys.stdin)
        tty.setcbreak(sys.stdin)

        while True:

            x = sys.stdin.read(1)[0]
            if x == "z":
                if self.playback is None:
                    self.start_time = time.time()
                    self.playback = _play_with_simpleaudio(self.audio_segment)

                else:
                    self.end_time = time.time()
                    if self.playback.is_playing():
                        self.playback.stop()
                    break

            elif x == "m":
                curr_time = time.time()
                rel_bar_start_time = curr_time - self.start_time
                self.num_bars += 1
                self.rel_bar_start_timestamps.append(rel_bar_start_time)
                print("Bar #{:>3}: {}".format(self.num_bars,
                                              rel_bar_start_time))

        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)
        return
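The timestamps collected with <m> could be post-processed into an average bar length or a tempo estimate. A hypothetical sketch (estimate_bpm is not part of the original class):

def estimate_bpm(timestamps, beats_per_bar=4):
    # average gap between consecutive bar marks, in seconds
    gaps = [b - a for a, b in zip(timestamps, timestamps[1:])]
    if not gaps:
        return None
    avg_bar_seconds = sum(gaps) / len(gaps)
    return 60.0 * beats_per_bar / avg_bar_seconds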
Example #7
def play(sound):
    '''
        Play sound asynchronously: _play_with_simpleaudio returns
        immediately while playback continues in the background.
    '''
    play_obj = _play_with_simpleaudio(sound)
    return play_obj  # the handle can be used to stop or wait on playback
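If blocking playback were wanted instead, the returned handle can be waited on; a sketch using simpleaudio's PlayObject.wait_done():

def play_blocking(sound):
    play_obj = _play_with_simpleaudio(sound)
    play_obj.wait_done()  # block until playback finishes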
Example #8
    def play(self):
        # stop any current playback before starting a new one
        self.stop()
        if self.audio is not None:
            self.playback = _play_with_simpleaudio(self.audio)
Example #9
# set up plot
# Requires: import time, numpy as np, matplotlib.pyplot as plt
fig, ax = plt.subplots()
ind = np.arange(1, 4)
clarinet_bar, trumpet_bar, violin_bar = plt.bar(ind, (0, 0, 0))
clarinet_bar.set_facecolor('r')
trumpet_bar.set_facecolor('g')
violin_bar.set_facecolor('b')
ax.set_xticks(ind)
ax.set_xticklabels(['Clarinet', 'Trumpet', 'Violin'])
ax.set_ylim([0, 100])
ax.set_ylabel('Confidence')
ax.set_title('Real Time Predictions')
plt.show(block=False)  # non-blocking, so the update loop below keeps running

# begin playback
_play_with_simpleaudio(audio)
tstart = time.time()

clarinet = 0
trumpet = 0
violin = 0

# update plot
while True:
    # retrieve current sample
    tnow = time.time() - tstart
    current = int(tnow * sample_rate)

    # break if there are not enough samples (end of song)
    if current + fft_size >= len(l_samples):
        break
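The example is cut off at this point; a typical continuation (hypothetical, not the original code) would compute new confidence values and push them into the bar plot:

    # hypothetical continuation of the loop body
    clarinet_bar.set_height(clarinet)
    trumpet_bar.set_height(trumpet)
    violin_bar.set_height(violin)
    fig.canvas.draw_idle()
    plt.pause(0.01)  # let matplotlib process the redraw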
Example #10
File: pyano.py Project: leonichel/pyano
def play(sound):
    play_obj = _play_with_simpleaudio(sound)
Example #11
def run_demo(net, height_size, track, smooth):
    net = net.eval()
    net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []

    # Tarit defined
    slope_threshold = 0.4
    ear_slope_threshold = 0.5
    eye_ear_slope_threshold = 0.5
    not_detected = (-1, -1)
    sleep_confirmation_time = 2  # in seconds

    # flags to detect whether the person is sleeping or not
    sleeping = False

    timer_started = False

    time_notified = 0
    selected_pose = None

    while True:
        img = cap.read()

        #start_time = time.time()
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(
            net, img, height_size, stride, upsample_ratio)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(
                heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(
            all_keypoints_by_type, pafs)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (
                all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (
                all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 1])

            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)

        if track:
            track_poses(previous_poses, current_poses, smooth=smooth)
            previous_poses = current_poses

        # for pose in current_poses:
        #     pose.draw(img)

        # select the pose with the longest neck-to-nose distance,
        # i.e. the person appearing largest / closest to the camera
        longest_neck_to_nose_dst = 0
        for pose in current_poses:
            nose = tuple(pose.keypoints[0])
            neck = tuple(pose.keypoints[1])
            # Pythagoras
            neck_to_nose_dst = ((nose[0] - neck[0]) ** 2 +
                                (nose[1] - neck[1]) ** 2) ** 0.5
            if neck_to_nose_dst > longest_neck_to_nose_dst:
                longest_neck_to_nose_dst = neck_to_nose_dst
                selected_pose = pose

        if selected_pose is not None:
            selected_pose.draw(img)

            nose = tuple(selected_pose.keypoints[0])
            neck = tuple(selected_pose.keypoints[1])
            l_ear = tuple(selected_pose.keypoints[16])
            r_ear = tuple(selected_pose.keypoints[17])
            l_eye = tuple(selected_pose.keypoints[15])
            r_eye = tuple(selected_pose.keypoints[14])

            # print(cal_slope(l_eye,l_ear),cal_slope(r_eye,r_ear))

            # detect whether the person's back is facing the camera
            # (nose not visible)
            if nose == not_detected:
                if l_ear != not_detected and r_ear != not_detected:
                    # guard against division by zero when the ears share an x
                    ear_slope = abs(l_ear[1] - r_ear[1]) / max(abs(l_ear[0] - r_ear[0]), 1)
                    cv2.circle(img, l_ear, 5, (255, 0, 0), 3)
                    cv2.circle(img, r_ear, 5, (0, 255, 0), 3)
                    if ear_slope > ear_slope_threshold:
                        sleeping = True
                        # print("sleeping")
                    else:
                        sleeping = False
                else:
                    # out of condition, can't detect
                    sleeping = False
            else:
                cv2.circle(img, nose, 5, (255, 0, 0), 3)
                cv2.circle(img, neck, 5, (0, 255, 0), 3)

                # inverse slope of the neck-to-nose line; guard the degenerate
                # case where nose and neck share the same y value
                slope_inverse = (nose[0] - neck[0]) / ((nose[1] - neck[1]) or 1)
                l_ear_eye_slope = cal_slope(l_eye, l_ear)
                r_ear_eye_slope = cal_slope(r_eye, r_ear)

                # increase the slope_threshold if the person is turning their head
                # print(pose.keypoints[16],pose.keypoints[17]) #print ear location
                if l_ear == not_detected or r_ear == not_detected:
                    slope_threshold = 1
                    print("one ear missing, increasing slope_threshold")
                else:
                    slope_threshold = 0.4

                if abs(slope_inverse) > slope_threshold:
                    # cv2.putText(img,"".join([str(pose.id),"sleeping"]),(20,50),cv2.FONT_HERSHEY_COMPLEX,2,(255,0,0),3)
                    # print("Sleeping (neck bend more than threshold)")
                    # cv2.putText(img,"sleeping",(20,50),cv2.FONT_HERSHEY_COMPLEX,2,(255,0,0),3)
                    sleeping = True

                elif l_eye == not_detected or r_eye == not_detected:
                    sleeping = True
                    # print("Sleeping (not seeing both eyes)")

                elif (l_ear_eye_slope < -0.6 or r_ear_eye_slope > 0.6
                      or l_ear_eye_slope > eye_ear_slope_threshold
                      or r_ear_eye_slope < -eye_ear_slope_threshold):
                    sleeping = True
                    # print("Sleeping (ears higher/lower than eyes)")

                else:
                    # print("Not sleeping")
                    sleeping = False

            if sleeping:
                if not timer_started:
                    t_start_sleep = time.time()
                    timer_started = True
                else:
                    if time.time() - t_start_sleep > sleep_confirmation_time:
                        print("sending line message")
                        pic_name = f"log_data/{time_notified}.jpg"
                        cv2.imwrite(pic_name, img)
                        #lineNotify("Elderly sleeping %d"%time_notified)
                        notifyFile("Elderly sleeping %d" %
                                   time_notified, pic_name)
                        playback = _play_with_simpleaudio(sound)
                        time_notified += 1
                        timer_started = False
                        sleeping = False
            else:
                timer_started = False

        img = cv2.addWeighted(orig_img, 0.6, img, 0.6, 0)

        for pose in current_poses:
            cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                          (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
            if track:
                cv2.putText(img, 'id: {}'.format(pose.id), (pose.bbox[0], pose.bbox[1] - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))

        cv2.imshow('Sleep detector', img)

        # print((1/(time.time()-start_time)))
        if cv2.waitKey(1) == ord("q"):
            cap.stop()
            return
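cal_slope is called above but not shown; a plausible definition inferred from its call sites (a guess at the original, with a divide-by-zero guard added):

def cal_slope(p1, p2):
    # slope between two (x, y) keypoints
    dx = p2[0] - p1[0]
    if dx == 0:
        return float("inf")
    return (p2[1] - p1[1]) / dx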