Example #1
from time import sleep

from mpyg321.mpyg321 import MPyg321Player


def play(song):
    player = MPyg321Player()
    player.play_song(song)  # playback starts in a background mpg123/mpg321 process
    print('enter sleep')
    sleep(10)  # keep the script alive while the song plays
    player.stop()
    print('stop player')
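
Note that play_song returns immediately, because mpyg321 drives a separate mpg123/mpg321 process; the sleep is what keeps the script alive during playback. A call would look like this (the file name is a placeholder):

play("sample.mp3")  # plays roughly the first 10 seconds, then stops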
Example #2
from time import sleep

from mpyg321.mpyg321 import MPyg321Player


def MusicPlay(RareInput):
    # the input packs a song name and its text as "Name()text"
    MusicName, text = RareInput.split("()")
    print(MusicName, text)
    player = MPyg321Player()  # instantiate the player
    player.play_song(MusicName + ".mp3")  # play a song
    sleep(FindHowMuchToPlay(MusicName, text))  # project-defined duration helper
    player.quit()
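
FindHowMuchToPlay is defined elsewhere in the project. Judging from the split("()") call, an invocation would look roughly like this (song name and text are hypothetical):

MusicPlay("MySong()the lyrics to sing along to")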
Example #3
File: main.py Project: timothymarotta/One
from collections import OrderedDict
import textwrap

from gtts import gTTS
from mpyg321.mpyg321 import MPyg321Player

import story  # project module with the template text and placeholder list


def main():
    text = story.text
    full_replace = story.full_replace
    for x in range(len(full_replace)):
        full_replace[x] = full_replace[x].strip()
    # keep one copy of each placeholder, preserving order
    unique_replace = list(OrderedDict.fromkeys(full_replace))
    print(
        "Welcome Story Class!! Below I will ask a series of questions to help make Tim's story. Shout out answers "
        "as you see fit!")
    for term in unique_replace:
        temp = input(term + ": ")
        if temp == "break":
            break
        for item in full_replace:
            if item == term:  # compare values, not identity
                full_replace[full_replace.index(item)] = temp
    # interleave the story fragments with the collected answers
    final_text = ""
    for count in range(len(text)):
        final_text += text[count]
        if count <= 123:
            final_text += full_replace[count]
    wrapper = textwrap.TextWrapper(width=140)
    word_list = wrapper.fill(text=final_text)
    with open("one_run.txt", "w+") as to_print:
        to_print.write(word_list)
    print(word_list)

    # read the finished story aloud
    language = "en"
    myobj = gTTS(text=final_text, lang=language, slow=False)
    myobj.save("one_audio.mp3")
    player = MPyg321Player()
    player.play_song("one_audio.mp3")
Example #4
import os
from tkinter import filedialog, Label

from mpyg321.mpyg321 import MPyg321Player


def browse():
    global player
    file = filedialog.askopenfilename(initialdir='/home/jpolak/Muzyka')
    player = MPyg321Player()
    player.play_song(file)

    # show the chosen file name in a label ('napis' = label, 'okno' = window)
    global napis
    napis = Label(okno)
    napis.config(text=os.path.basename(file))  # strip() trims characters, not a path prefix
    napis.grid(column=0, row=1)
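
For context, a minimal window that wires browse to a button might look like this; it is a sketch, since the original snippet only shows the callback and assumes okno already exists:

from tkinter import Tk, Button

okno = Tk()
Button(okno, text="Browse", command=browse).grid(column=0, row=0)
okno.mainloop()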
Example #5
import os

import wit
import pyaudio
import pvporcupine

from gtts import gTTS
from logmmse import logmmse_from_file
from mpyg321.mpyg321 import MPyg321Player

from responder import Responder

KEYWORDS = ["jarvis", "bumblebee"]

rp = Responder()
pa = pyaudio.PyAudio()
pl = MPyg321Player()
ai = wit.Wit(os.getenv("WITAI_TOKEN"))
porcupine = pvporcupine.create(keywords=KEYWORDS)

sample_rate = porcupine.sample_rate
frames_per_buffer = porcupine.frame_length
DURATION = 4.5

audio_stream = pa.open(
    rate=sample_rate,
    channels=1,
    format=pyaudio.paInt16,
    input=True,
    frames_per_buffer=frames_per_buffer,
)
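
The snippet ends right after opening the stream. A typical Porcupine loop reads one frame at a time and asks the engine whether a keyword was spoken; a minimal sketch of that missing loop, assuming the setup above:

import struct

while True:
    pcm = audio_stream.read(frames_per_buffer)
    pcm = struct.unpack_from("h" * frames_per_buffer, pcm)
    keyword_index = porcupine.process(pcm)
    if keyword_index >= 0:  # -1 means no keyword in this frame
        print("Detected:", KEYWORDS[keyword_index])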
Example #6
def main():
    """Do the magic"""
    player = MPyg321Player()
    do_some_play_pause(player)  # demo helpers defined elsewhere in the project
    do_some_jumps(player)
    player.quit()
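
The two helpers are not included in the snippet. Assuming mpyg321 exposes pause(), resume() and jump() controls, they might look roughly like this (a sketch with a placeholder file name, not the demo's actual code):

from time import sleep

def do_some_play_pause(player):
    player.play_song("sample.mp3")  # placeholder file
    sleep(5)
    player.pause()
    sleep(2)
    player.resume()
    sleep(5)

def do_some_jumps(player):
    player.jump(128)  # assumed: jump takes a frame offset
    sleep(5)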
Example #7
File: demo4.py Project: mcvavy/Agnes
import signal
import speech_recognition as sr
import os
import re
import asyncio

from gtts import gTTS
from io import BytesIO
from pygame import mixer as Player

import pygame

# Music Player Setup
from mpyg321.mpyg321 import MPyg321Player
from time import sleep
musicPlayer = MPyg321Player()
#___________________

from datetime import datetime

from wit import Wit
witClient = Wit("JTUTA7EED6JP2VNJPJJOQKT3P7UPQ2HK")

import json

import random

import pyowm
owm = pyowm.OWM(
    'dcac096e8c94a58b502991795e61f6d4')  # You MUST provide a valid API key
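
The snippet only wires up the pieces. A small helper tying gTTS to the mpg321 player, consistent with the imports above (a sketch; speak and the file name are not in the original):

def speak(text):
    gTTS(text=text, lang="en").save("reply.mp3")  # synthesize the reply
    musicPlayer.play_song("reply.mp3")            # play it through mpg321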
Example #8
import requests
import pyttsx3
from gtts import gTTS
from mpyg321.mpyg321 import MPyg321Player
import os
from time import sleep

engine = pyttsx3.init()
player = MPyg321Player()
connected = False
debug_option = True


def debug(text):
    if debug_option:
        print(str(text))


try:
    response = requests.get("https://google.com")
    if response.status_code == 200:
        connected = True
        debug("Connection established")
except requests.ConnectionError:
    debug("No internet connection")


def disconnectedVoice(text):
    engine.say(text)
    engine.runAndWait()


def connectedVoice(text):
    tts = gTTS(text=text, lang='en')
    # assumed completion: the snippet cuts off here
    tts.save("speech.mp3")
    player.play_song("speech.mp3")
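
A natural way to use the pair is to dispatch on the connected flag; the say wrapper below is not in the original:

def say(text):
    if connected:
        connectedVoice(text)     # online: gTTS + mpg321
    else:
        disconnectedVoice(text)  # offline: pyttsx3

say("Hello there")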
Example #9
File: Voice.py Project: mohabmes/Watari
def play(self, filepath):
    """Play the given file with a fresh player instance."""
    player = MPyg321Player()
    player.play_song(filepath)
Example #10
# -*- coding: utf-8 -*-

import random
from mpyg321.mpyg321 import MPyg321Player
from pkg_resources import resource_listdir, resource_filename

PLAYER = MPyg321Player()

PACKAGE = 'vendingmachine.resources.sounds'

# TODO: get rid of this / switch to enum:
BUTTON_PRESS = 'button_press'
COIN_INSERT = 'coin_insert'
COIN_REJECT = 'coin_reject'
MUSIC = "music"
NOISE = "noise"


def get_random_mp3_file(subpackage):
    """ Example: get_random_mp3_file(COIN_REJECT) """
    mp3s = []
    subpackage = "{}.{}".format(PACKAGE, subpackage)
    for n in resource_listdir(subpackage, ''):
        filename = resource_filename(subpackage, n)
        if filename.endswith('.mp3'):
            mp3s.append(filename)
    return random.choice(mp3s)


def play_random(subpackage):
    PLAYER.play_song(get_random_mp3_file(subpackage))
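
Usage is then one call per sound category, for example:

play_random(COIN_INSERT)  # pick a random coin-insert mp3 and play it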
Example #11
def face_recon():
    # Relies on module-level state from the project: cv2, np (numpy),
    # pd (pandas), face_recognition, gTTS, time, video_capture
    # (a cv2.VideoCapture) and hear() for speech input.
    # Initialize some variables

    face_locations = []
    face_names = []
    data = {}
    xl_encoding = []
    process_this_frame = True
    global known_face_encodings, known_face_names
    player = MPyg321Player()
    already_recognized_face_names = []

    try:
        face_records = pd.read_excel('face_codes.xlsx')
        for cols in face_records:
            xl_encoding.append(np.array(face_records[cols]))

        xl_name = list(face_records.columns)
        known_face_encodings = xl_encoding
        known_face_names = xl_name

        print("4", known_face_encodings)
    except Exception:
        # no saved face codes yet (or the sheet is unreadable); start fresh
        pass

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(
                    known_face_encodings, face_encoding)
                name = "Unknown"
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                # np.argmin raises on an empty array, so guard against an
                # empty database of known faces
                best_match_index = np.argmin(
                    face_distances) if len(face_distances) else None
                if best_match_index is not None and matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    if name not in already_recognized_face_names:
                        speak_name = gTTS(text="Hello " + name +
                                          " you are handsome",
                                          lang='en',
                                          slow=False)
                        speak_name.save('name.mp3')
                        player.play_song("name.mp3")
                        print(face_names, name)
                        already_recognized_face_names.append(name)
                else:
                    new_embedding = face_recognition.face_encodings(frame)[0]
                    speak_name = gTTS(text="Hello there, what's your name?",
                                      lang='en',
                                      slow=False)
                    speak_name.save('name.mp3')
                    player.play_song("name.mp3")
                    print("Enter : ")
                    time.sleep(4)

                    name = hear()
                    known_face_encodings.append(new_embedding)
                    known_face_names.append(name)

                face_names.append(name)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # pair each name with its encoding before saving
            data = dict(zip(known_face_names, known_face_encodings))
            print(data)
            df_k = pd.DataFrame(data)
            print("df", df_k)
            df_k.to_excel('face_codes.xlsx', index=False)

            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()