def main():
    """Shows basic usage of the Google Calendar API.

    Creates a Google Calendar API service object and outputs a list of the
    next 10 events on the user's calendar, speaking each one via espeak.
    """
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('calendar', 'v3', http=http)

    now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    print('Getting the upcoming 10 events')
    events_result = service.events().list(
        calendarId='primary',
        timeMin=now,
        maxResults=10,
        singleEvents=True,
        orderBy='startTime',
    ).execute()
    events = events_result.get('items', [])

    if not events:
        print('No upcoming events found.')
    for event in events:
        # All-day events carry 'date' instead of 'dateTime'.
        start = event['start'].get('dateTime', event['start'].get('date'))
        print(start, event['summary'])
        speaker = espeak.ESpeak()
        speaker.say(start)
        speaker.say(event['summary'])
def __init__(self, view):
    """Wire the view's widgets to this controller and set up level one."""
    self.NUMBER_OF_LEVELS = 2
    self.view = view
    self.view.window.connect("key-press-event", self.readKey)
    self.view.vbox.connect('expose-event', self.addImage)

    # Button handlers; the signal ids are kept so they can be managed later.
    self.left_button_signal = self.view.left_button.connect(
        "clicked", self.play_word)
    self.right_button_signal = self.view.right_button.connect(
        "clicked", self.next_word, "Next Word")

    # Controller state.
    self.level = 0
    self.score = 0
    self.dictionary = {}             # all the words used in the game
    self.level_words = []            # the list of words to be played
    self.currentIndex = 0            # index of the word currently pronounced
    self.check_current_word = False  # has the current word been checked?
    self.typed = ""                  # what the user has typed so far
    self.skipped = []                # words that were skipped
    self.misspelled = {}             # word -> times the user misspelled it

    # Set the game up for the first level.
    self.next_level()
    self.view.typeBox.createTextBoxes(len(self.level_words[0]))
    # Engine to produce sound for any word.
    self.es = espeak.ESpeak(voice="en+f1", speed=200)
def doTTS(self, textContent):
    """Render *textContent* to a .wav file under sounds_path.

    Returns the bare file name (without the directory prefix).
    """
    engine = espeak.ESpeak()
    engine.voice = 'en+f4'
    engine.speed = 100
    engine.capitals = 25
    # Whole-second timestamp keeps repeated renderings of the same text distinct.
    file_name = "{}_{}.wav".format(textContent, format(time.time(), ".0f"))
    full_file_path = "{}{}".format(sounds_path, file_name)
    engine.save(textContent, full_file_path)
    print("save file name:{}".format(full_file_path))
    return file_name
def setUp(self):
    """Give every test case a fresh ESpeak instance."""
    self.e = espeak.ESpeak()
def test_init_arg(self):
    """A keyword argument given to the constructor lands in the args mapping."""
    self.e = espeak.ESpeak(amplitude=10)
    self.assertEqual(self.e.args['amplitude'][1], 10)
import networkx as nx
import collections
import json
import io
import os
import sys
import matplotlib.pyplot as plt
import argparse, random
from pycorenlp import StanfordCoreNLP
from Globals import Globals
from networkx.classes.function import neighbors
import espeak
#https://github.com/relsi/python-espeak

# Two preset voices used to read story sentences aloud; only the pitch differs.
male = espeak.ESpeak(amplitude=200, word_gap=-50, pitch=36, speed=200,
                     voice='english-us')
female = espeak.ESpeak(amplitude=200, word_gap=-50, pitch=77, speed=200,
                       voice='english-us')

# Example narration, kept for reference:
#male.say("John moved to the office")
#female.say("Sandra journeyed to the bathroom")
#female.say("Mary moved to the hallway")
#male.say("Daniel travelled to the office")
#female.say("John went back to the garden")
#male.say("John moved to the bedroom")


class babiGraph():
    """Graph representation of a bAbI story."""

    def __init__(self):
        """Start with an empty graph and empty bookkeeping structures."""
        self.edgeList = []
        self.nodeList = []
        self.timeStampLemmaDict = dict()
        self.subStoryFacts = dict()
        self.G = nx.Graph()
import time
import espeak
import paho.mqtt.client as mqtt

broker_address = "192.168.1.32"

import RPi.GPIO as GPIO

# Announce that the speaker works as soon as the script starts.
es = espeak.ESpeak()
test_msg = "Speaker is connected"
es.say(test_msg)

# setup button
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
if GPIO.input(23) == 0:
    print("button pressed")


def on_message(client, userdata, message):
    """MQTT callback: speak any text payload that arrives."""
    time.sleep(1)
    text_message = str(message.payload.decode("utf-8"))
    print("received message =", text_message)
    es.say(text_message)


def on_disconnect(client, userdata, rc):
    """MQTT callback: announce the disconnect reason code aloud."""
    error_message = str(rc)
    es.say("disconnected because " + error_message)
def rap(tweet):
    """Build a rap from *tweet* and speak it with a very low pitch."""
    lyrics = build_rap(tweet)
    voice = espeak.ESpeak(pitch=1)
    voice.say(lyrics)
def speak():
    """Save the module-level text `pas` to a timestamped .wav file."""
    engine = espeak.ESpeak()
    # NOTE(review): ':' in the timestamp is invalid in file names on Windows
    # and FAT file systems — confirm the target platform before reusing.
    stamp = datetime.now().strftime('%Y%m%d_%H:%M:%S')
    wav = stamp + ".wav"
    engine.save(pas, wav)
import io
import os
import sys
import matplotlib.pyplot as plt
import argparse, random
from pycorenlp import StanfordCoreNLP
from Globals import Globals
from networkx.classes.function import neighbors
from babiparser import BabiParser
import time
# TODO: make this optional
import espeak
#https://github.com/relsi/python-espeak

# Two preset espeak voices for reading sentences aloud; they differ in pitch
# and voice variant only.
female = espeak.ESpeak(amplitude=200, word_gap=-50, pitch=36, speed=220, voice='en-us')
male = espeak.ESpeak(amplitude=200, word_gap=-50, pitch=80, speed=220, voice='en-us+f4')


class ActionClassifier(object):
    # Classifier over annotated actions loaded from a JSON file.
    # NOTE(review): this __init__ appears truncated in the visible chunk —
    # actionsDict is built but not yet used/stored here.
    def __init__(self, input_action, model=None):
        # Label -> set of Actions
        with open(input_action, 'r') as annotatedActions:
            actions = json.load(annotatedActions)
        actionsDict = {}
def __init__(self):
    """Start the ROS node and speak whatever text arrives on /tts_text."""
    rospy.init_node(self.NODE_NAME)
    self.sub = rospy.Subscriber('/tts_text', String, self.tts_callback)
    self.es = espeak.ESpeak()
def __init__(self, word):
    """Prepare a stoppable speaker thread for a single word."""
    super(Speak, self).__init__()
    # NOTE(review): `_stop` shadows an internal attribute of threading.Thread
    # on some Python versions — confirm this does not break Thread internals.
    self._stop = threading.Event()
    self.word = word
    self.es = espeak.ESpeak(voice="en+f1", speed=230)