def __init__(self):
    self.lock = 0  # 0 directs to initiator, -1 will unlock
    self.interpreter = Interpreter.load('./models/default')
    self.responder = responder()
    # Secondary bots
    self.initiator = initiator.initiator(self.responder)
    self.matchmaking = matchmaking.matchmaking(self.responder)
    self.confluence = confluence.confluence(self.responder)
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.server_address = ('localhost', 3008)
    print('[PRIMARY BOT] Starting up on %s port %s' % self.server_address)
    self.sock.bind(self.server_address)
    self.forename_1 = ''
    self.forename_2 = ''
    self.mode = 0
    self.init = 1
    response = "TEST. READY TO CHAT."
    self.responder.respond(response)
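# Hedged sketch of what typically follows a bind() like the one above: __init__
# only binds the socket to ('localhost', 3008), so listening and accepting must
# happen elsewhere. Everything below (listen/accept loop, echo behavior) is an
# assumption for illustration, not the bot's actual protocol.
import socket

def _serve_once_sketch(sock):
    sock.listen(1)              # start listening on the already-bound address
    conn, addr = sock.accept()  # block until a peer connects
    with conn:
        data = conn.recv(4096)  # read one chunk from the peer
        conn.sendall(data)      # echo it back (placeholder behavior)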
def get(self, query=None): """ Get documentation and demo UI. """ readme_view = markdown(view('../../README.md').decode()) demo_view = view('demo.htm').decode() return responder((demo_view + readme_view).encode(), 'text/html')
def http_error(status):
    """
    Create an HTTP error response.

    Arguments:
    status: string HTTP status including code number and description.

    Returns:
    HTTP response
    """
    error_view = view('error.htm', {'error': status})
    response = responder(error_view, 'text/html', status)
    return response
def post(self, query, postdata): """ Posting to crawl (AKA /) requests spider(s) to crawl each of the specified webpages. Arguments: query: dict having optional depth=n, where the default is 2. postdata: form-urlencoded string must contain newline-separated URLs assigned to a 'urls' variable. Returns: HTTP 202 Accepted or 400 Bad Request. """ if 'urls' in postdata: urls = postdata['urls'][0].splitlines() else: return http_error('400 Bad Request') try: depth = int(query['depth']) except KeyError: depth = 2 # Register all URLs with this job even if their results are cached. # This allows jobs to be stopped and resumed. self.webpages_model.register_job(self.job_id, urls) # Iterate through a copy of urls, since items may be removed from it. for url in urls[:]: status = self.webpages_model.get_status(url) webpage_info = self.webpages_model.get_webpage_info(url) if 'processing' == status and depth > webpage_info['depth']: self.spiders_model.stop(url) elif webpage_info['completion_datetime']: # Ignore webpages with good depth crawled less than 15 min ago. now = datetime.datetime.now() td = now - webpage_info['completion_datetime'] if 900 > td.total_seconds() and depth <= webpage_info['depth']: urls.remove(url) self.webpages_model.add(urls, depth=depth) self.spiders_model.deploy(self.job_id) crawl_view = view('crawl.json', {'job_id': self.job_id}) return responder(crawl_view, 'application/json', '202 Accepted')
def delete(self, query=None):
    """
    Delete the specified URL, all related images, and all crawled children
    of that URL from the datastores.

    Arguments:
    query values:
        url: string URL.

    Returns:
    HTTP 204, 400, or 404.
    """
    if 'url' not in query:
        return http_error('400 Bad Request')
    if self.webpages_model.delete(query['url']):
        return responder(None, None, '204 No Content')
    else:
        return http_error('404 Not Found')
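# Companion sketch for the delete handler above: issue an HTTP DELETE with the
# 'url' query parameter from the docstring. The base address is an assumption;
# urlopen() raises HTTPError for the 404 case, which is caught here.
import urllib.error
import urllib.parse
import urllib.request

def delete_crawled_url(url, base='http://localhost:8080/'):
    req = urllib.request.Request(
        base + '?' + urllib.parse.urlencode({'url': url}), method='DELETE')
    try:
        with urllib.request.urlopen(req) as resp:
            return resp.status        # 204 No Content on success
    except urllib.error.HTTPError as e:
        return e.code                 # 404 Not Found for unknown URLs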
def post(self, query, postdata): """ Send an abort-crawl request. Arguments: query: dict query having the following parameter: job_id: integer Job ID. postdata: Ignored. Returns: None """ job_id = query['job_id'] if self.jobs_model.job_exists(job_id): self.spiders_model.stop(job_id) return responder(None, None, '202 Accepted') else: return http_error('404 Not Found')
def post(self, query, postdata): """ Send an abort-crawl request. Arguments: query: dict query having the following parameter: job_id: integer Job ID. postdata: Ignored. Returns: None """ job_id = query["job_id"] if self.jobs_model.job_exists(job_id): self.spiders_model.stop(job_id) return responder(None, None, "202 Accepted") else: return http_error("404 Not Found")
def get(self, query=None): """ Get a list of result images from a given web crawl. Arguments: query values: job_id: integer job id. Returns: JSON list of URLs referencing found image files. """ if not 'job_id' in query and not 'url' in query: return http_error('400 Bad Request') if int == type(query['job_id']): images = self.images_model.get_by_job_id(query['job_id']) else: images = self.images_model.get_by_url(query['url']) result_view = view('result.json', {'images': json.dumps(images)}) return responder(result_view, 'application/json')
def get(self, query=None): """ Get the status of crawling a given URL. Arguments: query: Integer job_id, job_id=<JOB_ID> assignment, or url=<URL> assignment. Returns: JSON spider status """ url = query['url'] if 'url' in query else None job_id = query['job_id'] if 'job_id' in query else None job_id_specified = int == type(job_id) webpage_id = None if url: webpage_id = self.webpages_model.get_webpage_info(url)['id'] if not url and not job_id_specified: return http_error('400 Bad Request') if job_id_specified and not self.jobs_model.job_exists(job_id): return http_error('404 Not Found') elif url and not webpage_id: return http_error('404 Not Found') if job_id_specified: urls = json.dumps(self.jobs_model.get_init_urls(job_id)) job_status = json.dumps(self.jobs_model.get_status(job_id)) else: urls = json.dumps([url]) get_status = self.jobs_model.get_status job_ids = self.webpages_model.get_job_ids(url) job_status_list = [get_status(job_id) for job_id in job_ids] job_status_list = [status for status in job_status_list if status] job_status = json.dumps(job_status_list) status_view = view('status.json', {'urls': urls, 'job_status': job_status}) return responder(status_view)
def goAirportAgent(parent=None, communication_line=None):
    import os
    import sys
    import signal
    import ctypes
    import multiprocessing
    from time import sleep
    from threading import Thread
    from random import random

    import speech_recognition as sr

    import responder
    import airportAgent_functions as aaf

    r = sr.Recognizer()

    # Set initial state
    _i = 0
    driver = None
    _worked = None
    currentUserTicket = None
    _asking_for_flight = False
    _asking_for_lost = False
    _asking_for_hotel = False
    _asking_for_taxi = False
    _hold_number = False
    _hold_destination = False
    _monitor_stop = 0

    if communication_line is None:
        communication_line = random()

    with open("docs/settings.txt", "r") as f:
        settings = [i.rstrip() for i in f.readlines()]
    reset_after_being_idle_for = int(settings[7].split(" = ")[-1])
    input_device_index = settings[9].split(" = ")[-1]
    output_device_index = settings[10].split(" = ")[-1]
    offline_text_to_speech = int(settings[0].split(" = ")[-1].rstrip())

    aaf.clearer()
    print("Please wait...")

    def reset():
        '''Resets the current session's variables and activities.'''
        nonlocal currentUserTicket, _asking_for_flight, _asking_for_lost, \
            _asking_for_hotel, _asking_for_taxi, _hold_number, _hold_destination
        aaf.cache_clearer()
        currentUserTicket = None
        _asking_for_flight = False
        _asking_for_lost = False
        _asking_for_hotel = False
        _asking_for_taxi = False
        _hold_number = False
        _hold_destination = False
        # If it's a ctypes object, treat it differently
        if "sharedctypes" in str(type(communication_line)):
            communication_line.value = "/go_red".encode()
        aaf.kill_chrome(driver)

    def resetter():
        while not _monitor_stop:
            sleep(3)
            # Reset variables if no user talked to the agent for X seconds
            if aaf.current_seconds() >= (responder.last_activity +
                                         reset_after_being_idle_for):
                reset()

    # Real-time monitoring of whether the agent is idle.
    Thread(target=resetter).start()

    while 1:
        try:
            # Set the listening port:
            # if 'auto', try all indexes until one works;
            # if already set to an index, use that one.
            with sr.Microphone(device_index=(_i if input_device_index.lower()
                                             == "auto"
                                             else int(input_device_index))) as source:
                print("Input Device Index:",
                      (str(_i) + " (auto)" if input_device_index.lower() == "auto"
                       else int(input_device_index)))
                print("Output Device Index:", output_device_index)
                # If no error raised at device_index=_i,
                # then that _i is a source of voice input
                _worked = _i
                aaf.clearer()  # Clear the screen
                print("Listening...")
                # If the agent was launched without animations
                if not communication_line:
                    aaf.cache_clearer()
                while 1:  # Keep listening
                    # Filter noise
                    r.adjust_for_ambient_noise(source)
                    # Listen to the port (the source)
                    audio = r.listen(source)
                    try:
                        # Send then hold what Google's Speech-to-Text returns
                        text = r.recognize_google(audio)
                        # Respond or do an action
                        refresh_vars = responder.responder(
                            text, communication_line,
                            (aaf.say2 if offline_text_to_speech else aaf.say1),
                            aaf.clearer, currentUserTicket, _asking_for_flight,
                            _asking_for_lost, _asking_for_hotel, _asking_for_taxi,
                            _hold_number, _hold_destination)
                        # Refresh variables
                        currentUserTicket = refresh_vars[0]
                        _asking_for_flight = refresh_vars[1]
                        _asking_for_lost = refresh_vars[2]
                        _asking_for_hotel = refresh_vars[3]
                        _asking_for_taxi = refresh_vars[4]
                        _hold_number = refresh_vars[5]
                        _hold_destination = refresh_vars[6]
                        driver = refresh_vars[7]
                    # Exit from the listening loop if the session ended
                    except SystemExit:
                        # Let resetter know that execution stopped
                        _monitor_stop = 1
                        # Clear the current session's activity
                        aaf.cache_clearer()
                        # Exit Chrome if currently working
                        if driver:
                            aaf.kill_chrome(driver)
                        # Remove voice outputs
                        output_file = 'output_' + str(id(communication_line))
                        if os.path.exists(
                                os.path.join(os.getcwd(), output_file + ".wav")):
                            os.remove(
                                os.path.join(os.getcwd(), output_file + ".wav"))
                        if os.path.exists(
                                os.path.join(os.getcwd(), output_file + ".mp3")):
                            os.remove(
                                os.path.join(os.getcwd(), output_file + ".mp3"))
                        # Kill parent (animations; if initialized from there)
                        if parent:
                            os.kill(parent, signal.SIGTERM)
                        # Kill self
                        os.kill(os.getpid(), signal.SIGTERM)
                    # Handle the error if voice was not recognized
                    except sr.UnknownValueError:
                        print("Sorry, I didn't hear that. Can you repeat that?")
                    except Exception as e:
                        print(e)
                        sleep(5)
        # Inform the user if the device at index '_i' was not found
        except AssertionError:
            print(f"Device at device_index={_i} was not found, trying another"
                  " one.")
            sleep(3)
        # Check if the input source is being used by another program
        except OSError as e:
            if e.errno == -9998:
                aaf.clearer()
                print(f"device_index at {_i} is being used by another program"
                      " or not available. Trying another one")
                sleep(2)
            else:
                print(e)
                sleep(2)
        # If no input device was found at index '_i', try the next one
        if _worked is None and input_device_index.lower() == "auto":
            _i += 1
        # If it wasn't auto and this point was reached, the loop above has
        # already finished executing, so break.
        else:
            break
def goSofia():
    import os
    import sys
    from time import sleep

    import speech_recognition as sr

    import responder
    import sofia_functions as sf

    r = sr.Recognizer()
    _breaker = 0
    _i = 0
    _worked = None

    while True:
        try:
            # Set the listening port
            with sr.Microphone(device_index=_i) as source:
                # If no error raised at device_index=_i,
                # then that _i is a source of voice input
                _worked = _i
                sf.clearer()  # Clear the screen
                print("Listening...")
                sf.cache_clearer()  # Clear the past session's data
                while 1:  # Keep listening
                    # Filter noise
                    r.adjust_for_ambient_noise(source)
                    # Listen to the port (the source)
                    audio = r.listen(source)
                    try:
                        # Send then hold what Google's Speech-to-Text returns
                        text = r.recognize_google(audio)
                        # Respond or do an action
                        responder.responder(text, sf.say1, sf.clearer)
                    # Exit from the listening loop if the session ended
                    except SystemExit:
                        _breaker = 1  # End the main loop
                        break  # Break the listening loop
                    # Handle it if the voice was not recognized
                    except sr.UnknownValueError:
                        print("Sorry, I didn't hear that. Can you repeat that?")
                    except Exception as e:
                        print(e)
        # Inform the user if the device at index '_i' was not found
        except AssertionError:
            print(f"Device at device_index={_i} was not found, trying another one.")
            sleep(3)
        # Check if Sofia is already running in another window
        except OSError as e:
            if e.errno == -9998:
                sf.clearer()
                print("Sofia already running.")
                break
            else:
                print(e)
        if _breaker == 1:
            break
        # If no input device was found at index '_i', try the next one
        if _worked is None:
            _i += 1
    sf.cache_clearer()
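# Minimal sketch of the listen/recognize pattern shared by goAirportAgent and
# goSofia above, assuming the speech_recognition package and a working default
# microphone; recognize_google() needs network access.
import speech_recognition as sr

def listen_once():
    r = sr.Recognizer()
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source)  # calibrate against background noise
        audio = r.listen(source)            # block until a phrase is captured
    try:
        return r.recognize_google(audio)    # transcription via Google Web Speech API
    except sr.UnknownValueError:
        return None                         # speech was unintelligible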
def __init__(self):
    self.exitQ = exitQueue()
    signal.signal(signal.SIGINT, stopMe)
    self.responder = responder.responder()
    self.dispatcher = responder.dispatcher()