def __init__(self, address=config.BRAIN_URL_LOCAL, clear_all=False, quiet=False):
    """
    Interact with Triple store

    Parameters
    ----------
    address: str
        IP address and port of the Triple store
    clear_all: bool
        If True, wipe the whole brain at boot (testing purposes)
    quiet: bool
        If True, skip the boot sequence (clear / clean / upload) entirely
    """
    self._connection = StoreConnector(address, format='trig')
    self._rdf_builder = RdfBuilder()

    self._log = logger.getChild(self.__class__.__name__)
    self._log.debug("Booted")

    # Timestamped path for this session's brain log
    self._brain_log = config.BRAIN_LOG_ROOT.format(
        datetime.now().strftime('%Y-%m-%d-%H-%M'))

    if not quiet:
        # Possible clear all contents (testing purposes)
        if clear_all:
            self.clear_brain()

        # Start with a clean local memory
        self.clean_local_memory()

        # Upload ontology here
        self.upload_ontology()
def __init__(self, resolution, rate, callbacks):
    # type: (CameraResolution, int, List[Callable[[AbstractImage], None]]) -> None
    """
    Abstract Camera.

    Parameters
    ----------
    resolution: CameraResolution
        Resolution enum; value is interpreted as (height, width)
    rate: int
        Target capture rate (frames per second)
    callbacks: list of callable
        Called with each captured AbstractImage
    """
    # Extract Image Dimensions from CameraResolution
    self._resolution = resolution
    self._width = self._resolution.value[1]
    self._height = self._resolution.value[0]
    self._shape = np.array([self.height, self.width, self.channels])

    # Store Camera Rate and Callbacks
    self._rate = rate
    self._callbacks = callbacks

    # Variables to do some performance statistics
    self._dt_buffer = deque([], maxlen=10)
    self._true_rate = rate
    self._t0 = time()

    # Default behaviour is to not run by default. Calling AbstractApplication.run() will activate the camera
    # BUGFIX: all state (notably _running and _log) is initialised *before* the
    # processor thread starts; previously the thread was started first, so the
    # worker could read attributes that did not exist yet (AttributeError race).
    self._running = False
    self._log = logger.getChild(self.__class__.__name__)

    # Create Mailbox and Image Processor:
    # Each time an image is captured it is put in the mailbox, overriding whatever there might currently be.
    # In a separate thread, the _processor worker takes an image and calls all registered callbacks.
    # This way the processing of images does not block the acquisition of new images,
    # while at the same new images don't build up a queue, but are discarded when the _processor is too busy.
    self._mailbox = Mailbox()
    self._processor_scheduler = Scheduler(self._processor, name="CameraThread")
    self._processor_scheduler.start()
def __init__(self, resolution, rate, callbacks):
    """
    Abstract Camera

    Parameters
    ----------
    resolution: CameraResolution
        Resolution enum; value is interpreted as (height, width)
    rate: int
        Target capture rate (frames per second)
    callbacks: list of callable
        Called with each captured image
    """
    self._resolution = resolution
    self._width = self._resolution.value[1]
    self._height = self._resolution.value[0]

    self._rate = rate
    self._callbacks = callbacks

    self._shape = np.array([self.height, self.width, self.channels])

    # Performance statistics over the last 10 frames
    self._dt_buffer = deque([], maxlen=10)
    self._true_rate = rate
    self._t0 = time()

    # BUGFIX: initialise _running and _log *before* starting the processor
    # thread; previously the daemon thread was started first, so the worker
    # could touch attributes that did not exist yet (AttributeError race).
    self._running = False
    self._log = logger.getChild(self.__class__.__name__)

    # maxsize=2 keeps at most one frame waiting — stale frames are dropped
    self._queue = Queue(maxsize=2)
    self._processor_thread = Thread(target=self._processor)
    self._processor_thread.daemon = True
    self._processor_thread.start()
def __init__(self, address=config.BRAIN_URL_LOCAL):
    """
    Interact with Triple store

    Parameters
    ----------
    address: str
        IP address and port of the Triple store
    """
    # Store connection settings
    self.address = address
    self.format = 'trig'

    # Ontology / namespace bookkeeping
    self.namespaces = {}
    self.ontology_paths = {}
    self.dataset = Dataset()
    self.query_prefixes = read_query('prefixes')

    # Wire namespaces and ontology into the dataset
    self._define_namespaces()
    self._get_ontology_path()
    self._bind_namespaces()

    # URI identifying the robot itself; filled in later
    self.my_uri = None

    self._log = logger.getChild(self.__class__.__name__)
    self._log.debug("Booted")

    self._brain_log = config.BRAIN_LOG_ROOT.format(
        datetime.now().strftime('%Y-%m-%d-%H-%M'))

    # Launch first query
    self.count_statements()
def __init__(self, rate, channels, callbacks):
    """
    Abstract Microphone

    Parameters
    ----------
    rate: int
        Sample rate in Hz
    channels: int
        Number of audio channels
    callbacks: list of callable
        Called with each captured block of samples
    """
    self._rate = rate
    self._channels = channels
    self._callbacks = callbacks

    # Performance statistics over the last 32 blocks
    self._dt_threshold_multiplier = 1.5
    self._dt_buffer = deque([], maxlen=32)
    self._true_rate = rate
    self._t0 = time()

    # BUGFIX: initialise _log, _running and _blocks *before* starting the
    # processor thread; previously the daemon thread was started first, so
    # the worker could read attributes that did not exist yet (race).
    self._log = logger.getChild(self.__class__.__name__)
    self._running = False
    self._blocks = 0

    self._queue = Queue()
    self._processor_thread = Thread(target=self._processor)
    self._processor_thread.daemon = True
    self._processor_thread.start()
def __init__(self, people, n_neighbors=20):
    """
    Classify Faces of Known People

    Parameters
    ----------
    people: dict
        Mapping from person name to that person's face feature vectors
    n_neighbors: int
        Number of neighbours used by the k-NN classifier
    """
    self._people = people
    self._n_neighbors = n_neighbors

    # Stable ordering of names -> integer class indices
    self._names = sorted(self.people.keys())
    self._indices = range(len(self._names))

    # NOTE(review): when `people` is empty, _labels/_features/_classifier are
    # never defined, so a later classification would raise AttributeError —
    # confirm callers only classify once at least one person is known.
    if self.people:
        self._labels = np.concatenate([[index] * len(self.people[name])
                                       for name, index in zip(self._names, self._indices)])
        self._features = np.concatenate([self.people[name] for name in self._names])

        self._classifier = KNeighborsClassifier(self._n_neighbors)
        self._classifier.fit(self._features, self._labels)

    self._log = logger.getChild(self.__class__.__name__)
    self._log.debug("Booted")
def __init__(self, language=config.APPLICATION_LANGUAGE, sample_rate=config.MICROPHONE_SAMPLE_RATE, hints=()):
    # type: (str, int, Iterable[str]) -> None
    """
    Google Speech Recognition with translation to the internal language.

    Parameters
    ----------
    language: str
        Locale code used for recognition (tip: en-GB handles 'academic English' well)
    sample_rate: int
        Microphone sample rate in Hz
    hints: Iterable[str]
        Words or phrases recognition should be extra sensitive to
    """
    AbstractASR.__init__(self, language)
    # Translator works on two-letter ISO-639-1 codes, hence the [:2] slice
    GoogleTranslator.__init__(self, config.APPLICATION_LANGUAGE[:2], config.INTERNAL_LANGUAGE[:2])

    self._client = speech.SpeechClient()

    # Samples arrive as little-endian int16 PCM, 'sample_rate' per second
    self._config = speech.types.RecognitionConfig(
        encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=sample_rate,
        language_code=language,
        max_alternatives=self.MAX_ALTERNATIVES,
        speech_contexts=[speech.types.SpeechContext(phrases=hints)])

    self._log = logger.getChild(self.__class__.__name__)
    self._log.debug("Booted ({} -> {})".format(self.source, self.target))
def __init__(self, rate, channels, callbacks):
    # type: (int, int, List[Callable[[np.ndarray], None]]) -> None
    """
    Abstract Microphone.

    Parameters
    ----------
    rate: int
        Sample rate in Hz
    channels: int
        Number of audio channels
    callbacks: list of callable
        Called with each captured block of samples
    """
    self._rate = rate
    self._channels = channels
    self._callbacks = callbacks

    # Variables to do some performance statistics
    self._dt_buffer = deque([], maxlen=32)
    self._true_rate = rate
    self._t0 = time()

    # Default behaviour is to not run by default. Calling AbstractApplication.run() will activate the microphone
    # BUGFIX: _running and _log are initialised *before* the processor thread
    # starts; previously the thread was started first, so the worker could
    # read attributes that did not exist yet (AttributeError race).
    self._running = False
    self._log = logger.getChild(self.__class__.__name__)

    # Create Queue and Sound Processor:
    # Each time audio samples are captured it is put in the audio processing queue
    # In a separate thread, the _processor worker takes these samples and calls all registered callbacks.
    # This way, samples are not accidentally skipped (NAOqi has some very strict timings)
    self._queue = Queue()
    self._processor_scheduler = Scheduler(self._processor, 0, name="MicrophoneThread")
    self._processor_scheduler.start()
def __init__(self, backend):
    # type: (AbstractBackend) -> None
    """
    Explore Component: periodically moves the robot's gaze, alternating between
    the least recently seen object and a random point, while no chat is active.

    Parameters
    ----------
    backend: AbstractBackend
    """
    super(ExploreComponent, self).__init__(backend)

    # Requires the ContextComponent to know which objects/people to look for
    context = self.require(ExploreComponent, ContextComponent)  # type: ContextComponent

    log = logger.getChild(ExploreComponent.__name__)

    def explore():
        # type: () -> None
        """Explore Environment to keep up to date on the people/objects inhabiting it."""

        # Get Observations, sorted by last time seen (least recently seen first)
        observations = sorted(context.context.objects, key=lambda obj: obj.time)

        # If there are any observations and the odds are in this if statement's favour
        if observations and random.random() > 0.33333:
            # Look at least recently seen object's last known location
            log.debug("Look at {}".format(observations[0]))
            self.backend.motion.look(observations[0].direction, ExploreComponent.SPEED)
        else:
            # Look at random point (to keep exploring environment)
            # Yaw ~ N(0, (pi/6)^2) clipped to [-pi, pi]; pitch ~ N(pi/2, (pi/10)^2) clipped to [0, pi]
            log.debug("Look at random point")
            self.backend.motion.look(
                (float(np.clip(np.random.standard_normal() / 3 * np.pi / 2, -np.pi, np.pi)),
                 float(np.clip(np.pi / 2 + np.random.standard_normal() / 10 * np.pi, 0, np.pi))),
                ExploreComponent.SPEED)

    def on_image(image):
        # type: (AbstractImage) -> None
        """
        Private On Image Event. Simply used because it is called n times a second.

        Parameters
        ----------
        image: AbstractImage
        """
        # When no chat is currently happening
        if not context.context.chatting:
            # At a certain interval
            if time() - ExploreComponent.LAST_MOVE > ExploreComponent.TIMEOUT:
                explore()  # Explore!
                ExploreComponent.LAST_MOVE = time()

    # Subscribe private on_image event to backend camera (which will call it regularly)
    self.backend.camera.callbacks += [on_image]
def __init__(self, backend): super(AbstractApplication, self).__init__(backend) # Find Events associated with Application (inherited from Components) self._events = {k: v for k, v in self.__dict__.items() if k.startswith(self._EVENT_TAG) and callable(v)} self._log = logger.getChild(self.__class__.__name__) self.log.debug("Booted")
def __init__(self):
    """Track beliefs about this interaction; everything is forgotten at turn off."""
    # All objects seen this session, in order of observation
    self.allObjects = list()
    # De-duplicated view of the same observations
    self.uniqueObjects = set()

    self._log = logger.getChild(self.__class__.__name__)
    self._log.debug("Booted")
def __init__(self, language):
    """
    Abstract Text-To-Speech.

    Parameters
    ----------
    language: str
        Language code used for speech synthesis
    """
    self._language = language

    # Jobs are queued and spoken one at a time by the worker thread
    self._queue = Queue()
    self._talking_jobs = 0

    # BUGFIX: create the logger *before* the worker thread starts, so the
    # worker can never observe a half-initialised instance (previously _log
    # was assigned after start(), an AttributeError race).
    self._log = logger.getChild(self.__class__.__name__)

    self._scheduler = Scheduler(self._worker, name="TextToSpeechThread")
    self._scheduler.start()
def __init__(self, language=config.APPLICATION_LANGUAGE):
    """
    Abstract Automatic Speech Recognition Class

    Parameters
    ----------
    language: str
        Locale code used for recognition
    """
    self._language = language

    # Logger name includes the language so parallel recognisers are distinguishable
    child_name = "{} ({})".format(self.__class__.__name__, self.language)
    self._log = logger.getChild(child_name)
def __init__(self, utterance):
    """
    Parse an utterance into a constituency forest.

    Parameters
    ----------
    utterance: Utterance
        Utterance to parse
    """
    # One POS tagger is shared by all Parser instances (lazy, expensive to build)
    if not Parser.POS_TAGGER:
        Parser.POS_TAGGER = POS()

    with open(Parser.CFG_GRAMMAR_FILE) as grammar_file:
        self._cfg = grammar_file.read()

    self._log = logger.getChild(self.__class__.__name__)

    self._forest, self._constituents = self._parse(utterance)
def __init__(self, language=config.APPLICATION_LANGUAGE, sample_rate=config.MICROPHONE_SAMPLE_RATE, hints=()):
    """
    Google Speech Recognition with translation to the internal language.

    Parameters
    ----------
    language: str
        Locale code used for recognition
    sample_rate: int
        Microphone sample rate in Hz
    hints: Iterable[str]
        Words or phrases recognition should be extra sensitive to
    """
    AbstractASR.__init__(self, language)
    # BUGFIX (consistency): Google Translate expects two-letter ISO-639-1
    # codes ('en'), not full locale codes ('en-US') — slice to the language
    # part, as the typed sibling implementation already does.
    GoogleTranslator.__init__(self, config.APPLICATION_LANGUAGE[:2], config.INTERNAL_LANGUAGE[:2])

    self._client = speech.SpeechClient()

    # Samples arrive as little-endian int16 PCM
    self._config = speech.types.RecognitionConfig(
        encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=sample_rate,
        language_code=language,
        max_alternatives=self.MAX_ALTERNATIVES,
        speech_contexts=[speech.types.SpeechContext(phrases=hints)])

    self._log = logger.getChild(self.__class__.__name__)
    self._log.debug("Booted ({} -> {})".format(self.source, self.target))
def __init__(self, classifier='english.all.3class.distsim.crf.ser'):
    """
    Start a Stanford NER server on a free port, in a background daemon thread.

    Parameters
    ----------
    classifier: str
        Name of the serialized CRF classifier model to load
    """
    self._log = logger.getChild(self.__class__.__name__)
    self._port = self._find_free_port()
    self._ner_server_process = None

    # Server runs in a daemon thread so it dies with the main process
    server_thread = Thread(target=self._start_server, args=(classifier,))
    server_thread.daemon = True
    server_thread.start()
    self._ner_server_thread = server_thread

    self._log.debug("Booted: ({}:{})".format(self.IP, self._port))
def __init__(self, backend):
    """
    Construct Component

    Parameters
    ----------
    backend: AbstractBackend
        Backend this component is attached to
    """
    super(AbstractComponent, self).__init__()

    # Keep a handle on the backend and a per-class child logger
    self._backend = backend
    self._log = logger.getChild(self.__class__.__name__)
def __init__(self, app, responders):
    # type: (AbstractApplication, List[Responder]) -> None
    """
    Group responders by type and verify their requirements against the app.

    Parameters
    ----------
    app: AbstractApplication
    responders: List[Responder]
    """
    self._app = app
    self._responders = responders

    # One bucket per responder type, preserving RESPONDER_TYPES order
    self._groups = []
    for responder_type in RESPONDER_TYPES:
        bucket = [r for r in responders if r.type == responder_type]
        self._groups.append(bucket)

    self._log = logger.getChild(self.__class__.__name__)

    self._check_requirements()
def __init__(self):
    # type: () -> RdfBuilder
    """Set up namespaces, named graphs and ontology for building RDF triples."""
    # Independent bookkeeping structures
    self.dataset = Dataset()
    self.namespaces = {}
    self.ontology_paths = {}

    self._log = logger.getChild(self.__class__.__name__)
    self._log.debug("Booted")

    # Setup order matters: namespaces must exist before they are bound,
    # and graphs/ontology build on the bound namespaces
    self._define_namespaces()
    self._bind_namespaces()
    self.define_named_graphs()
    self.load_ontology_integration()
def __init__(self, chat):
    """
    Abstract Analyzer Object: call Analyzer.analyze(utterance) factory function

    Parameters
    ----------
    chat: Chat
        Chat to be analyzed
    """
    # One NER instance is shared by all Analyzers (lazy initialisation)
    if not Analyzer.NER:
        Analyzer.NER = NER('english.muc.7class.distsim.crf.ser')

    self._chat = chat

    self._log = logger.getChild(self.__class__.__name__)
def __init__(self, speaker, context):
    """
    Create Chat

    Parameters
    ----------
    speaker: str
        Name of speaker (a.k.a. the person Pepper has a chat with)
    context: Context
        Context this chat takes place in
    """
    # Random 128-bit identifier for this chat session
    self._id = getrandbits(128)

    self._speaker = speaker
    self._context = context
    self._utterances = []

    # Logger name includes the speaker so parallel chats are distinguishable
    self._log = logger.getChild("{} ({})".format(self.__class__.__name__, self.speaker))
def __init__(self, address=config.BRAIN_URL_LOCAL):
    """
    Interact with Triple store

    Parameters
    ----------
    address: str
        IP address and port of the Triple store
    """
    # Store connection settings
    self.address = address
    self.namespaces = {}
    self.ontology_paths = {}
    self.format = 'trig'
    self.dataset = Dataset()
    # SPARQL prefix header prepended to queries (inline here; a sibling
    # implementation loads the same prefixes via read_query('prefixes'))
    self.query_prefixes = """
                    prefix gaf: <http://groundedannotationframework.org/gaf#> 
                    prefix grasp: <http://groundedannotationframework.org/grasp#> 
                    prefix leolaniInputs: <http://cltl.nl/leolani/inputs/> 
                    prefix leolaniFriends: <http://cltl.nl/leolani/friends/> 
                    prefix leolaniTalk: <http://cltl.nl/leolani/talk/> 
                    prefix leolaniTime: <http://cltl.nl/leolani/time/> 
                    prefix leolaniWorld: <http://cltl.nl/leolani/world/> 
                    prefix n2mu: <http://cltl.nl/leolani/n2mu/> 
                    prefix ns1: <urn:x-rdflib:> 
                    prefix owl: <http://www.w3.org/2002/07/owl#> 
                    prefix prov: <http://www.w3.org/ns/prov#> 
                    prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> 
                    prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> 
                    prefix sem: <http://semanticweb.cs.vu.nl/2009/11/sem/> 
                    prefix skos: <http://www.w3.org/2004/02/skos/core#> 
                    prefix time: <http://www.w3.org/TR/owl-time/#> 
                    prefix xml: <http://www.w3.org/XML/1998/namespace> 
                    prefix xml1: <https://www.w3.org/TR/xmlschema-2/#> 
                    prefix xsd: <http://www.w3.org/2001/XMLSchema#> 
                    """

    # Setup order matters: namespaces must be defined before being bound
    self._define_namespaces()
    self._get_ontology_path()
    self._bind_namespaces()

    # URI identifying the robot itself; filled in later
    self.my_uri = None

    self._log = logger.getChild(self.__class__.__name__)
    self._log.debug("Booted")
def __init__(self, application):
    """
    Bind this Intention to the running application and take over its events.

    Parameters
    ----------
    application: Application
    """
    self._application = application

    # Reset Application Events to their default
    # This prevents events from previous Intention to still be called!
    self.application._reset_events()

    # Subscribe to all Application Events
    for dependency in self.dependencies:
        self.require_dependency(dependency)

    self._log = logger.getChild(self.__class__.__name__)
    self.log.info("<- Switched Intention")
def __init__(self, rate, channels, callbacks):
    """
    Abstract Microphone.

    Parameters
    ----------
    rate: int
        Sample rate in Hz
    channels: int
        Number of audio channels
    callbacks: list of callable
        Called with each captured block of samples
    """
    self._rate = rate
    self._channels = channels
    self._callbacks = callbacks

    # Performance statistics over the last 32 blocks
    self._dt_threshold_multiplier = 1.5
    self._dt_buffer = deque([], maxlen=32)
    self._true_rate = rate
    self._t0 = time()

    # BUGFIX: _log and _running are initialised *before* the processor
    # scheduler starts; previously the worker thread could read attributes
    # that did not exist yet (AttributeError race).
    self._log = logger.getChild(self.__class__.__name__)
    self._running = False

    self._queue = Queue()
    self._processor_scheduler = Scheduler(self._processor, 0, name="MicrophoneThread")
    self._processor_scheduler.start()
def __init__(self):
    """Tornado application serving the video-feed page and its websocket."""

    class BaseHandler(tornado.web.RequestHandler):
        # Serves index.html from this file's directory
        def get(self):
            loader = tornado.template.Loader(os.path.dirname(__file__))
            self.write(loader.load("index.html").generate())

    class WSHandler(tornado.websocket.WebSocketHandler):
        # IMPROVEMENT: removed a redundant __init__ override that only
        # forwarded its arguments to super() — behavior is identical without it.

        # Track open websocket connections on the application class
        def open(self):
            VideoFeedApplication.HANDLERS.add(self)

        def on_close(self):
            VideoFeedApplication.HANDLERS.remove(self)

    super(VideoFeedApplication, self).__init__([(r'/ws', WSHandler), (r'/', BaseHandler)])

    self._log = logger.getChild(self.__class__.__name__)
def __init__(self, people, n_neighbors=20):
    # type: (Dict[str, np.ndarray], int) -> None
    """
    Classify faces of known people with a k-NN classifier.

    Parameters
    ----------
    people: Dict[str, np.ndarray]
        Mapping from person name to that person's face feature vectors
    n_neighbors: int
        Number of neighbours used by the k-NN classifier
    """
    self._people = people
    self._n_neighbors = n_neighbors

    # Stable ordering of names -> integer class indices
    self._names = sorted(self.people.keys())
    self._indices = range(len(self._names))

    if self.people:
        # Label `index` is repeated once per feature vector of person `index`
        label_runs = [[index] * len(self.people[name])
                      for name, index in zip(self._names, self._indices)]
        self._labels = np.concatenate(label_runs)
        self._features = np.concatenate([self.people[name] for name in self._names])

        self._classifier = KNeighborsClassifier(self._n_neighbors)
        self._classifier.fit(self._features, self._labels)

    self._log = logger.getChild(self.__class__.__name__)
    self._log.debug("Booted")
def __init__(self, backend):
    """
    Create Application

    Parameters
    ----------
    backend: AbstractBackend
    """
    super(Application, self).__init__(backend)

    self._brain = LongTermMemory()

    # Collect event handlers: callable attributes whose name carries EVENT_TAG
    self._events = {}
    for attr in dir(self):
        if not attr.startswith(self.EVENT_TAG):
            continue
        value = self.__getattribute__(attr)
        if callable(value):
            self._events[attr] = value

    self._log = logger.getChild(self.__class__.__name__)
    self.log.debug("Booted")
def __init__(self, application): self._application = application # Reset Application Events to their default # This prevents events from previous Intention to still be called! self.application._reset_events() # Subscribe to all Application Events for dependency in self.dependencies: self.require_dependency(dependency) # Subscribe to all Application Members self.__dict__.update({ k: v for k, v in self.application.__dict__.items() if k not in self.__dict__ }) self._log = logger.getChild(self.__class__.__name__) self.log.info("<- Switched Intention")
def __init__(self, microphone, callbacks, mode=3):
    """
    Detect Utterances of People using Voice Activity Detection

    Parameters
    ----------
    microphone: AbstractMicrophone
        Microphone to extract Utterances from
    callbacks: list of callable
        On Utterance Callback
    mode: int
        Voice Activity Detection (VAD) 'Aggressiveness' (1..3)
    """
    self._microphone = microphone
    self._microphone.callbacks += [self._on_audio]
    self._rate = microphone.rate
    self._callbacks = callbacks

    self._vad = Vad(mode)

    # Number of Elements (np.int16) in Frame
    self._frame_size = self.FRAME_MS * self.rate // 1000

    self._ringbuffer_index = 0
    self._activation = 0

    # Initialize Ringbuffers, which will hold Audio data and Vad.is_speech results, respectively
    self._audio_ringbuffer = np.zeros((self.BUFFER_SIZE, self._frame_size), np.int16)
    # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the documented replacement as a dtype.
    self._vad_ringbuffer = np.zeros(self.BUFFER_SIZE, bool)

    self._audio_buffer = bytearray()  # filled with raw Microphone Audio
    self._voice_buffer = bytearray()  # filled with Voiced Audio only

    self._voice = False  # No Voice is present at start

    self._log = logger.getChild(self.__class__.__name__)
    self._log.debug("Booted")
def __init__(self, application):
    # type: (AbstractApplication) -> None
    """
    Bind this Intention to the running application, take over its events and
    members, and announce the switch.

    Parameters
    ----------
    application: AbstractApplication
    """
    self._application = application

    # Reset Application Events to their default
    # This prevents events from previous Intention to still be called!
    self.application._reset_events()

    # Subscribe to all Application Events, while making sure all Dependencies are met.
    for dependency in self.dependencies:
        self.require_dependency(dependency)

    # Subscribe to all Application Members, essentially becoming the Application
    # NOTE(review): existing attributes on this Intention are never overwritten;
    # mutable members end up shared with the application — confirm intended.
    self.__dict__.update({
        k: v
        for k, v in self.application.__dict__.items() if k not in self.__dict__
    })

    # Update User of Intention Switch
    self._log = logger.getChild(self.__class__.__name__)
    self.log.info("<- Switched Intention")