def react_to_feedback(ctx: rs.ContextWrapper):
    """
    React to the interlocutor's yes/no feedback on whether the charades guess was correct.
    """
    if ctx[prop_game_in_progress]:
        rand = random.random()
        if ctx[nlp.prop_yesno].yes():
            ctx[rawio.prop_out] = verbaliser.get_random_phrase(
                "charades_winning_exclamations")
            if rand < 0.5:
                emotion = SUNGLASSES_ON_EMOTION
                ctx[prop_sunglasses_on] = True
            else:
                emotion = HEARTS_EMOTION
            if ctx.conf(key=USE_EMOTIONS):
                emo_client(emotion)
            ctx[prop_feedback_received] = True
            logger.info("correct")
        elif ctx[nlp.prop_yesno].no():
            if ctx[prop_guess_attempt_count] < 3:
                ctx[rawio.prop_out] = verbaliser.get_random_phrase(
                    "charades_new_guess_attempt")
                ctx[prop_another_attempt] = True
                logger.info("wrong guess")
            else:
                ctx[rawio.prop_out] = verbaliser.get_random_phrase(
                    "charades_losing_exclamations")
                if ctx.conf(key=USE_EMOTIONS):
                    emo_client(SHY_EMOTION)
                ctx[prop_feedback_received] = True
        else:
            ctx[prop_feedback_received] = False
        return rs.Emit()
    else:
        return rs.Resign()
def send_on_telegram(ctx: rs.ContextWrapper, text: str):
    """
    If all telegram chats should be in the same context, sends the text to every currently active chat.
    Otherwise it only sends output using the Pipe if it is a child process.
    """
    if not text or not isinstance(text, str):
        return rs.Resign()

    if ctx.conf(key=ALL_IN_ONE_CONTEXT_CONFIG_KEY):
        # TODO don't instantiate the updater every time
        token = ctx.conf(key=TOKEN_CONFIG_KEY)
        if not token:
            logger.error('telegram-token is not set. Shutting down telegramio')
            return rs.Delete()
        updater: Updater = Updater(token)
        for chat_id in active_chats.keys():
            updater.bot.send_message(chat_id=chat_id, text=text)
    else:
        child_conn = ctx.conf(key=CHILD_CONN_CONFIG_KEY)
        if child_conn:
            # Child Process -> write to Pipe
            child_conn.send(text)
        else:
            # Master Process -> State not needed
            return rs.Delete()
def move_head(ctx: rs.ContextWrapper):
    for axis, lower, upper in [
            (prop_head_axis0, ctx.conf(key=AXIS0_LOWER_LIMIT_KEY), ctx.conf(key=AXIS0_UPPER_LIMIT_KEY)),
            (prop_head_axis1, ctx.conf(key=AXIS1_LOWER_LIMIT_KEY), ctx.conf(key=AXIS1_UPPER_LIMIT_KEY)),
            (prop_head_axis2, ctx.conf(key=AXIS2_LOWER_LIMIT_KEY), ctx.conf(key=AXIS2_UPPER_LIMIT_KEY))]:
        data = random.uniform(lower, upper)
        if random.random() < ctx.conf(key=HEAD_MOVEMENT_PROBABILITY_KEY):
            # move or don't move axis with probability
            logger.info(f"Publishing {data} to {axis.topic}")
            ctx[axis] = Float32(data=data)
def save_face(ctx: rs.ContextWrapper, id, face_vector):
    try:
        redis_conn = redis.Redis(
            host=ctx.conf(key=REDIS_HOST_CONF),
            port=ctx.conf(key=REDIS_PORT_CONF),
            password=ctx.conf(key=REDIS_PASS_CONF))
        redis_conn.set(id, pickle.dumps(face_vector))
    except redis.exceptions.ConnectionError as e:
        err_msg = "Looks like the redis connection is unavailable :-("
        logger.error(err_msg)
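# For illustration only: a hypothetical counterpart that reads a stored face vector back
# from redis, assuming the same connection config keys and the pickle encoding used by
# save_face above. It is not part of the module as listed here.
def load_face(ctx: rs.ContextWrapper, id):
    try:
        redis_conn = redis.Redis(
            host=ctx.conf(key=REDIS_HOST_CONF),
            port=ctx.conf(key=REDIS_PORT_CONF),
            password=ctx.conf(key=REDIS_PASS_CONF))
        raw = redis_conn.get(id)
        # pickle.loads reverses the pickle.dumps call made in save_face
        return pickle.loads(raw) if raw is not None else None
    except redis.exceptions.ConnectionError:
        logger.error("Looks like the redis connection is unavailable :-(")
        return None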
def reset(ctx: rs.ContextWrapper):
    """
    If there has been no face-oracle message for the configured number of seconds,
    removes the interlocutor node and initializes the face oracle filter from scratch.
    """
    # Remove interloc if present
    if any(ctx.enum(interloc.prop_all)):
        popped_node = ctx.pop(f'interloc:all:{interloc.ANON_INTERLOC_ID}')
        if popped_node:
            logger.info("Visual contact is broken, removed the interlocutor node")
    # Install a new FaceOracleFilter
    ctx[prop_face_filter] = FaceOracleFilter()
def ping_activity_choice(ctx: rs.ContextWrapper):
    if ctx[prop_game_in_progress]:
        rand = random.random()
        pinged_times = ctx[prop_ping_choice_count]
        if pinged_times < 5:
            if rand < 0.4 and ctx.conf(key=USE_EMOTIONS):
                emo_client(LOOK_RIGHT_EMOTION)
            elif 0.4 < rand < 0.8 and ctx.conf(key=USE_EMOTIONS):
                emo_client(LOOK_LEFT_EMOTION)
            ctx[rawio.prop_out] = verbaliser.get_random_phrase(
                "charades_ping_activity_choice")
            ctx[prop_ping_choice_count] = pinged_times + 1
            return rs.Emit()
        else:
            ctx[prop_stop_game] = True
def move_eyes(ctx: rs.ContextWrapper):
    if random.random() < ctx.conf(key=EYE_MOVEMENT_PROBABILITY_KEY):
        if random.random() < 0.5:
            logger.info(f"Publishing {LOOK_LEFT_EMOTION} to {prop_move_eyes.topic}")
            ctx[prop_move_eyes] = String(data=LOOK_LEFT_EMOTION)
        else:
            logger.info(f"Publishing {LOOK_RIGHT_EMOTION} to {prop_move_eyes.topic}")
            ctx[prop_move_eyes] = String(data=LOOK_RIGHT_EMOTION)
def push_telegram_interloc(ctx: rs.ContextWrapper, telegram_node: Node, name: str):
    """
    Push the telegram_node into interloc:all:name
    """
    if ctx.push(parent_property_or_path=interloc.prop_all,
                child=rs.Property(name=name, default_value=telegram_node)):
        logger.debug(f"Pushed {telegram_node} to interloc:all")
def react_to_continue_decision(ctx: rs.ContextWrapper):
    rand = random.random()
    if ctx[nlp.prop_yesno].yes():
        ctx[rawio.prop_out] = verbaliser.get_random_phrase(
            "charades_positive_expressions") + " Let's continue then"
        if rand < 0.7 and ctx.conf(key=USE_EMOTIONS):
            emo_client(LUCKY_EMOTION)
        return rs.Emit()
    elif ctx[nlp.prop_yesno].no():
        ctx[rawio.prop_out] = verbaliser.get_random_phrase(
            "charades_no_continuation")
        if ctx.conf(key=USE_EMOTIONS):
            emo_client(KISS_EMOTION)
        ctx[prop_stop_game] = True
    else:
        ctx[rawio.prop_out] = verbaliser.get_random_phrase(
            "charades_misunderstanding")
        ctx[prop_continuation_unclear] = True
def ask_to_continue(ctx: rs.ContextWrapper):
    if ctx[prop_feedback_received]:
        ctx[rawio.prop_out] = verbaliser.get_random_phrase(
            "charades_offer_another_round")
        if ctx[prop_sunglasses_on] and ctx.conf(key=USE_EMOTIONS):
            emo_client(SUNGLASSES_ON_EMOTION)
            ctx[prop_sunglasses_on] = False
        return rs.Emit()
    else:
        return rs.Resign()
def process_play_decision(ctx: rs.ContextWrapper):
    if not ctx[prop_game_in_progress]:
        if ctx[nlp.prop_yesno].yes():
            ctx[rawio.prop_out] = verbaliser.get_random_phrase(
                "charades_positive_expressions") + \
                " Do you want to hear the rules?"
            if ctx.conf(key=USE_EMOTIONS):
                emo_client(SMILEBLINK_EMOTION)
            return rs.Emit()
        elif ctx[nlp.prop_yesno].no():
            if random.random() < 0.5 and ctx.conf(key=USE_EMOTIONS):
                emo_client(ROLL_EYES_EMOTION)
            ctx[rawio.prop_out] = verbaliser.get_random_phrase(
                "charades_refuse_offer")
            ctx[prop_stop_game] = True
        else:
            ctx[prop_decision_unclear] = True
            ctx[rawio.prop_out] = verbaliser.get_random_phrase(
                "charades_misunderstanding")
    else:
        return rs.Resign()
def store_face_and_name(ctx: rs.ContextWrapper):
    tokens = ctx[nlp.prop_tokens]
    triples = ctx[nlp.prop_triples]
    if len(tokens) == 1:
        name = tokens[0]
    elif triples[0].get_object().text and triples[0].match_either_lemma(pred={"be"}):
        name = triples[0].get_object().text
    else:
        ctx["rawio:out"] = "Sorry, what was the name?"
        return rs.Emit()
    ctx["rawio:out"] = f"Got it, I'm sure I'll remember {name} next time I see that face!"

    # Create memory entry
    sess: Session = ravestate_ontology.get_session()
    onto: Ontology = ravestate_ontology.get_ontology()
    query = Node(metatype=onto.get_type("Person"))
    query.set_properties({"name": name})
    node_list = sess.retrieve(query)
    if not node_list:
        node = sess.create(query)
        logger.info(f"Created new Node in scientio session: {node}")
    elif len(node_list) == 1:
        node = node_list[0]
    else:
        logger.error(f'Failed to create or retrieve Scientio Node for {name}!')
        return
    logger.info(f"Node ID for {name} in picture is {node.get_id()}!")

    # Store face vector with node id in redis
    try:
        redis_conn = redis.Redis(
            host=ctx.conf(key=REDIS_HOST_CONF),
            port=ctx.conf(key=REDIS_PORT_CONF),
            password=ctx.conf(key=REDIS_PASS_CONF))
        redis_conn.set(node.get_id(), ctx["sendpics:face_vec"])
    except redis.exceptions.ConnectionError as e:
        err_msg = "Looks like the redis connection is unavailable :-("
        logger.error(err_msg)
        ctx[rawio.prop_out] = err_msg
def stop_game_session(ctx: rs.ContextWrapper):
    """
    End the current game session so that a new game can be started.
    """
    if not ctx[prop_game_stopped]:
        if ctx[prop_sunglasses_on] and ctx.conf(key=USE_EMOTIONS):
            emo_client(SUNGLASSES_ON_EMOTION)
            ctx[prop_sunglasses_on] = False
        ctx[prop_game_in_progress] = False
        ctx[prop_waiting_for_label] = False
        ctx[prop_ping_choice_count] = 0
        ctx[prop_game_stopped] = True
        ctx[rawio.prop_out] = "I will stop now but if you want to " \
                              "play again just tell me"
def start_recording(ctx: rs.ContextWrapper):
    if ctx.conf(key=USE_EMOTIONS):
        emo_client(HYPNO_EMOTION)
    resp = recognition_client()
    if resp:
        global count_round
        count_round = count_round + 1
        logger.info(f"Round {count_round}")
        ctx[rawio.prop_out] = "Beep! Now let me think a little bit"
        ctx[prop_waiting_for_label] = True
        ctx[prop_guess_attempt_count] = 0
        return rs.Emit()
    else:
        ctx[rawio.prop_out] = BROKEN_MESSAGE
        ctx[prop_stop_game] = True
def gpt3(ctx: rs.ContextWrapper):
    append_to_history(ctx.conf(key=HUMAN_PROMPT_PREFIX_KEY), ctx[rawio.prop_in])
    # history is a list of prior turns; wrap the Roboy prefix in a list so join() works
    prompt = ctx.conf(key=PROMPT_KEY) + "\n".join(
        history + [ctx.conf(key=ROBOY_PROMPT_PREFIX_KEY)])
    logger.info(prompt)
    result = requests.post(
        "https://api.openai.com/v1/engines/davinci/completions",
        json={
            "prompt": prompt,
            "temperature": ctx.conf(key=TEMPERATURE_KEY),
            "max_tokens": ctx.conf(key=MAX_TOKENS_KEY),
            "top_p": ctx.conf(key=MIN_PROB_KEY),
            "stop": ctx.conf(key=STOP_INDICATORS_KEY)
        },
        headers={
            "Authorization": f"Bearer {ctx.conf(key=GPT3_API_KEY)}"
        }).json()
    logger.info(result)
    ctx[rawio.prop_out] = result["choices"][0]["text"]
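# The module-level history list and the append_to_history helper are referenced by gpt3()
# but not shown in this listing; a minimal sketch of what they might look like
# (hypothetical -- the real module may bound or format the history differently):
history = []

def append_to_history(speaker_prefix: str, utterance: str):
    # Store one "<prefix> <utterance>" line per conversation turn, so that
    # "\n".join(history + [...]) in gpt3() produces a chat-style prompt.
    history.append(f"{speaker_prefix} {utterance}")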
def create_small_talk_states(ctx: rs.ContextWrapper, interloc_path: str):
    used_follow_up_preds = set()

    @rs.state(cond=idle.sig_bored_by_user,
              write=(rawio.prop_out, prop_predicate, prop_subject),
              read=(interloc_path, prop_predicate),
              weight=1.2,
              cooldown=40.,
              emit_detached=True,
              signal=sig_follow_up)
    def small_talk(ctx: rs.ContextWrapper):
        sess: Session = mem.get_session()
        interloc: Node = ctx[interloc_path]
        if interloc.get_id() < 0:
            # ask for name, if the interlocutor is not (yet) a persistent instance
            pred = "NAME"
        else:
            pred = find_empty_relationship(interloc.get_relationships())
        ctx[prop_subject] = interloc_path
        if not ctx[prop_predicate]:
            if pred:
                logger.info(f"Personal question: intent={pred}")
                ctx[prop_predicate] = pred
                ctx[rawio.prop_out] = verbaliser.get_random_question(pred)
            else:
                unused_fup_preds = PREDICATE_SET.difference(used_follow_up_preds)
                if not unused_fup_preds:
                    logger.info(f"Ran out of smalltalk predicates for {interloc_path}, committing suicide...")
                    return rs.Delete(resign=True)
                pred = random.sample(unused_fup_preds, 1)[0]
                used_follow_up_preds.add(pred)
                ctx[prop_predicate] = pred
                relationship_ids: Set[int] = interloc.get_relationships(pred)
                if len(relationship_ids) > 0:  # Just to be safe ...
                    object_node_list = sess.retrieve(node_id=list(relationship_ids)[0])
                    if len(object_node_list) > 0:
                        ctx[rawio.prop_out] = verbaliser.get_random_followup_question(pred).format(
                            name=interloc.get_name(),
                            obj=object_node_list[0].get_name())
                        logger.info(f"Follow-up: intent={pred}")
                        return rs.Emit()
                return rs.Resign()
        else:
            # While the predicate is set, repeat the question. Once the predicate is answered,
            # it will be set to None, such that a new predicate is entered.
            ctx[rawio.prop_out] = verbaliser.get_random_question(ctx[prop_predicate])

    @rs.state(cond=sig_follow_up.max_age(-1.) & nlp.prop_triples.changed(),
              write=(rawio.prop_out, prop_predicate, prop_inference_mutex),
              read=(interloc_path, prop_predicate, nlp.prop_yesno))
    def fup_react(ctx: rs.ContextWrapper):
        sess: Session = mem.get_session()
        subject_node: Node = ctx[interloc_path]
        pred = ctx[prop_predicate]
        object_node_list = []
        relationship_ids: Set[int] = subject_node.get_relationships(pred)
        if len(relationship_ids) > 0:
            object_node_list = sess.retrieve(node_id=list(relationship_ids)[0])
        if len(object_node_list) > 0:
            ctx[rawio.prop_out] = verbaliser.get_random_followup_answer(pred).format(
                name=subject_node.get_name(),
                obj=object_node_list[0].get_name())
        else:
            ctx[rawio.prop_out] = "Oh, I see!"
        ctx[prop_predicate] = None

    ctx.add_state(small_talk)
    ctx.add_state(fup_react)
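# For context: this factory is presumably invoked once per newly pushed interlocutor.
# A hypothetical sketch of such a caller (the pushed() signal condition and the state name
# are assumptions, not taken from this listing; only ctx.enum over interloc:all appears above):
@rs.state(cond=interloc.prop_all.pushed(), read=interloc.prop_all)
def new_interlocutor(ctx: rs.ContextWrapper):
    # Create the small-talk/follow-up state pair for every known interlocutor path.
    for interloc_path in ctx.enum(interloc.prop_all):
        create_small_talk_states(ctx=ctx, interloc_path=interloc_path)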
def sync_ros_properties(ctx: rs.ContextWrapper):
    """
    State that creates a ROS2-Node, registers all Ros2SubProperties and
    Ros2PubProperties in ROS2 and keeps them synced.
    """
    global global_prop_set, global_node

    # check for ROS2 availability
    if not ROS2_AVAILABLE:
        logger.error("ROS2 is not available, therefore all ROS2-Properties "
                     "will be just normal properties without connection to ROS2!")
        return rs.Delete()

    # get config stuff
    node_name = ctx.conf(key=NODE_NAME_CONFIG_KEY)
    if not node_name:
        logger.error(f"{NODE_NAME_CONFIG_KEY} is not set. Shutting down ravestate_ros2")
        return rs.Delete()
    spin_frequency = ctx.conf(key=SPIN_FREQUENCY_CONFIG_KEY)
    if spin_frequency is None or spin_frequency < 0:
        logger.error(f"{SPIN_FREQUENCY_CONFIG_KEY} is not set or less than 0. Shutting down ravestate_ros2")
        return rs.Delete()
    if spin_frequency == 0:
        spin_sleep_time = 0
    else:
        spin_sleep_time = 1 / spin_frequency

    # init ROS
    if not rclpy.ok():
        rclpy.init()
    if not global_node:
        global_node = rclpy.create_node(node_name)

    # current_props: hash -> subscription/publisher
    current_props: Dict = dict()

    # ROS-Context Sync Loop
    while not ctx.shutting_down():
        # remove deleted props
        removed_props = current_props.keys() - global_prop_set
        for prop_hash in removed_props:
            item = current_props[prop_hash]
            if isinstance(item, rclpy.subscription.Subscription):
                global_node.destroy_subscription(item)
            elif isinstance(item, rclpy.publisher.Publisher):
                global_node.destroy_publisher(item)
            elif isinstance(item, rclpy.client.Client):
                global_node.destroy_client(item)
            current_props.pop(prop_hash)

        # add new props
        new_props = global_prop_set - current_props.keys()
        for prop in new_props:
            # register subscribers in ROS
            if isinstance(prop, Ros2SubProperty):
                # register in context
                @rs.receptor(ctx_wrap=ctx, write=prop.id())
                def ros_to_ctx_callback(ctx, msg, prop_name: str):
                    ctx[prop_name] = msg

                prop.ros_to_ctx_callback = ros_to_ctx_callback
                prop.subscription = global_node.create_subscription(
                    prop.msg_type, prop.topic, prop.ros_subscription_callback)
                current_props[prop.__hash__()] = prop.subscription
            # register publishers in ROS
            if isinstance(prop, Ros2PubProperty):
                prop.publisher = global_node.create_publisher(prop.msg_type, prop.topic)
                current_props[prop.__hash__()] = prop.publisher
            # register clients in ROS
            if isinstance(prop, Ros2CallProperty):
                prop.client = global_node.create_client(prop.service_type, prop.service_name)
                current_props[prop.__hash__()] = prop.client

            # replace prop with hash in global_props
            global_prop_set.remove(prop)
            global_prop_set.add(prop.__hash__())

        # spin once
        rclpy.spin_once(global_node, timeout_sec=0)
        time.sleep(spin_sleep_time)

    global_node.destroy_node()
    rclpy.shutdown()
def is_affectionate(ctx: rs.ContextWrapper):
    if any(l in ctx[nlp.prop_lemmas] for l in AFFECTIONATE_LIST) and \
            random.random() < ctx.conf(key=AFFECTIONATE_PROB_KEY):
        logger.debug(f"Emitting {sig_affectionate.name}")
        return rs.Emit()
    return rs.Resign()
def am_i_bored_by_user(ctx: rs.ContextWrapper):
    """
    Emits idle:bored-by-user if idle:bored is emitted and there is a present interlocutor.
    """
    if any(ctx.enum(interloc.prop_all)):
        return rs.Emit(wipe=True)
def is_surprised(ctx: rs.ContextWrapper):
    if random.random() < ctx.conf(key=SURPRISED_PROB_KEY):
        logger.debug(f"Emitting {sig_surprise.name}")
        return rs.Emit()
    return rs.Resign()
def telegram_run(ctx: rs.ContextWrapper):
    """
    Starts up the telegram bot and adds a handler to write incoming messages to rawio:in
    """

    @rs.receptor(ctx_wrap=ctx, write=rawio.prop_in)
    def text_receptor(ctx: rs.ContextWrapper, message_text: str):
        """
        Writes the message_text to rawio:in
        """
        ctx[rawio.prop_in] = message_text

    @rs.receptor(ctx_wrap=ctx, write=rawio.prop_pic_in)
    def photo_receptor(ctx: rs.ContextWrapper, photo_path):
        """
        Handles photo messages, write to rawio:pic_in
        """
        ctx[rawio.prop_pic_in] = photo_path

    @rs.receptor(ctx_wrap=ctx, write=interloc.prop_all)
    def push_telegram_interloc(ctx: rs.ContextWrapper, telegram_node: Node, name: str):
        """
        Push the telegram_node into interloc:all:name
        """
        if ctx.push(parent_property_or_path=interloc.prop_all,
                    child=rs.Property(name=name, default_value=telegram_node)):
            logger.debug(f"Pushed {telegram_node} to interloc:all")

    def make_sure_effective_user_exists(update: Update):
        """
        Retrieves scientio Node of User if it exists, otherwise creates it in the scientio session.
        Calls the push_telegram_interloc receptor to push the scientio node into interloc:all.
        Adds the User to the set of active_users and the chat to the set of active_chats.
        """
        active_chats[update.effective_chat.id] = (Timestamp(), None)
        if update.effective_user.id in active_users:
            active_users[update.effective_user.id].add(update.effective_chat.id)
        else:
            # set up scientio
            if ontology.initialized.wait():
                sess: Session = ontology.get_session()
                onto: Ontology = ontology.get_ontology()

                # create scientio Node of type TelegramPerson
                query = Node(metatype=onto.get_type("TelegramPerson"))
                prop_dict = {'telegram_id': update.effective_user.id}
                if update.effective_user.username:
                    prop_dict['name'] = update.effective_user.username
                if update.effective_user.full_name:
                    prop_dict['full_name'] = update.effective_user.full_name
                query.set_properties(prop_dict)

                node_list = sess.retrieve(query)
                if not node_list:
                    telegram_node = sess.create(query)
                    logger.info(f"Created new Node in scientio session: {telegram_node}")
                elif len(node_list) == 1:
                    telegram_node = node_list[0]
                else:
                    logger.error(
                        f'Found multiple TelegramPersons that matched query: {update.message.chat_id} '
                        f'in scientio session. Cannot push node to interloc:all!')
                    return

                # push chat-Node
                push_telegram_interloc(telegram_node, update.effective_chat.id)
                active_users[update.effective_user.id] = {update.effective_chat.id}

    def handle_text(bot: Bot, update: Update):
        """
        Handle incoming text messages
        """
        make_sure_effective_user_exists(update)
        text_receptor(update.effective_message.text)

    def handle_photo(bot: Bot, update: Update):
        """
        Handle incoming photo messages.
        """
        make_sure_effective_user_exists(update)
        photo_index = 2  # Seems like a good size index. TODO: Make configurable
        while photo_index >= len(update.effective_message.photo):
            photo_index -= 1
            if photo_index < 0:
                logger.error("Telegram photo handler was called, but no photo received!")
                return
        file_descr = bot.get_file(update.effective_message.photo[photo_index].file_id)
        photo = requests.get(file_descr.file_path)
        file_path = mkstemp()[1]
        with open(file_path, 'wb') as file:
            file.write(photo.content)
        photo_receptor(file_path)

    def handle_input_multiprocess(bot: Bot, update: Update):
        """
        Handle incoming messages
        """
        if update.effective_chat.id not in active_chats:
            add_new_child_process(update.effective_chat.id)
        # write (bot, update) to Pipe
        active_chats[update.effective_chat.id][0].update()
        logger.info(f"INPUT: {update.effective_message.text}")
        active_chats[update.effective_chat.id][1].send((bot, update))
        # send typing symbol
        bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)

    def add_new_child_process(chat_id):
        """
        Adds the chat of the incoming message to the set of active_chats.
        Creates a new Ravestate Context in a new Process for the new chat and
        sets up a bidirectional Pipe for communication between Master and Child Processes.
        """
        # start method has to be 'spawn'
        mp_context = mp.get_context('spawn')
        # Pipe to communicate between Master Process and all children
        parent_conn, child_conn = mp.Pipe()
        # create commandline args for child config file
        args = []
        child_config_paths_list = ctx.conf(key=CHILD_FILES_CONFIG_KEY)
        for child_config_path in child_config_paths_list:
            args += ['-f', child_config_path]
        # set up new Process and override child_conn with the Pipe-Connection
        p = mp_context.Process(
            target=rs.create_and_run_context,
            args=(*args, ),
            kwargs={'runtime_overrides': [(MODULE_NAME, CHILD_CONN_CONFIG_KEY, child_conn)]})
        p.start()
        active_chats[chat_id] = (Timestamp(), parent_conn)

    def error(bot: Bot, update: Update, error: TelegramError):
        """
        Log Errors caused by Updates.
        """
        logger.warning(f'Update {update.effective_message} caused error {error.message}')

    def _manage_children(updater):
        """
        Receive messages from children via Pipe and then send them to the corresponding Telegram Chat.
        Remove chats when they get older than the chat lifetime.
        :param updater: The Updater of the telegram-Bot
        """
        chat_lifetime = ctx.conf(key=CHAT_LIFETIME) * 60  # conversion from minutes to seconds
        while not ctx.shutting_down():
            removable_chats = set()
            removable_users = set()
            # wait for children to write to Pipe and then send message to chat
            tick_interval = 1. / ctx.conf(mod=rs.CORE_MODULE_NAME, key=rs.TICK_RATE_CONFIG_KEY)
            time.sleep(tick_interval)
            for chat_id, (last_msg_timestamp, parent_pipe) in active_chats.items():
                if parent_pipe.poll():
                    try:
                        msg = parent_pipe.recv()
                        if isinstance(msg, str):
                            logger.info(f"OUTPUT: {msg}")
                            updater.bot.send_message(chat_id=chat_id, text=msg)
                        else:
                            logger.error(f'Tried sending non-str object as telegram message: {str(msg)}')
                    except EOFError:
                        # Child pipe was closed
                        parent_pipe.close()
                        removable_chats.add(chat_id)
                        continue
                # remove chat from active_chats if inactive for too long
                if last_msg_timestamp.age() > chat_lifetime:
                    parent_pipe.close()
                    removable_chats.add(chat_id)
            for chat_id in removable_chats:
                active_chats.pop(chat_id)
                for user_id, chat_ids in active_users.items():
                    # remove chat from chats that the user is part of
                    chat_ids.discard(chat_id)
                    if len(chat_ids) == 0:
                        # user is no longer part of any active chats
                        removable_users.add(user_id)
            for user_id in removable_users:
                active_users.pop(user_id)

    def _bootstrap_telegram_master():
        """
        Handle TelegramIO as the Master Process.
        Start the bot, and handle incoming telegram messages.
        """
        token = ctx.conf(key=TOKEN_CONFIG_KEY)
        if not token:
            logger.error(f'{TOKEN_CONFIG_KEY} is not set. Shutting down telegramio')
            return rs.Delete()
        child_config_paths_list = ctx.conf(key=CHILD_FILES_CONFIG_KEY)
        if not ctx.conf(key=ALL_IN_ONE_CONTEXT_CONFIG_KEY) and (
                not child_config_paths_list or
                not isinstance(child_config_paths_list, list) or
                not all(os.path.isfile(child_config_path)
                        for child_config_path in child_config_paths_list)):
            logger.error(f'{CHILD_FILES_CONFIG_KEY} is not set (correctly). Shutting down telegramio')
            return rs.Delete()

        updater: Updater = Updater(token)
        # Get the dispatcher to register handlers
        dispatcher: Dispatcher = updater.dispatcher
        if ctx.conf(key=ALL_IN_ONE_CONTEXT_CONFIG_KEY):
            # handle noncommand-messages with the matching handler
            dispatcher.add_handler(MessageHandler(Filters.text, handle_text))
            dispatcher.add_handler(MessageHandler(Filters.photo, handle_photo))
        else:
            dispatcher.add_handler(
                MessageHandler(Filters.text | Filters.photo, handle_input_multiprocess))
        # log all errors
        dispatcher.add_error_handler(error)
        # Start the Bot
        updater.start_polling()  # non blocking
        if not ctx.conf(key=ALL_IN_ONE_CONTEXT_CONFIG_KEY):
            _manage_children(updater)

    def _bootstrap_telegram_child():
        """
        Handle TelegramIO as a Child Process.
        Listen to Pipe and handle incoming texts and photos.
        """
        try:
            while not ctx.shutting_down():
                # receive Bot, Update for telegram chat
                bot, update = child_conn.recv()  # blocking
                if update.effective_message.photo:
                    handle_photo(bot, update)
                elif update.effective_message.text:
                    if update.effective_message.text.strip().lower() in \
                            verbaliser.get_phrase_list("farewells"):
                        send_on_telegram(ctx, verbaliser.get_random_phrase("farewells"))
                        logger.info("Shutting down child process")
                        ctx.shutdown()
                    handle_text(bot, update)
                else:
                    logger.error(f"{MODULE_NAME} received an update it cannot handle.")
        except EOFError:
            # Pipe was closed -> Parent was killed or parent has closed the pipe
            logger.info("Pipe was closed, therefore the telegram-child will shut down.")
            ctx.shutdown()

    child_conn = ctx.conf(key=CHILD_CONN_CONFIG_KEY)
    is_master_process = child_conn is None
    if is_master_process:
        return _bootstrap_telegram_master()
    else:
        _bootstrap_telegram_child()
def is_busy(ctx: rs.ContextWrapper):
    busy = any(ctx.enum(interloc.prop_all))
    # Set this as a ROS param if you want to use it inside ws_comm,
    # otherwise you can just assign a variable
    if ROS_AVAILABLE:
        rospy.set_param('roboy_is_busy', busy)
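# Consumers (e.g. the ws_comm side mentioned above) could read the flag back from the
# ROS parameter server; a minimal sketch, assuming a running ROS1 master and an initialized
# node. The parameter name 'roboy_is_busy' is the one set above; the helper itself is hypothetical.
import rospy

def roboy_is_busy() -> bool:
    # Falls back to False if the parameter has not been set yet.
    return bool(rospy.get_param('roboy_is_busy', False))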
def sync_ros_properties(ctx: rs.ContextWrapper):
    """
    State that creates a ROS1-Node, registers all Ros1SubProperties and
    Ros1PubProperties in ROS1 and keeps them synced.
    """
    global global_prop_set, global_node

    # check for ROS1 availability
    if not ROS1_AVAILABLE:
        logger.error("ROS1 is not available, therefore all ROS1-Properties "
                     "will be just normal properties without connection to ROS1!")
        return rs.Delete()

    # get config stuff
    node_name = ctx.conf(key=NODE_NAME_CONFIG_KEY)
    if not node_name:
        logger.error(f"{NODE_NAME_CONFIG_KEY} is not set. Shutting down ravestate_ros1")
        return rs.Delete()
    spin_frequency = ctx.conf(key=SPIN_FREQUENCY_CONFIG_KEY)
    if spin_frequency is None or spin_frequency < 0:
        logger.error(f"{SPIN_FREQUENCY_CONFIG_KEY} is not set or less than 0. Shutting down ravestate_ros1")
        return rs.Delete()
    if spin_frequency == 0:
        spin_sleep_time = 0
    else:
        spin_sleep_time = 1 / spin_frequency

    # Use same node_name if ROS1 was already initialized (i.e. by importing pyroboy)
    if rospy.get_name():
        node_name = rospy.get_name()[1:]  # cut off leading /

    # init ROS1
    # rospy.init_node(node_name, disable_signals=True)

    # current_props: hash -> Subscriber/Publisher/ServiceProxy
    current_props: Dict = dict()

    # ROS1-Context Sync Loop
    while not ctx.shutting_down() and not rospy.core.is_shutdown():
        # remove deleted props
        removed_props = current_props.keys() - global_prop_set
        for prop_hash in removed_props:
            item = current_props[prop_hash]
            item.unregister()
            current_props.pop(prop_hash)

        # add new props
        new_props = global_prop_set - current_props.keys()
        for prop in new_props:
            # register subscribers in ROS1
            if isinstance(prop, Ros1SubProperty):
                # register in context
                @rs.receptor(ctx_wrap=ctx, write=prop.id())
                def ros_to_ctx_callback(ctx, msg, prop_name: str):
                    ctx[prop_name] = msg

                prop.ros_to_ctx_callback = ros_to_ctx_callback
                prop.subscriber = rospy.Subscriber(
                    prop.topic, prop.msg_type, prop.ros_subscription_callback)
                current_props[prop.__hash__()] = prop.subscriber
            # register publishers in ROS1
            if isinstance(prop, Ros1PubProperty):
                prop.publisher = rospy.Publisher(
                    prop.topic, prop.msg_type, queue_size=prop.queue_size)
                current_props[prop.__hash__()] = prop.publisher
            # register clients in ROS1
            if isinstance(prop, Ros1CallProperty):
                prop.client = rospy.ServiceProxy(prop.service_name, prop.service_type)
                current_props[prop.__hash__()] = prop.client

            # replace prop with hash in global_props
            global_prop_set.remove(prop)
            global_prop_set.add(prop.__hash__())

        rospy.rostime.wallsleep(spin_sleep_time)

    rospy.signal_shutdown("ravestate_ros1 is shutting down")
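# For context, a hypothetical sketch of how a publisher property served by this sync loop
# might be declared and written from a state (the property name, topic and exact
# Ros1PubProperty constructor arguments are assumptions inferred from the
# .topic/.msg_type/.queue_size attributes used above; the real module may differ):
from std_msgs.msg import Float32

prop_head_axis0 = Ros1PubProperty(name="head_axis0",
                                  topic="/roboy/head/axis0",  # assumed topic name
                                  msg_type=Float32,
                                  queue_size=1)

@rs.state(write=prop_head_axis0)
def nod(ctx: rs.ContextWrapper):
    # Writing a ROS message to the property lets the sync loop's rospy.Publisher publish it.
    ctx[prop_head_axis0] = Float32(data=0.3)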
def recognize_faces(ctx: rs.ContextWrapper):
    """
    Activates with each incoming batch of face data served by the face oracle. Responsible for
    synchronizing the node of the person in vision with the anonymous interlocutor node.
    Uses the face oracle filter to organize the incoming data and find the right person.
    """
    face_filter: FaceOracleFilter = ctx[prop_face_filter]
    faces: Faces = ctx[prop_subscribe_faces]

    # Push faces to face filter
    best_guess_changed = face_filter.push_message(faces)

    if best_guess_changed:
        current_best_guess: Person = face_filter.current_best_guess

        onto: Ontology = mem.get_ontology()
        sess: Session = mem.get_session()

        person_node = Node(metatype=onto.get_type("Person"))

        best_guess_id = current_best_guess.id
        face_vector = current_best_guess.face_vector
        if current_best_guess.is_known:
            person_node_query = sess.retrieve(node_id=best_guess_id)
            if person_node_query:
                person_node = person_node_query[0]
            else:
                err_msg = "Person with id %s is not found in memory." % best_guess_id
                logger.error(err_msg)
                return
        else:
            person_node.set_properties({
                'face_vector': face_vector,
                'name': interloc.ANON_INTERLOC_ID
            })

        push = False
        # Check if there is any interlocutor. If necessary, pop the current node and
        # push the person node instead.
        if any(ctx.enum(interloc.prop_all)):
            interloc_node: Node = ctx[f'interloc:all:{interloc.ANON_INTERLOC_ID}']
            # If the interloc and the person nodes are not the same, pop and push the person node.
            if not (interloc_node.get_id() == person_node.get_id()) or interloc_node.get_id() < 0:
                # Remove the current interloc
                logger.info('Popping current interlocutor')
                popped_node = ctx.pop(f'interloc:all:{interloc.ANON_INTERLOC_ID}')
                assert popped_node == True
                push = True
            else:
                # Update the face vector of the already familiar person
                save_face(ctx, interloc_node.get_id(), current_best_guess.face_vector)
        else:
            push = True
        if push:
            # Push the new interlocutor
            ctx.push(parent_property_or_path=interloc.prop_all,
                     child=rs.Property(name=interloc.ANON_INTERLOC_ID,
                                       default_value=person_node))
            logger.info(f"Pushed node with id {person_node.id} to interloc:all")
def console_input(ctx: rs.ContextWrapper):
    while not ctx.shutting_down():
        input_value = input("> ")
        rawio.say(ctx, input_value)
def is_happy(ctx: rs.ContextWrapper):
    if random.random() < ctx.conf(key=HAPPY_PROB_KEY):
        logger.debug(f"Emitting {sig_happy.name}")
        return rs.Emit()
    return rs.Resign()