def post(self):
    voice = request.json['data']
    speaked_at = request.json['speaked_at']
    print('*** request.json ***', file=sys.stderr)
    # print(request.json, file=sys.stderr)
    print('*** ************ ***', file=sys.stderr)

    voice = base64.b64decode(voice)
    voice = conv_endian(voice)
    speaked_at = dateutil.parser.parse(speaked_at)
    print('**** speaked_at ****', file=sys.stderr)
    print(speaked_at, file=sys.stderr)
    print('*** ************ ***', file=sys.stderr)

    # Recognize the voice data
    voice = recognizer.recognize(voice)
    print('****** voice *******', file=sys.stderr)
    print(voice, file=sys.stderr)
    print('*** ************ ***', file=sys.stderr)

    conversation = Conversation(content=voice, speaked_at=speaked_at)
    session.add(conversation)
    session.commit()
    return "ok"
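# A minimal client-side sketch of how the handler above might be exercised.
# The endpoint URL is an assumption; the JSON keys 'data' (base64-encoded
# audio) and 'speaked_at' (a parseable timestamp) are taken from the handler.
import base64
import datetime
import requests

def send_voice_sample(wav_path, url='http://localhost:5000/conversations'):
    with open(wav_path, 'rb') as f:
        encoded = base64.b64encode(f.read()).decode('ascii')
    payload = {
        'data': encoded,
        'speaked_at': datetime.datetime.now().isoformat(),
    }
    return requests.post(url, json=payload)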
def conversation(self):
    return Conversation(conversation_id=3,
                        participant1=self.conversation_peers[0],
                        participant2=self.conversation_peers[1],
                        messages=self.messages,
                        start_time=self.messages[0].time,
                        end_time=self.messages[-1].time)
def experiment_fn(run_config, params):
    conversation = Conversation()
    estimator = tf.estimator.Estimator(model_fn=conversation.model_fn,
                                       model_dir=Config.train.model_dir,
                                       params=params,
                                       config=run_config)

    vocab = data_loader.load_vocab("vocab")
    Config.data.vocab_size = len(vocab)

    train_X, test_X, train_y, test_y = data_loader.make_train_and_test_set()

    train_input_fn, train_input_hook = data_loader.make_batch(
        (train_X, train_y), batch_size=Config.model.batch_size)
    test_input_fn, test_input_hook = data_loader.make_batch(
        (test_X, test_y), batch_size=Config.model.batch_size, scope="test")

    experiment = tf.contrib.learn.Experiment(
        estimator=estimator,
        train_input_fn=train_input_fn,
        eval_input_fn=test_input_fn,
        train_steps=Config.train.train_steps,
        min_eval_frequency=Config.train.min_eval_frequency,
        train_monitors=[
            train_input_hook,
            hook.print_variables(
                variables=['train/enc_0', 'train/dec_0', 'train/pred_0'],
                vocab=vocab,
                every_n_iter=Config.train.check_hook_n_iter)
        ],
        eval_hooks=[test_input_hook],
        eval_delay_secs=0)

    return experiment
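# experiment_fn(run_config, params) matches the signature expected by
# tf.contrib.learn's learn_runner in TF 1.x; a sketch of how it might be
# driven. The schedule value and the RunConfig arguments are assumptions.
from tensorflow.contrib.learn import learn_runner

def run_experiment():
    params = tf.contrib.training.HParams(**Config.model.to_dict())
    run_config = tf.contrib.learn.RunConfig(model_dir=Config.train.model_dir)
    learn_runner.run(experiment_fn=experiment_fn,
                     run_config=run_config,
                     schedule='train_and_evaluate',
                     hparams=params)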
def main():
    params = tf.contrib.training.HParams(**Config.model.to_dict())
    run_config = tf.estimator.RunConfig(
        model_dir=Config.train.model_dir,
        save_checkpoints_steps=Config.train.save_checkpoints_steps,
    )

    tf_config = os.environ.get('TF_CONFIG', '{}')
    tf_config_json = json.loads(tf_config)
    cluster = tf_config_json.get('cluster')
    job_name = tf_config_json.get('task', {}).get('type')
    task_index = tf_config_json.get('task', {}).get('index')

    cluster_spec = tf.train.ClusterSpec(cluster)
    server = tf.train.Server(cluster_spec,
                             job_name=job_name,
                             task_index=task_index)

    if job_name == "ps":
        tf.logging.info("Started server!")
        server.join()

    if job_name == "worker":
        with tf.Session(server.target):
            with tf.device(
                    tf.train.replica_device_setter(
                        worker_device="/job:worker/task:%d" % task_index,
                        cluster=cluster)):
                tf.logging.info("Initializing Estimator")
                conversation = Conversation()
                estimator = tf.estimator.Estimator(
                    model_fn=conversation.model_fn,
                    model_dir=Config.train.model_dir,
                    params=params,
                    config=run_config)

                tf.logging.info("Initializing vocabulary")
                vocab = data_loader.load_vocab("vocab")
                Config.data.vocab_size = len(vocab)

                train_X, test_X, train_y, test_y = data_loader.make_train_and_test_set()

                train_input_fn, train_input_hook = data_loader.make_batch(
                    (train_X, train_y), batch_size=Config.model.batch_size)
                test_input_fn, test_input_hook = data_loader.make_batch(
                    (test_X, test_y), batch_size=Config.model.batch_size,
                    scope="test")

                tf.logging.info("Initializing Specifications")
                train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                                    max_steps=1000)
                eval_spec = tf.estimator.EvalSpec(input_fn=test_input_fn)

                tf.logging.info("Run training")
                tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def _make_estimator():
    params = tf.contrib.training.HParams(**Config.model.to_dict())
    # Using CPU
    run_config = tf.contrib.learn.RunConfig(
        model_dir=Config.train.model_dir,
        session_config=tf.ConfigProto(device_count={'GPU': 0}))

    conversation = Conversation()
    return tf.estimator.Estimator(model_fn=conversation.model_fn,
                                  model_dir=Config.train.model_dir,
                                  params=params,
                                  config=run_config)
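# A sketch of how _make_estimator() might be used for inference. The feature
# key 'input_data' and the numpy input pipeline are assumptions, not part of
# the snippet above.
import numpy as np

def _predict_ids(enc_input_ids):
    estimator = _make_estimator()
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'input_data': np.array(enc_input_ids, dtype=np.int32)},
        shuffle=False)
    return list(estimator.predict(input_fn=predict_input_fn))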
def experiment_fn(run_config, params):
    # Define the estimator first
    conversation = Conversation()
    estimator = tf.estimator.Estimator(model_fn=conversation.model_fn,
                                       model_dir=Config.train.model_dir,
                                       params=params,
                                       config=run_config)

    # Load the vocabulary (a dict)
    vocab = data_loader.load_vocab("vocab")
    Config.data.vocab_size = len(vocab)

    # Build the training data
    train_X, test_X, train_y, test_y = data_loader.make_train_and_test_set()

    train_input_fn, train_input_hook = data_loader.make_batch(
        (train_X, train_y), batch_size=Config.model.batch_size)
    test_input_fn, test_input_hook = data_loader.make_batch(
        (test_X, test_y), batch_size=Config.model.batch_size, scope="test")

    train_hooks = [train_input_hook]
    if Config.train.print_verbose:
        train_hooks.append(
            hook.print_variables(
                variables=['train/enc_0', 'train/dec_0', 'train/pred_0'],
                rev_vocab=utils.get_rev_vocab(vocab),
                every_n_iter=Config.train.check_hook_n_iter))
    if Config.train.debug:
        train_hooks.append(tf_debug.LocalCLIDebugHook())

    eval_hooks = [test_input_hook]
    if Config.train.debug:
        eval_hooks.append(tf_debug.LocalCLIDebugHook())

    # Define the experiment
    experiment = tf.contrib.learn.Experiment(
        estimator=estimator,
        train_input_fn=train_input_fn,
        eval_input_fn=test_input_fn,
        train_steps=Config.train.train_steps,
        min_eval_frequency=Config.train.min_eval_frequency,
        train_monitors=train_hooks,
        eval_hooks=eval_hooks,
        eval_delay_secs=0)

    return experiment
def main_route():
    if request.method == 'POST':
        try:
            payload = request.get_json()
            for message, sender in messaging_events(payload):
                # if a convo with this sender exists, run the appropriate protocol
                # found = convos.find_one({"id": sender}).count()
                # if found != 0:
                #     temp = Conversation(sender);
                #     temp.prefs = found["prefs"]
                #     temp.curState = found["curState"]
                #     temp.id = found["id"]
                #     temp.numBeds = found["numBeds"]
                #     parse_and_respond(temp, message)
                #     convos.update_one({"id": sender}, temp)
                # else:
                #     temp = Conversation(sender);
                #     convos.insert_one(temp)
                #     message = "Initial question as result of function here"
                if sender in convos:
                    parse_and_respond(sender, message)
                else:
                    convos[sender] = Conversation(sender)
                    parse_and_respond(sender, message)
            return "okay"
        except Exception as e:
            print(type(e))
            print(e.args)
    elif request.method == 'GET':
        if request.args.get('hub.verify_token') == config.env['verify_token']:
            return request.args.get('hub.challenge')
        return "Wrong Verify Token"
    return "Hello World"
async def _instantiate_dialog(self, user: User, peer: Union[User, Bot]):
    log.info(f'instantiating the dialog')
    conversation = Conversation(participant1=ConversationPeer(peer=user),
                                participant2=ConversationPeer(peer=peer))

    profiles = await run_sync_in_executor(PersonProfile.objects)
    profiles_count = await run_sync_in_executor(profiles.count)

    for p in conversation.participants:
        p.assigned_profile = profiles[random.randrange(profiles_count)]

    while True:
        conv_id = random.getrandbits(31)
        if conv_id not in self._active_dialogs and \
                await run_sync_in_executor(lambda: Conversation.objects(conversation_id=conv_id).count()) == 0:
            break

    conversation.conversation_id = conv_id
    self._active_dialogs[conv_id] = conversation

    for p in conversation.participants:
        target_gateway = self._gateway_for_peer(p)
        await target_gateway.start_conversation(conv_id, p.peer, p.assigned_profile)

    self._reset_inactivity_timer(conv_id)
def check_or_create_conversation(user_id):
    """Checks if there is a chat history between the current user and the
    other user_id. If there is, it loads the history; if there isn't, it
    creates a row in the table. Users are assigned to messager_1 or
    messager_2 depending on whose user id is larger.
    """
    bigger = max(current_user.id, int(user_id))
    lower = min(current_user.id, int(user_id))

    conversation = Conversation.query.filter(
        (Conversation.messager_1 == bigger) &
        (Conversation.messager_2 == lower)).first()

    if not conversation:
        new_convo = Conversation(messager_1=bigger, messager_2=lower)
        db.session.add(new_convo)
        db.session.commit()
        # As soon as the conversation has been added to the database,
        # query the database for the conversation.
        conversation = Conversation.query.filter(
            (Conversation.messager_1 == bigger) &
            (Conversation.messager_2 == lower)).first()

    return redirect(f'/chat/messages/{conversation.id}')
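# A sketch of how check_or_create_conversation might be registered as a view;
# it assumes an existing Flask `app` object and flask_login's login_required,
# and the URL rule here is hypothetical.
from flask_login import login_required

@app.route('/chat/new/<int:user_id>')
@login_required
def start_chat(user_id):
    return check_or_create_conversation(user_id)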
async def _instantiate_dialog(self, user: User, peer: Union[User, Bot]):
    log.info(f'instantiating the dialog')
    conversation = Conversation(
        participant1=ConversationPeer(
            peer=user, peer_conversation_guid=uuid4().__str__()),
        participant2=ConversationPeer(
            peer=peer, peer_conversation_guid=uuid4().__str__()))

    tags_set: QuerySet = Settings.objects(name='tags')
    active_tags = tags_set.first().value if tags_set.count() else []

    if active_tags:
        profiles: QuerySet = await run_sync_in_executor(
            PersonProfile.objects(tags__in=active_tags))
        if profiles.count() == 0:
            log.warning(f'Not found any profiles with tags: {active_tags}')
            profiles: QuerySet = await run_sync_in_executor(
                PersonProfile.objects)
    else:
        profiles: QuerySet = await run_sync_in_executor(PersonProfile.objects)

    first_profile = None
    linked_profile_uuid = None

    for p in conversation.participants:
        if first_profile is None:
            p.assigned_profile = first_profile = random.choice(profiles)
            linked_profile_uuid = first_profile.link_uuid
        else:
            # profiles assignment order:
            # other profile from the same linked group || profile with unmatching sentences || same profile
            second_profile = random.choice(
                profiles(id__ne=first_profile.id, link_uuid=linked_profile_uuid) or
                (profiles(persona__ne=first_profile.persona) or [first_profile]))
            p.assigned_profile = second_profile

    while True:
        conv_id = random.getrandbits(31)
        if conv_id not in self._active_dialogs and \
                await run_sync_in_executor(lambda: Conversation.objects(conversation_id=conv_id).count()) == 0:
            break

    conversation.conversation_id = conv_id
    conversation.messages_to_switch_topic = self.dialog_options['n_messages_to_switch_topic']
    conversation.reset_topic_switch_counter()
    self._active_dialogs[conv_id] = conversation

    # Topic names (in Russian): social networks, fitness, cleaning and tidiness,
    # loneliness, fashion, female appearance, male appearance, money and wealth,
    # cars, happiness.
    topics = [
        'Социальные сети', 'Фитнес', 'Уборка и чистота', 'Одиночество',
        'Мода', 'Женская внешность', 'Мужская внешность',
        'Деньги, богатство', 'Машины', 'Счастье'
    ]
    curr_topic = random.sample(topics, k=1)
    str_topic = curr_topic[0]
    msg = conversation.add_message(text='Switched to topic ' + str_topic,
                                   sender=peer,
                                   system=True)

    for p in conversation.participants:
        target_gateway = self._gateway_for_peer(p)
        p.assigned_profile.topics[0] = str_topic
        await target_gateway.start_conversation(conv_id, p.peer,
                                                p.assigned_profile,
                                                p.peer_conversation_guid)

    self._reset_inactivity_timer(conv_id)
def createConversation(userStarted, partner):
    global conversations
    c = Conversation(createNewUid(), userStarted, partner)
    conversations += [c]
    userStarted.conversations += [c]
    return c
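# A tiny usage sketch for createConversation; the _FakeUser class stands in
# for whatever user object the real code uses (it only needs a
# 'conversations' list).
class _FakeUser:
    def __init__(self, name):
        self.name = name
        self.conversations = []

alice, bob = _FakeUser('alice'), _FakeUser('bob')
conv = createConversation(alice, bob)
assert conv in alice.conversations and conv in conversations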