"""Train a HaRe model on a large batch of identical mock conversations,
then visualize the model's status history for a conversation.

Every generated conversation has the same four utterances, and speaker 'b'
is labeled positive (1) in each one.
"""

from hare import Hare, Conversation
from hare.tensorflowbrain import BiGruBrain

# Number of (identical) mock conversations used as training data.
NR_OF_MOCK_CONVERSATIONS = 10000

# (speaker, content) pairs making up each mock conversation.
MOCK_TURNS = [
    ('a', 'c c c c c'),
    ('b', 'c c c c c'),
    ('b', 'c c c c b'),
    ('a', 'c c c c a'),
]

mockhare = Hare()
mockhare.brain = BiGruBrain()

for _ in range(NR_OF_MOCK_CONVERSATIONS):
    conversation = Conversation()
    for speaker, content in MOCK_TURNS:
        conversation.add_utterance(speaker=speaker, content=content)
    conversation.label_speaker('b', 1)
    mockhare.add_conversation(conversation)

mockhare.train()
mockhare.visualize_history_for_conversation()
# NOTE(review): incomplete fragment — this line begins with `elif`, so the
# enclosing `for line in ...:` loop, its leading `if` branch, the source file
# handle, and the definitions of `conversations`, `current_conversation`, and
# `NR_OF_CONVERSATIONS` are all outside this view. Left byte-identical.
# What the visible part does: when a line starts with '#', it closes the
# current conversation (labeling the speaker named after '#' as 1, skipping
# malformed markers via IndexError), appends it to `conversations`, prints a
# progress count every 100 conversations, and stops at NR_OF_CONVERSATIONS;
# otherwise it parses the line as tab-separated "speaker\tcontent" and adds it
# as an utterance. Afterwards it trains a BiGRU-brained Hare (with
# downsampling and a 500-token max sequence length) and saves it as 'moba'.
elif line[0] == '#': try: current_conversation.label_speaker(line.split()[1], 1) except IndexError: continue conversations.append(current_conversation) current_conversation = Conversation() if len(conversations) % 100 == 0: print(len(conversations)) if len(conversations) == NR_OF_CONVERSATIONS: break continue speaker, content = line.split('\t') current_conversation.add_utterance(speaker, content) #Add to a hare object moba_hare = Hare() for conversation in conversations: moba_hare.add_conversation(conversation) moba_hare.brain = BiGruBrain() moba_hare.brain.downsampling = True moba_hare.brain._max_sequence_length = 500 moba_hare.train() moba_hare.save('moba')
"""Train a minimal HaRe model on a single two-utterance conversation using
GloVe embeddings, save it as a pretrained model, and visualize its history.
"""

from hare import Hare, Conversation
from hare.brain import BiGruBrain

# Location of the pretrained word embeddings fed to the brain.
EMBEDDING_LOCATION = '/vol/bigdata/word_embeddings/glove/glove.6B.50d.txt'
# Where the trained model is stored.
SAVE_LOCATION = '/vol/tensusers2/wstoop/HaRe/hare/pretrained/simple'

brain: BiGruBrain = BiGruBrain()
brain.embedding_location = EMBEDDING_LOCATION
brain.verbose = True

hare = Hare()
hare.brain = brain

# A single toy conversation; speaker 'a' is labeled positive (1).
convo = Conversation()
convo.add_utterance(speaker='a', content='hate you')
convo.add_utterance(speaker='b', content='i love you')
convo.label_speaker('a', 1)
hare.add_conversation(convo)

hare.train()
hare.save(SAVE_LOCATION)

hare.update_status_history_for_conversation()
hare.visualize_history_for_conversation()
# NOTE(review): incomplete fragment — this line starts mid-branch inside a
# missing `for line in ...:` loop; the file handle and the definitions of
# `line`, `conversations`, `NR_OF_CONVERSATIONS`, `DOWNSAMPLE_RATIOS`, and
# `TRAINING_SIZES` are all outside this view. Left byte-identical.
# What the visible part does: finishes the conversation-parsing loop (starting
# a fresh Conversation, printing progress every 100 conversations, stopping at
# NR_OF_CONVERSATIONS, and otherwise adding tab-separated "speaker\tcontent"
# utterances), then runs a grid of experiments: for every combination of
# downsample ratio and training-set size it trains a downsampling BiGRU-brained
# Hare (max sequence length 500) on a prefix of the conversations and saves it
# under a name encoding both hyperparameters.
current_conversation = Conversation() if len(conversations) % 100 == 0: print(len(conversations)) if len(conversations) == NR_OF_CONVERSATIONS: break continue speaker, content = line.split('\t') current_conversation.add_utterance(speaker, content) #Add to a hare object for downsample_ratio in DOWNSAMPLE_RATIOS: for training_size in TRAINING_SIZES: print('===', 'training', downsample_ratio, training_size, '===') exp_hare = Hare() for conversation in conversations[:training_size]: exp_hare.add_conversation(conversation) exp_hare.brain = BiGruBrain() exp_hare.brain.downsampling = True exp_hare.brain.downsampling_ratio = downsample_ratio exp_hare.brain._max_sequence_length = 500 exp_hare.train() exp_hare.save('moba_' + str(downsample_ratio) + '_' + str(training_size))