def get_system(name, args, schema=None, timed=False, model_path=None):
    # Imports are local to each branch so unused backends are never loaded.
    from core.price_tracker import PriceTracker
    lexicon = PriceTracker(args.price_tracker_model)
    if name == 'rulebased':
        from rulebased_system import RulebasedSystem
        from model.generator import Templates, Generator
        from model.manager import Manager
        templates = Templates.from_pickle(args.templates)
        generator = Generator(templates)
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    elif name == 'hybrid':
        from hybrid_system import HybridSystem
        from neural_system import PytorchNeuralSystem
        from model.generator import Templates, Generator
        templates = Templates.from_pickle(args.templates)
        manager = PytorchNeuralSystem(args, schema, lexicon, model_path, timed)
        generator = Generator(templates)
        return HybridSystem(lexicon, generator, manager, timed)
    elif name == 'cmd':
        from cmd_system import CmdSystem
        return CmdSystem()
    elif name == 'pt-neural':
        from neural_system import PytorchNeuralSystem
        assert model_path
        return PytorchNeuralSystem(args, schema, lexicon, model_path, timed)
    else:
        raise ValueError('Unknown system %s' % name)
def get_system(name, args, schema, model_path=None, timed=False):
    lexicon = Lexicon.from_pickle(args.lexicon)
    templates = Templates.from_pickle(args.templates)
    if name == 'rulebased':
        generator = Generator(templates)
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    elif name == 'cmd':
        return CmdSystem()
    else:
        raise ValueError('Unknown system %s' % name)
def get_system(name, args, schema=None, timed=False, model_path=None):
    if name in ('rulebased', 'neural'):
        lexicon = Lexicon(schema, args.learned_lex, stop_words=args.stop_words, lexicon_path=args.lexicon)
        if args.inverse_lexicon:
            realizer = InverseLexicon.from_file(args.inverse_lexicon)
        else:
            realizer = DefaultInverseLexicon()
    if name == 'rulebased':
        templates = Templates.from_pickle(args.templates)
        generator = Generator(templates)
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    elif name == 'neural':
        assert args.model_path
        return NeuralSystem(schema, lexicon, args.model_path, args.fact_check, args.decoding, realizer=realizer)
    elif name == 'cmd':
        return CmdSystem()
    else:
        raise ValueError('Unknown system %s' % name)
def get_system(name, args, schema=None, timed=False, model_path=None):
    lexicon = PriceTracker(args.price_tracker_model)
    if name == 'rulebased':
        templates = Templates.from_pickle(args.templates)
        generator = Generator(templates)
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    #elif name == 'config-rulebased':
    #    configs = read_json(args.rulebased_configs)
    #    return ConfigurableRulebasedSystem(configs, lexicon, timed_session=timed, policy=args.config_search_policy, max_chats_per_config=args.chats_per_config, db=args.trials_db, templates=templates)
    elif name == 'cmd':
        return CmdSystem()
    elif name.startswith('ranker'):
        # TODO: hack; the per-index retrievers and the 'ranker-ir1'/'ranker-ir2'
        # branches that depend on them are disabled.
        #retriever1 = Retriever(args.index+'-1', context_size=args.retriever_context_len, num_candidates=args.num_candidates)
        #retriever2 = Retriever(args.index+'-2', context_size=args.retriever_context_len, num_candidates=args.num_candidates)
        retriever = Retriever(args.index, context_size=args.retriever_context_len, num_candidates=args.num_candidates)
        if name == 'ranker-ir':
            return IRRankerSystem(schema, lexicon, retriever)
        #elif name == 'ranker-ir1':
        #    return IRRankerSystem(schema, lexicon, retriever1)
        #elif name == 'ranker-ir2':
        #    return IRRankerSystem(schema, lexicon, retriever2)
        elif name == 'ranker-neural':
            return NeuralRankerSystem(schema, lexicon, retriever, model_path, args.mappings)
        else:
            raise ValueError('Unknown ranker %s' % name)
    elif name in ('neural-gen', 'neural-sel'):
        assert model_path
        return NeuralSystem(schema, lexicon, model_path, args.mappings, args.decoding, index=args.index, num_candidates=args.num_candidates, retriever_context_len=args.retriever_context_len, timed_session=timed)
    else:
        raise ValueError('Unknown system %s' % name)
def get_system(name, args, schema=None, timed=False):
    lexicon = Lexicon(schema.values['owner'])
    if name == 'rulebased':
        templates = Templates.from_pickle(args.templates)
        generator = Generator(templates)
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    elif name == 'cmd':
        return CmdSystem()
    # elif name == 'neural':
    #     return NeuralSystem(args.model_file, args.temperature, timed_session=timed, gpu=args.gpu)
    else:
        raise ValueError('Unknown system %s' % name)
def get_system(name, args, schema=None, timed=False, model_path=None):
    lexicon = Lexicon(schema.values['item'])
    if name == 'rulebased':
        templates = Templates.from_pickle(args.templates)
        generator = Generator(templates)
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    elif name == 'hybrid':
        assert model_path
        templates = Templates.from_pickle(args.templates)
        manager = PytorchNeuralSystem(args, schema, lexicon, model_path, timed)
        generator = Generator(templates)
        return HybridSystem(lexicon, generator, manager, timed)
    elif name == 'cmd':
        return CmdSystem()
    elif name == 'fb-neural':
        assert model_path
        return FBNeuralSystem(model_path, args.temperature, timed_session=timed, gpu=False)
    elif name == 'pt-neural':
        assert model_path
        return PytorchNeuralSystem(args, schema, lexicon, model_path, timed)
    else:
        raise ValueError('Unknown system %s' % name)
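# Usage sketch (an assumption, not part of the original code): one way a factory like
# the variant above could be driven from a small CLI. The argparse flags, the
# SimpleNamespace schema stub, and the item list are hypothetical stand-ins; the real
# project supplies its own schema object and argument definitions.
import argparse
from types import SimpleNamespace

parser = argparse.ArgumentParser()
parser.add_argument('--agent', default='rulebased', help='System name passed to get_system')
parser.add_argument('--templates', help='Path to pickled templates (rulebased/hybrid)')
parser.add_argument('--policy', help='Path to pickled dialogue manager (rulebased)')
parser.add_argument('--checkpoint', help='Model checkpoint for the neural systems')
args = parser.parse_args()

# The variant above only reads schema.values['item'] when building its Lexicon, so a
# minimal stub holding an item list (cf. Lexicon(['ball', 'hat', 'book']) below) suffices here.
schema = SimpleNamespace(values={'item': ['ball', 'hat', 'book']})

system = get_system(args.agent, args, schema=schema, timed=False, model_path=args.checkpoint)
print('Built %s for agent type %r' % (type(system).__name__, args.agent))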
parser = argparse.ArgumentParser()
parser.add_argument('--transcripts', nargs='*', help='JSON transcripts to extract templates')
parser.add_argument('--max-examples', default=-1, type=int)
parser.add_argument('--templates', help='Path to load templates')
parser.add_argument('--policy', help='Path to load model')
parser.add_argument('--schema-path', help='Path to schema')
parser.add_argument('--agent', help='Only consider examples with the given type of agent')
add_price_tracker_arguments(parser)
args = parser.parse_args()

lexicon = PriceTracker(args.price_tracker_model)
#templates = Templates.from_pickle(args.templates)
templates = Templates()
manager = Manager.from_pickle(args.policy)
analyzer = Analyzer(lexicon)

# TODO: skip examples
examples = read_examples(args.transcripts, args.max_examples, Scenario)
agent = args.agent
if agent is not None:
    examples = [e for e in examples if agent in e.agents.values()]
analyzer.example_stats(examples, agent=agent)
#import sys; sys.exit()

parsed_dialogues = []
for example in examples:
    utterances = parse_example(example, lexicon, templates)
    parsed_dialogues.append(utterances)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--transcripts', nargs='*', help='JSON transcripts to extract templates')
    parser.add_argument('--max-examples', default=-1, type=int)
    parser.add_argument('--templates', help='Path to load templates')
    parser.add_argument('--templates-output', help='Path to save templates')
    parser.add_argument('--model', help='Path to load model')
    parser.add_argument('--model-output', help='Path to save the dialogue manager model')
    args = parser.parse_args()

    examples = read_examples(args.transcripts, args.max_examples, Scenario)
    parsed_dialogues = []
    templates = Templates()
    lexicon = Lexicon(['ball', 'hat', 'book'])
    for example in examples:
        utterances = parse_example(example, lexicon, templates)
        parsed_dialogues.append(utterances)

    templates.finalize()
    templates.save(args.templates_output)
    templates.dump(n=10)

    # Train n-gram model
    sequences = []
    for d in parsed_dialogues:
        sequences.append([u.lf.intent for u in d])
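# Illustration only (an assumption about the idea, not the project's code): the intent
# sequences collected above are the training data for an n-gram dialogue manager. The
# helper below is NOT Manager.from_train; it is a self-contained bigram sketch of the
# same kind of model, with hypothetical intent names in the toy data.
from collections import Counter, defaultdict

def train_bigram_policy(sequences, start='<start>'):
    """Count intent-to-intent transitions and normalize them into probabilities."""
    counts = defaultdict(Counter)
    for seq in sequences:
        prev = start
        for intent in seq:
            counts[prev][intent] += 1
            prev = intent
    policy = {}
    for prev, counter in counts.items():
        total = float(sum(counter.values()))
        policy[prev] = {intent: count / total for intent, count in counter.items()}
    return policy

# Hypothetical intent sequences, in the same shape as `sequences` above:
toy_sequences = [['greet', 'init-price', 'counter-price', 'agree'],
                 ['greet', 'inquire', 'init-price', 'agree']]
policy = train_bigram_policy(toy_sequences)
print(policy['greet'])  # {'init-price': 0.5, 'inquire': 0.5}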
parser.add_argument('--lexicon', help='Path to pickled lexicon')
parser.add_argument('--transcripts', nargs='*', help='JSON transcripts to extract templates')
parser.add_argument('--max-examples', default=-1, type=int)
parser.add_argument('--templates', help='Path to load templates')
parser.add_argument('--reviews', help='Path to load reviews')
parser.add_argument('--templates-output', help='Path to save templates')
parser.add_argument('--model', help='Path to load model')
parser.add_argument('--model-output', help='Path to save the dialogue manager model')
args = parser.parse_args()

examples = read_examples(args.transcripts, args.max_examples, Scenario)
parsed_dialogues = []
templates = Templates()
lexicon = Lexicon.from_pickle(args.lexicon)
for example in examples:
    utterances = parse_example(example, lexicon, templates)
    parsed_dialogues.append(utterances)

# Train n-gram model
sequences = []
for d in parsed_dialogues:
    sequences.append([u.lf.intent for u in d])
manager = Manager.from_train(sequences)
manager.save(args.model_output)

if args.reviews: