def test_simulate_multidomain_hdc(self):
    """Run the HDC policy for domains in 2 multi-domain dialogues.

    Renamed from ``test_simulate_multidomain``: a second method with that
    exact name is defined later in this class, which silently shadowed
    this definition so the test runner never executed it.
    """
    Settings.init(config_file='./tests/test_configs/simulate_multiDomains_HDC.cfg')
    ContextLogger.createLoggingHandlers(config=Settings.config)
    logger = ContextLogger.getLogger('')
    logger.info("Starting HDC Multidomain Simulation")
    # FlatOntologyManager holds a singleton; reload to reset it between tests.
    reload(Ontology.FlatOntologyManager)
    Ontology.init_global_ontology()
    simulator = Simulate.SimulationSystem(error_rate=0.1)
    simulator.run_dialogs(2)  # run 2 dialogues
def test_simulate_multidomain(self):
    """Run 2 dialogues with the multidomain system and GP policies.

    (NB: config needs to explicitly set each domain to use gp actually...)
    """
    config_path = './tests/test_configs/simulate_multiDomains_GP.cfg'
    Settings.init(config_file=config_path)
    ContextLogger.createLoggingHandlers(config=Settings.config)
    log = ContextLogger.getLogger('')
    log.info("Starting GP Multidomain Simulation")
    # FlatOntologyManager holds a singleton; reload to reset it between tests.
    reload(Ontology.FlatOntologyManager)
    Ontology.init_global_ontology()
    system = Simulate.SimulationSystem(error_rate=0.1)
    system.run_dialogs(2)  # two dialogues
def test_simulate_singledomain_for_all_domains_gp(self):
    """Loop over all domains and run 2 dialogues with GP policy in each.

    Renamed from ``test_simulate_singledomain_for_all_domains``: a later
    method in this class reused that exact name, silently shadowing this
    definition so the test runner never executed it.

    NOTE: numpy.random is deliberately left unseeded here, so each run
    exercises a random combination of belief-tracker / kernel settings
    (based on the system clock). THIS IS !!!REALLY REALLY REALLY!!! NOT
    IN THE SPIRIT OF (UNIT) TESTING - WHICH SHOULD NEVER BE ~RANDOM.
    It is done to get a little more coverage without writing explicit
    permutations of tests.
    """
    import numpy.random as nprand
    Settings.init(config_file='./tests/test_configs/simulate_singledomain_for_all_domains.cfg')
    ContextLogger.createLoggingHandlers(config=Settings.config)
    logger = ContextLogger.getLogger('')
    domains = TestingUtils.get_loop_over_domains()
    for dstring in domains:
        Settings.config.set("GENERAL", 'domains', dstring)
        logger.info("Starting GP Single Domain Simulation for " + dstring)
        # [policy_DOMAINTAG] -- learning GP policy for this domain.
        policy_sec = 'policy_' + dstring
        Settings.config.add_section(policy_sec)
        Settings.config.set(policy_sec, 'learning', 'True')
        Settings.config.set(policy_sec, 'policytype', 'gp')
        # No inpolicy file given --> starts with empty file (random policy).
        Settings.config.set(policy_sec, 'outpolicyfile', 'tests/test_gp/' + dstring)
        # Coin-flip between the two belief trackers.
        Settings.config.set(policy_sec, 'belieftype',
                            'baseline' if nprand.uniform() > 0.5 else 'focus')
        # [gppolicy_DOMAINTAG] -- randomly chosen kernel configuration.
        gp_sec = 'gppolicy_' + dstring
        Settings.config.add_section(gp_sec)
        Settings.config.set(gp_sec, 'kernel',
                            'polysort' if nprand.uniform() > 0.5 else 'gausssort')
        Settings.config.set(gp_sec, 'actionkerneltype',
                            'delta' if nprand.uniform() > 0.5 else 'hdc')
        # FlatOntologyManager holds a singleton; reload to reset it per domain.
        reload(Ontology.FlatOntologyManager)
        Ontology.init_global_ontology()
        simulator = Simulate.SimulationSystem(error_rate=0.1)
        simulator.run_dialogs(2)  # run 2 dialogues
def test_simulate_singledomain_for_all_domains(self):
    """Loop over all domains and run 2 dialogues via HDC policy in each."""
    Settings.init(config_file='./tests/test_configs/simulate_singledomain_for_all_domains.cfg')
    ContextLogger.createLoggingHandlers(config=Settings.config)
    log = ContextLogger.getLogger('')
    for domain in TestingUtils.get_loop_over_domains():
        Settings.config.set("GENERAL", 'domains', domain)
        # No policy settings given --> defaults to HDC.
        log.info("Starting HDC Single Domain Simulation for " + domain)
        # FlatOntologyManager holds a singleton; reload to reset it per domain.
        reload(Ontology.FlatOntologyManager)
        Ontology.init_global_ontology()
        system = Simulate.SimulationSystem(error_rate=0.1)
        system.run_dialogs(2)  # two dialogues