def get_state_manager_from_config(config_filename):
    """Return a state manager that is configured in the file at config_filename.
    """
    with open(config_filename) as config_file:
        parsed_config = config_parse.load_config(config_file)
    # Only the state_persistence section drives the manager construction.
    return PersistenceManagerFactory.from_config(parsed_config.state_persistence)
def convert_state(opts): source_manager = get_state_manager_from_config(opts.source) dest_manager = get_state_manager_from_config(opts.dest) with open(opts.source) as fh: config = config_parse.load_config(fh) msg = "Migrating state from %s to %s" print msg % (source_manager._impl, dest_manager._impl) job_items, service_items = config.jobs.values(), config.services.values() jobs_states, services_states = source_manager.restore(job_items, service_items) source_manager.cleanup() for name, job in jobs_states.iteritems(): dest_manager.save_job(Item(name, job)) print "Migrated %s jobs." % len(jobs_states) for name, service in services_states.iteritems(): dest_manager.save_service(Item(name, service)) print "Migrated %s services." % len(services_states) dest_manager.cleanup()
def reconfigure(self):
    """Apply test configuration number 1 to the mcp as a reconfiguration."""
    raw_config = self._get_config(1, self.test_dir)
    parsed = config_parse.load_config(raw_config)
    self.mcp.apply_config(parsed, reconfigure=True)
def setup_mcp(self):
    """Build a MasterControlProgram in a fresh temp dir with config 0."""
    self.test_dir = tempfile.mkdtemp()
    self.mcp = mcp.MasterControlProgram(self.test_dir, 'config')
    raw_config = self._get_config(0, self.test_dir)
    parsed = config_parse.load_config(raw_config)
    self.mcp.apply_config(parsed)
    # NOTE(review): tail of parse_args() — its def line is above this chunk.
    if not opts.name:
        parser.error("A Job name is required.")
    return opts


def build_diagram(config, job_name):
    """Return graphviz dot source for job_name's action dependency graph.

    Raises ValueError when job_name is not present in config.jobs.
    """
    if job_name not in config.jobs:
        raise ValueError("Could not find Job %s" % job_name)
    job = config.jobs[job_name]
    edges, nodes = [], []
    for action in job.actions.itervalues():
        # Actions with no prerequisites are drawn as 'invhouse' roots.
        shape = 'invhouse' if not action.requires else 'rect'
        nodes.append("node [shape = %s]; %s" % (shape, action.name))
        for required_action in action.requires:
            # One edge per dependency: prerequisite -> dependent action.
            edges.append("%s -> %s" % (required_action, action.name))
    return "digraph g{%s\n%s}" % ('\n'.join(nodes), '\n'.join(edges))


if __name__ == '__main__':
    opts = parse_args()
    with open(opts.config, 'r') as fh:
        config = config_parse.load_config(fh)
    graph = build_diagram(config, opts.name)
    # Output file is named after the job, e.g. "my_job.dot".
    with open('%s.dot' % opts.name, 'w') as fh:
        fh.write(graph)
def test_attributes(self):
    """Parse self.config and compare each section against a hand-built
    expected TronConfig, section by section for debuggable failures."""
    test_config = load_config(StringIO.StringIO(self.config))
    expected = TronConfig(
        working_dir=None,
        output_stream_dir='/tmp',
        command_context=FrozenDict(**{
            'python': '/usr/bin/python',
            'batch_dir': '/tron/batch/test/foo'
        }),
        ssh_options=ConfigSSHOptions(
            agent=True,
            identities=['tests/test_id_rsa'],
        ),
        notification_options=None,
        time_zone=None,
        nodes=FrozenDict(**{
            'batch0': ConfigNode(name='batch0', hostname='batch0'),
            'batch1': ConfigNode(name='batch1', hostname='batch1')
        }),
        node_pools=FrozenDict(**{
            'batch0_batch1': ConfigNodePool(nodes=['batch0', 'batch1'],
                                            name='batch0_batch1')
        }),
        jobs=FrozenDict(**{
            # Interval-scheduled job with a cleanup action.
            'test_job0': ConfigJob(
                name='test_job0',
                node='batch0',
                schedule=ConfigIntervalScheduler(
                    timedelta=datetime.timedelta(0, 20)),
                actions=FrozenDict(**{
                    'action0_0': ConfigAction(
                        name='action0_0',
                        command='test_command0.0',
                        requires=(),
                        node=None)
                }),
                queueing=True,
                run_limit=50,
                all_nodes=False,
                cleanup_action=ConfigCleanupAction(
                    name='cleanup',
                    command='test_command0.1',
                    requires=(),
                    node=None),
                enabled=True),
            # Daily job restricted to Mon/Wed/Fri with an action dependency.
            'test_job1': ConfigJob(
                name='test_job1',
                node='batch0',
                enabled=True,
                schedule=ConfigDailyScheduler(
                    ordinals=None,
                    weekdays=set([0, 2, 4]),
                    monthdays=None,
                    months=None,
                    timestr='00:30',
                ),
                actions=FrozenDict(**{
                    'action1_1': ConfigAction(
                        name='action1_1',
                        command='test_command1.1',
                        requires=('action1_0',),
                        node=None),
                    'action1_0': ConfigAction(
                        name='action1_0',
                        command='test_command1.0',
                        requires=(),
                        node=None)
                }),
                queueing=True,
                run_limit=50,
                all_nodes=False,
                cleanup_action=None),
            'test_job2': ConfigJob(
                name='test_job2',
                node='batch1',
                enabled=True,
                schedule=ConfigDailyScheduler(
                    ordinals=None,
                    weekdays=None,
                    monthdays=None,
                    months=None,
                    timestr='16:30',
                ),
                actions=FrozenDict(**{
                    'action2_0': ConfigAction(
                        name='action2_0',
                        command='test_command2.0',
                        requires=(),
                        node=None)
                }),
                queueing=True,
                run_limit=50,
                all_nodes=False,
                cleanup_action=None),
            # Constant-scheduled job; action3_2 depends on the other two and
            # overrides its node.
            'test_job3': ConfigJob(
                name='test_job3',
                node='batch1',
                schedule=ConfigConstantScheduler(),
                enabled=True,
                actions=FrozenDict(**{
                    'action3_1': ConfigAction(
                        name='action3_1',
                        command='test_command3.1',
                        requires=(),
                        node=None),
                    'action3_0': ConfigAction(
                        name='action3_0',
                        command='test_command3.0',
                        requires=(),
                        node=None),
                    'action3_2': ConfigAction(
                        name='action3_2',
                        command='test_command3.2',
                        requires=('action3_0', 'action3_1'),
                        node='batch0')
                }),
                queueing=True,
                run_limit=50,
                all_nodes=False,
                cleanup_action=None),
            # Disabled all-nodes job on the node pool.
            'test_job4': ConfigJob(
                name='test_job4',
                node='batch0_batch1',
                schedule=ConfigDailyScheduler(
                    ordinals=None,
                    weekdays=None,
                    monthdays=None,
                    months=None,
                    timestr='00:00',
                ),
                actions=FrozenDict(**{
                    'action4_0': ConfigAction(
                        name='action4_0',
                        command='test_command4.0',
                        requires=(),
                        node=None)}),
                queueing=True,
                run_limit=50,
                all_nodes=True,
                cleanup_action=None,
                enabled=False)
        }),
        services=FrozenDict(**{
            'service0': ConfigService(
                name='service0',
                node='batch0_batch1',
                pid_file='/var/run/%(name)s-%(instance_number)s.pid',
                command='service_command0',
                monitor_interval=20,
                restart_interval=None,
                count=2)
        }
        )
    )

    # we could just do a big assert_equal here, but it would be hella hard
    # to debug failures that way.
    assert_equal(test_config.command_context, expected.command_context)
    assert_equal(test_config.ssh_options, expected.ssh_options)
    assert_equal(test_config.notification_options,
                 expected.notification_options)
    assert_equal(test_config.time_zone, expected.time_zone)
    assert_equal(test_config.nodes, expected.nodes)
    assert_equal(test_config.node_pools, expected.node_pools)
    assert_equal(test_config.jobs['test_job0'], expected.jobs['test_job0'])
    assert_equal(test_config.jobs['test_job1'], expected.jobs['test_job1'])
    assert_equal(test_config.jobs['test_job2'], expected.jobs['test_job2'])
    assert_equal(test_config.jobs['test_job3'], expected.jobs['test_job3'])
    assert_equal(test_config.jobs['test_job4'], expected.jobs['test_job4'])
    assert_equal(test_config.jobs, expected.jobs)
    assert_equal(test_config.services, expected.services)
    assert_equal(test_config, expected)
    # Regression check: job4 must come through with enabled=False.
    assert_equal(test_config.jobs['test_job4'].enabled, False)
def load_config(self, reconfigure=False):
    """Parse self.config_file and apply the resulting configuration.

    reconfigure is forwarded to apply_config unchanged.
    """
    log.info("Loading configuration from %s" % self.config_file)
    with open(self.config_file, 'r') as config_file:
        parsed = config_parse.load_config(config_file)
    self.apply_config(parsed, reconfigure=reconfigure)
def _load_config(self, reconfigure=False):
    """Read config data from self.config_filepath and apply it."""
    log.info("Loading configuration from %s" % self.config_filepath)
    with open(self.config_filepath, 'r') as config_file:
        self.apply_config(config_parse.load_config(config_file),
                          reconfigure=reconfigure)