def setup_container(self):
    """Build a ConfigContainer from the master config plus one named fragment."""
    # NOTE: yaml.load without an explicit Loader is unsafe on untrusted input.
    named_raw = yaml.load(NamedConfigTestCase.config)
    master_validated = valid_config(yaml.load(self.config))
    self.config_mapping = {
        MASTER_NAMESPACE: master_validated,
        'other': validate_fragment('other', named_raw),
    }
    self.container = config_parse.ConfigContainer(self.config_mapping)
def test_create(self):
    """ConfigContainer.create should produce one config entry per namespace."""
    raw_configs = {
        MASTER_NAMESPACE: yaml.load(self.config),
        'other': yaml.load(NamedConfigTestCase.config),
    }
    container = config_parse.ConfigContainer.create(raw_configs)
    assert_equal(set(container.configs.keys()), {'MASTER', 'other'})
def get_key_from_last_line(filepath, key):
    """Return ``key`` from the YAML mapping on the last line of *filepath*.

    Returns None when the file is empty, or when the last line does not
    parse to a mapping (e.g. a blank line or a bare scalar).
    """
    with open(filepath) as f:
        lines = f.readlines()
    if not lines:
        return None
    # NOTE(review): yaml.load without an explicit Loader can run arbitrary
    # constructors on untrusted input; prefer yaml.safe_load if this file's
    # contents are not fully trusted.
    content = yaml.load(lines[-1])
    # Bug fix: a blank or scalar last line parses to None/str, which has no
    # .get — the original raised AttributeError here.
    if not isinstance(content, dict):
        return None
    return content.get(key)
def notify(notify_queue, ignored, filepath, mask):
    """Inspect the action-runner status file and report the run's outcome.

    Reads the final YAML entry of the status file; if the run has finished
    (or its runner pid has vanished), stops the reactor and pushes an
    (exit_code, error_msg) pair onto *notify_queue*.
    """
    with open(filepath.path) as f:
        entries = yaml.load(f.readlines()[-1])
    pid = entries.get('runner_pid')
    return_code = entries.get('return_code')
    exit_code = None
    error_msg = None
    if return_code is None:
        # No return code yet: only treat this as terminal if the runner
        # process itself has disappeared.
        if not psutil.pid_exists(pid):
            exit_code = 1
            error_msg = (f'Action runner pid {pid} no longer running; '
                         'unable to recover it')
    elif return_code < 0:
        # From the subprocess docs on the return code of a process:
        # "A negative value -N indicates that the child was terminated by
        # signal N (POSIX only)."  We should always exit with a positive
        # code, so we take the absolute value of the return code.
        exit_code = abs(return_code)
        error_msg = ('Action run killed by signal '
                     f'{signal.Signals(exit_code).name}')
    else:
        exit_code = return_code
    if exit_code is not None:
        reactor.stop()
        notify_queue.put((exit_code, error_msg))
def read_config(filename=CONFIG_FILE_NAME):
    """Read and parse the YAML config file at *filename*.

    Returns the parsed document, or {} if the file cannot be read.
    """
    try:
        with opener(filename, 'r') as config_file:
            # NOTE(review): yaml.load without a Loader is unsafe if the
            # config file is not fully trusted; consider yaml.safe_load.
            return yaml.load(config_file)
    except (IOError, OSError):
        # Bug fix: log the file that was actually requested, not the
        # module-level default — the original always logged CONFIG_FILE_NAME
        # even when the caller passed a different filename.
        log.info("Failed to read config file: %s" % filename)
        return {}
def main():
    """Generate load-testing copies of the YAML job configs in args.src."""
    args = parse_args()
    for filename in os.listdir(args.src):
        print('filename = {}'.format(filename))
        filepath = os.path.join(args.src, filename)
        # Guard clause: skip anything that is not a .yaml regular file.
        if not (os.path.isfile(filepath) and filepath.endswith(".yaml")):
            continue
        with open(filepath, "r") as f:
            config = yaml.load(f)
        if filename == "MASTER.yaml":
            # Strip everything from the master config except job definitions.
            for key in [k for k in config if k != 'jobs']:
                del config[key]
        jobs = config.get("jobs", [])
        if jobs is not None:
            for job in jobs:
                # Pin every job to localhost and neutralize its commands so
                # the copies are safe to run anywhere.
                job['node'] = "localhost"
                job.pop('monitoring', None)
                for action in job.get("actions", []):
                    action['command'] = 'sleep 10s'
                    if "node" in action:
                        action['node'] = "localhost"
        # Emit args.multiple numbered copies of the rewritten config.
        for copy_index in range(args.multiple):
            out_filepath = os.path.join(
                args.dest,
                'load_testing_' + str(copy_index) + '-' + filename)
            with open(out_filepath, 'w') as outf:
                yaml.dump(config, outf, default_flow_style=False)
def check_config(self, name, content, config_hash):
    """Update a configuration fragment and reload the MCP."""
    # Reject the update if the caller's view of the config is out of date.
    if self.config_manager.get_hash(name) != config_hash:
        return "Configuration update will fail: config is stale, try again"
    try:
        # Parse into a new name instead of rebinding the `content` param.
        parsed = yaml.load(content)
        self.config_manager.validate_with_fragment(name, parsed)
    except Exception as e:
        return "Configuration update will fail: %s" % str(e)
def restore(self, keys):
    """Load saved state for *keys* from the YAML state file.

    Returns a dict mapping each key to its saved item, omitting keys whose
    stored value is missing or falsy.  Returns {} if the file does not exist.
    """
    if not os.path.exists(self.filename):
        return {}
    with open(self.filename, 'r') as fh:
        # Bug fix: an empty state file parses to None; fall back to an empty
        # buffer so the .get lookups below don't raise AttributeError.
        # NOTE(review): yaml.load without a Loader is unsafe on untrusted
        # input; consider yaml.safe_load for this state file.
        self.buffer = yaml.load(fh) or {}
    items = (self.buffer.get(key.type, {}).get(key.iden) for key in keys)
    key_item_pairs = zip(keys, items)
    # Drop pairs whose looked-up item is falsy (missing/None).
    return dict(filter(operator.itemgetter(1), key_item_pairs))
def test_invalid_named_update(self):
    """A fragment with an unknown top-level key should raise ConfigError."""
    parsed = yaml.load("""bozray:""")
    exception = assert_raises(
        ConfigError,
        validate_fragment,
        'foo',
        parsed,
    )
    assert_in(
        "Unknown keys in NamedConfigFragment : bozray",
        str(exception),
    )
def test_save(self):
    """Two successive saves should merge into the buffer and the file."""
    expected = {'one': {'five': 'dataz'}, 'two': {'seven': 'stars'}}
    # First save writes an initial value for one.five.
    self.store.save([
        (yamlstore.YamlKey('one', 'five'), 'barz'),
    ])
    # Second save overwrites one.five and adds two.seven.
    self.store.save([
        (yamlstore.YamlKey('two', 'seven'), 'stars'),
        (yamlstore.YamlKey('one', 'five'), 'dataz'),
    ])
    assert_equal(self.store.buffer, expected)
    with open(self.filename, 'r') as fh:
        assert_equal(yaml.load(fh), expected)
def test_delete(self):
    """Saving a None value should remove the key from buffer and file."""
    expected = {'state_a': {'five': 'barz'}}
    # Seed the store with three entries.
    self.store.save([
        (yamlstore.YamlKey('state_a', 'five'), 'barz'),
        (yamlstore.YamlKey('state_c', 'five'), 'delete_all_c'),
        (yamlstore.YamlKey('state_a', 'six'), 'delete_one_a'),
    ])
    # A None value marks an entry for deletion.
    self.store.save([
        (yamlstore.YamlKey('state_c', 'five'), None),
        (yamlstore.YamlKey('state_a', 'six'), None),
    ])
    assert_equal(self.store.buffer, expected)
    with open(self.filename, 'r') as fh:
        assert_equal(yaml.load(fh), expected)
def get_status_file(path):
    """Parse the status file at *path* as a single YAML document."""
    with open(path, 'r') as status_fh:
        return yaml.load(status_fh)
def test_attributes(self):
    """Validate a full named fragment and compare every parsed field.

    Builds the exact NamedTronConfig expected from ``self.config``, runs
    the raw YAML through validate_fragment under the 'test_namespace'
    namespace, then asserts job-by-job and service-by-service equality
    (per-item asserts first, so a failure pinpoints the offending entry).
    """
    expected = schema.NamedTronConfig(
        jobs=FrozenDict({
            # Interval-scheduled job with a cleanup action.
            'test_job0': schema.ConfigJob(
                name='test_job0',
                namespace='test_namespace',
                node='node0',
                monitoring={},
                schedule=ConfigIntervalScheduler(
                    timedelta=datetime.timedelta(0, 20),
                    jitter=None,
                ),
                actions=FrozenDict({
                    'action0_0': schema.ConfigAction(
                        name='action0_0',
                        command='test_command0.0',
                        requires=(),
                        node=None,
                    ),
                }),
                queueing=True,
                run_limit=50,
                all_nodes=False,
                cleanup_action=schema.ConfigCleanupAction(
                    name='cleanup',
                    command='test_command0.1',
                    node=None,
                ),
                enabled=True,
                max_runtime=None,
                allow_overlap=False,
                time_zone=None,
            ),
            # Daily job (MWF) with an action dependency and overlap allowed.
            'test_job1': schema.ConfigJob(
                name='test_job1',
                namespace='test_namespace',
                node='node0',
                enabled=True,
                monitoring={},
                schedule=schedule_parse.ConfigDailyScheduler(
                    days={1, 3, 5},
                    hour=0,
                    minute=30,
                    second=0,
                    original="00:30:00 MWF",
                    jitter=None,
                ),
                actions=FrozenDict({
                    'action1_1': schema.ConfigAction(
                        name='action1_1',
                        command='test_command1.1',
                        requires=('action1_0', ),
                        node=None,
                    ),
                    'action1_0': schema.ConfigAction(
                        name='action1_0',
                        command='test_command1.0 %(some_var)s',
                        requires=(),
                        node=None,
                    ),
                }),
                queueing=True,
                run_limit=50,
                all_nodes=False,
                cleanup_action=None,
                max_runtime=None,
                allow_overlap=True,
                time_zone=None,
            ),
            # Daily job with no day restriction.
            'test_job2': schema.ConfigJob(
                name='test_job2',
                namespace='test_namespace',
                node='node1',
                enabled=True,
                monitoring={},
                schedule=schedule_parse.ConfigDailyScheduler(
                    days=set(),
                    hour=16,
                    minute=30,
                    second=0,
                    original="16:30:00 ",
                    jitter=None,
                ),
                actions=FrozenDict({
                    'action2_0': schema.ConfigAction(
                        name='action2_0',
                        command='test_command2.0',
                        requires=(),
                        node=None,
                    ),
                }),
                queueing=True,
                run_limit=50,
                all_nodes=False,
                cleanup_action=None,
                max_runtime=None,
                allow_overlap=False,
                time_zone=None,
            ),
            # Constant-scheduled job with a two-way action dependency.
            'test_job3': schema.ConfigJob(
                name='test_job3',
                namespace='test_namespace',
                node='node1',
                schedule=ConfigConstantScheduler(),
                enabled=True,
                monitoring={},
                actions=FrozenDict({
                    'action3_1': schema.ConfigAction(
                        name='action3_1',
                        command='test_command3.1',
                        requires=(),
                        node=None,
                    ),
                    'action3_0': schema.ConfigAction(
                        name='action3_0',
                        command='test_command3.0',
                        requires=(),
                        node=None,
                    ),
                    'action3_2': schema.ConfigAction(
                        name='action3_2',
                        command='test_command3.2',
                        requires=('action3_0', 'action3_1'),
                        node='node0',
                    ),
                }),
                queueing=True,
                run_limit=50,
                all_nodes=False,
                cleanup_action=None,
                max_runtime=None,
                allow_overlap=False,
                time_zone=None,
            ),
            # Disabled, all-nodes job on a node pool.
            'test_job4': schema.ConfigJob(
                name='test_job4',
                namespace='test_namespace',
                node='NodePool',
                monitoring={},
                schedule=schedule_parse.ConfigDailyScheduler(
                    days=set(),
                    hour=0,
                    minute=0,
                    second=0,
                    original="00:00:00 ",
                    jitter=None,
                ),
                actions=FrozenDict({
                    'action4_0': schema.ConfigAction(
                        name='action4_0',
                        command='test_command4.0',
                        requires=(),
                        node=None,
                    ),
                }),
                queueing=True,
                run_limit=50,
                all_nodes=True,
                cleanup_action=None,
                enabled=False,
                max_runtime=None,
                allow_overlap=False,
                time_zone=None,
            ),
        }),
        services=FrozenDict(
            {
                'service0': schema.ConfigService(
                    namespace='test_namespace',
                    name='service0',
                    node='NodePool',
                    pid_file='/var/run/%(name)s-%(instance_number)s.pid',
                    command='service_command0',
                    monitor_interval=20,
                    monitor_retries=5,
                    restart_delay=None,
                    count=2,
                ),
                'service1': schema.ConfigService(
                    namespace='test_namespace',
                    name='service1',
                    node='NodePool',
                    pid_file='/var/run/%(name)s-%(instance_number)s.pid',
                    command='service_command1',
                    monitor_interval=40.0,
                    monitor_retries=5,
                    restart_delay=None,
                    count=20,
                ),
            },
        ),
    )
    # NOTE(review): yaml.load without a Loader is deprecated/unsafe on
    # untrusted input; fine here because self.config is a test fixture.
    test_config = validate_fragment(
        'test_namespace',
        yaml.load(self.config),
    )
    # Per-item asserts first so a failure points at the specific entry.
    assert_equal(test_config.jobs['test_job0'], expected.jobs['test_job0'])
    assert_equal(test_config.jobs['test_job1'], expected.jobs['test_job1'])
    assert_equal(test_config.jobs['test_job2'], expected.jobs['test_job2'])
    assert_equal(test_config.jobs['test_job3'], expected.jobs['test_job3'])
    assert_equal(test_config.jobs['test_job4'], expected.jobs['test_job4'])
    assert_equal(
        test_config.services['service0'],
        expected.services['service0'],
    )
    assert_equal(
        test_config.services['service1'],
        expected.services['service1'],
    )
    assert_equal(test_config.jobs, expected.jobs)
    assert_equal(test_config.services, expected.services)
    assert_equal(test_config, expected)
    assert_equal(test_config.jobs['test_job4'].enabled, False)
def from_string(content):
    """Parse *content* as YAML, wrapping parse failures in ConfigError."""
    try:
        parsed = yaml.load(content)
    except yaml_raw.error.YAMLError as e:
        raise ConfigError("Invalid config format: %s" % str(e))
    return parsed
def print_status_file(status_file):
    """Print each line of *status_file* parsed as its own YAML document.

    *status_file* is an open, readable text file object.
    """
    # Iterate the file object directly instead of materializing every line
    # with readlines(); line-by-line behavior is identical but this avoids
    # building a throwaway list for large status files.
    for line in status_file:
        print(yaml.load(line))
def main():
    """Migrate Tron jobs from one namespace to another on a remote host.

    Checks via the Tron API that the target jobs are not running, stops
    cron on the host, waits for the yelpsoa-configs branch to be merged,
    rsyncs configs, moves the jobs, re-runs namespace setup/cleanup, and
    restarts cron.  Aborts early if any job is still running.
    """
    args = parse_args()
    filename = args.source
    hostname = urlparse(args.server).hostname
    if filename.endswith(".yaml"):
        tron_client = client.Client(args.server)
        jobs_status = tron_client.jobs()
        is_migration_safe = True
        # NOTE(review): yaml.load without a Loader is unsafe on untrusted
        # input; this file is an operator-provided config.
        with open(filename, "r") as f:
            jobs = yaml.load(f)['jobs']
        job_names = [job['name'] for job in jobs]
        if args.job is not None:
            # only want to migrate specific job
            # Overwrite existing jobs since only migrating one job
            jobs = [job for job in jobs if job['name'] == args.job]
            if not jobs:
                raise ValueError(
                    f'Invalid job specified. Options were {job_names}')
            job_name_with_ns = args.old_ns + '.' + args.job
            is_migration_safe = is_migration_safe & check_job_if_running(
                jobs_status, job_name_with_ns)
        else:
            # Migrate all jobs in namespace
            for job_name in job_names:
                job_name_with_ns = args.old_ns + '.' + job_name
                is_migration_safe = is_migration_safe & check_job_if_running(
                    jobs_status, job_name_with_ns)
        if is_migration_safe is True:
            print(bcolors.OKBLUE + "Jobs are not running." + bcolors.ENDC)
        else:
            print(bcolors.WARNING +
                  "Some jobs are still running, abort this migration," +
                  bcolors.ENDC)
            return
        # try stop cron
        ssh_command(hostname, "sudo service cron stop")
        # wait until yelpsoa-configs branch is merged
        res = input(
            "Merge and push yelpsoa-configs branch. Ready to continue? [y/n]")
        if res == 'y':
            # wait 30 seconds after pushing the branch so the push propagates
            # (comment previously said 10 seconds; the sleep is 30)
            time.sleep(30)
            # rsync yelpsoa-configs
            command = "sudo rsync -a --delay-updates --contimeout=10 --timeout=10 --chmod=Du+rwx,go+rx --port=8731 --delete yelpsoa-slave.local.yelpcorp.com::yelpsoa-configs /nail/etc/services"
            ssh_command(hostname, command)
            # migrate jobs to new namespace
            command_jobs('move', jobs, args)
            # update new namespace
            ssh_command(hostname, "sudo paasta_setup_tron_namespace " + args.new_ns)
            # update old namespace if only one job is moving
            if args.job:
                ssh_command(hostname, "sudo paasta_setup_tron_namespace " + args.old_ns)
            # clean up namespace
            ssh_command(hostname, "sudo paasta_cleanup_tron_namespaces")
            # start cron
            ssh_command(hostname, "sudo service cron start")
    return