def test_acquire_lock_fails_without_workspace(self):
    with temp_dir() as directory:
        runner = Runner(directory, None)
    # Call runner.acquire_lock at this level, at which point directory
    # will have already been cleaned up.
    with self.assertRaises(SystemExit):
        runner.acquire_lock()

def getPendingJobs(self):
    # get all the json files that are in the sample directories
    logging.debug("%s - Looking for JSON job files in %s" % (getTimestamp(), self.__sampleDirectories))
    files = {}
    jobsToProcess = {}

    # instantiate a runner object in case it's needed
    runner = Runner("CommandLine")

    # recurse through and find the json files
    for directory in self.__sampleDirectories:
        # set directory as the absolute path
        directory = os.path.abspath(directory)

        # see if this directory exists
        if os.path.isdir(directory):
            for root, dirnames, filenames in os.walk(directory):
                for filename in fnmatch.filter(filenames, '*.json'):
                    # see if it is the right type
                    #logging.debug('%s - Looking at %s' % (getTimestamp(), os.path.join(root, filename)))
                    jsonData = open(os.path.join(root, filename))

                    try:
                        fileData = json.load(jsonData)
                    except ValueError:
                        print "\nERROR: The JSON file: %s could not be loaded by python as a JSON file.\n" % os.path.join(root, filename) + \
                            "Please ensure that the JSON file is not currently open in vim and that it is formatted correctly. Otherwise, delete it."
                        sys.exit(1)

                    # since other json files may be around, let's be sure they have the analysis type flag
                    # we can use this to filter things too
                    if 'analysis' in fileData and 'type' in fileData['analysis'] and 'status' in fileData:
                        # if the analysis type matches the given jobFilter, check to see if this job should be started
                        if fileData['analysis']['type'] in self.__jobFilters:
                            # if the status is 'queued' and the user specified requeue, then delete the current pending or running job
                            if 'sge_job_id' in fileData and (fileData['status'] == 'queued' and options.requeue):
                                logging.info('%s - Deleting Job ID %s' % (getTimestamp(), fileData['sge_job_id']))

                                # delete this json file's job id
                                command = "qdel %s" % fileData['sge_job_id']
                                runner.runCommandLine(command)

                                # reset the fileData status to pending to requeue this job
                                fileData['status'] = 'pending'

                            # if the status is 'pending' or if the user specified a status type to rerun, then start this job
                            if fileData['status'] == 'pending' or fileData['status'] == options.rerun:
                                # job was the right type so we can add it to the list
                                files[os.path.join(root, filename)] = directory

    # process each of the json files
    for file in files:
        # rename the json file so it won't get picked up by the next thread
        logging.debug('%s - Found %s' % (getTimestamp(), file))
        # shutil.move(file, '%s' % file)
        # shutil.copy(file, '%s_read' % (file))

        # add the file to the array
        jobsToProcess['%s' % (file)] = files[file]

    # return the array
    return(jobsToProcess)

def start(self):
    self.logger.debug('Starting up')

    # Read the config
    config = self.read_config()

    dataReader = DataReader(config, self.logger)
    dataParser = DataParser(config, self.logger)
    dataLogger = DataLogger(config, self.logger)
    imageReader = ImageReader(config, self.logger)
    imageDataWriter = ImageDataWriter(config, self.logger)

    runner = Runner(
        dataReader=dataReader,
        dataParser=dataParser,
        dataLogger=dataLogger,
        imageReader=imageReader,
        imageDataWriter=imageDataWriter,
        config=config,
        logger=self.logger
    )

    if len(sys.argv) > 1:
        runner.run(timestamp=int(sys.argv[1]))
    else:
        runner.run()

def test_random_enablement_zero(self):
    with patch('utility.check_output', autospec=True) as mock:
        with temp_dir() as directory:
            runner = Runner(directory, ChaosMonkey.factory())
            runner.random_chaos(run_timeout=1, enablement_timeout=0,
                                exclude_command=Kill.restart_cmd)
    self.assertEqual(mock.called, True)

def v2_run_playbook(hostnames, connection, playbook_path, inventory_path,
                    role, private_key_file=None, extra_tags={}, data={}):
    if not isinstance(hostnames, list):
        hostnames = [hostnames]

    run_data = {
        'type': role,
        'extra_tags': extra_tags,
        'tag_hash_values': '',
    }
    for key in data:
        run_data[key] = data[key]
    for key in extra_tags:
        run_data['tag_hash_values'] += ',"%s":"%s"' % (key, extra_tags[key])

    # The playbooks can require env set here :(
    if "AWS_ACCESS_KEY_ID" not in os.environ:
        os.environ['AWS_ACCESS_KEY_ID'] = settings.AWS_ACCESS_KEY_ID
        os.environ['AWS_SECRET_ACCESS_KEY'] = settings.AWS_SECRET_ACCESS_KEY

    runner = Runner(
        connection=connection,
        private_key_file=private_key_file,
        hostnames=hostnames,
        playbook=playbook_path,
        run_data=run_data,
        verbosity=8,
    )
    stats = runner.run()
    return stats

def test_random_assert_run_command_method_called(self):
    with patch('utility.check_output', autospec=True):
        with patch('runner.Runner._run_command', autospec=True) as cm_mock:
            with temp_dir() as directory:
                runner = Runner(directory, ChaosMonkey.factory())
                runner.random_chaos(run_timeout=1, enablement_timeout=1)
    cm_mock.assert_called_with(runner, 1)

def test_filter_command_include_command(self):
    include_command = 'deny-all'
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        runner.filter_commands(include_command=include_command)
    self.assertEqual(len(runner.chaos_monkey.chaos), 1)
    self.assertEqual(runner.chaos_monkey.chaos[0].command_str, 'deny-all')

def test_filter_commands_exclude_incorrect_group(self):
    exclude_group = 'net,killl'
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        with self.assertRaisesRegexp(
                BadRequest, "Invalid value given on command line: killl"):
            runner.filter_commands(exclude_group=exclude_group)

def test_acquire_lock_fails_when_existing_lockfile(self):
    with temp_dir() as directory:
        expected_file = os.path.join(directory, 'chaos_runner.lock')
        open(expected_file, 'a').close()
        runner = Runner(directory, None)
        with self.assertRaises(SystemExit):
            runner.acquire_lock()

def __init__(self, cmakeConfig):
    Runner.__init__(self)
    self._config = cmakeConfig
    self._currBuild = None
    self._rootPath = None
    self._buildDict = None
    self._buildPath = None
    self._generator = None

def test_filter_command_include_incorrect_command(self):
    include_command = 'deny-all,deny-net'
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        with self.assertRaisesRegexp(
                BadRequest, "Invalid value given on command line: deny-net"):
            runner.filter_commands(include_command=include_command)

def test_random_chaos_passes_timeout(self):
    with patch('utility.check_output', autospec=True):
        with patch('runner.Runner._run_command', autospec=True) as mock:
            with temp_dir() as directory:
                runner = Runner(directory, ChaosMonkey.factory())
                runner.random_chaos(run_timeout=3, enablement_timeout=2)
    self.assertEqual(mock.call_args_list[0][0][1], 2)

def test_filter_commands_exclude_group(self):
    exclude_group = 'net'
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        runner.filter_commands(exclude_group=exclude_group)
    self.assertGreaterEqual(len(runner.chaos_monkey.chaos), 2)
    self.assertTrue(all(c.group != 'net'
                        for c in runner.chaos_monkey.chaos))

def test_filter_command_exclude_incorrect_command(self):
    exclude_command = 'deny-all,deny-net,{}'.format(Kill.jujud_cmd)
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        with self.assertRaisesRegexp(
                BadRequest, "Invalid value given on command line: deny-net"):
            runner.filter_commands(exclude_command=exclude_command)

def test_filter_command_exclude_command(self):
    exclude_command = Kill.jujud_cmd
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        runner.filter_commands(exclude_command=exclude_command)
    self.assertGreaterEqual(len(runner.chaos_monkey.chaos), 1)
    self.assertTrue(all(c.command_str != Kill.jujud_cmd
                        for c in runner.chaos_monkey.chaos))

def run(self, command):
    runner = Runner(self.config)
    relpath = path.relpath('.', self.repo.basedir)
    with repo_clone(self.repo, self.config) as clone_dir:
        annotations = runner.run(clone_dir, relpath, command)
    self.repo.annotate(annotations)

def test_random_chaos_run_once(self):
    cm = ChaosMonkey.factory()
    with patch('runner.Runner._run_command', autospec=True) as mock:
        with temp_dir() as directory:
            runner = Runner(directory, cm)
            runner.random_chaos(
                run_timeout=2, enablement_timeout=1, run_once=True)
    mock.assert_called_once_with(runner, 1)

def test_verify_lock_empty_lock_file(self):
    with temp_dir() as directory:
        expected_file = os.path.join(directory, 'chaos_runner.lock')
        open(expected_file, 'a').close()
        runner = Runner(directory, None)
        runner.workspace_lock = True
        runner.lock_file = expected_file
        with self.assertRaisesRegexp(NotFound, 'Unexpected pid:'):
            runner.verify_lock()

def test_filter_commands_include_groups(self):
    include_group = 'net,{}'.format(Kill.group)
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        runner.filter_commands(include_group=include_group)
    self.assertGreaterEqual(len(runner.chaos_monkey.chaos), 2)
    self.assertTrue(
        all(c.group == 'net' or c.group == Kill.group
            for c in runner.chaos_monkey.chaos))

def test_verify_lock(self):
    with temp_dir() as directory:
        expected_file = os.path.join(directory, 'chaos_runner.lock')
        with open(expected_file, 'w') as lock_file:
            lock_file.write(str(os.getpid()))
        runner = Runner(directory, None)
        runner.workspace_lock = True
        runner.lock_file = expected_file
        runner.verify_lock()

def test_get_command_list(self):
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        with NamedTemporaryFile() as temp_file:
            self._write_command_list_to_file(temp_file)
            args = Namespace(replay=temp_file.name, restart=False)
            commands = runner._get_command_list(args)
    expected = [['deny-state-server', 1], ['deny-api-server', 1]]
    self.assertItemsEqual(commands, expected)

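# The helper _write_command_list_to_file used above is not shown in this
# section. A minimal sketch, assuming the replay file is YAML (consistent
# with test_save_replay_command_list below, which round-trips via
# yaml.dump) and Python 2 file semantics (matching assertItemsEqual):
def _write_command_list_to_file(self, temp_file):
    # Hypothetical helper: write a known command list as YAML so the
    # replay tests have something to read back.
    temp_file.write(yaml.dump(
        [['deny-state-server', 1], ['deny-api-server', 1]]))
    temp_file.flush()
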
def test_run_command(self):
    chaos = self._get_chaos_object(Net(), 'deny-state-server')
    with patch('utility.check_output', autospec=True) as mock:
        with patch(
                'runner.random.choice', autospec=True, return_value=chaos):
            with temp_dir() as directory:
                runner = Runner(directory, ChaosMonkey.factory())
                runner._run_command(enablement_timeout=0)
    self.assertEqual(mock.mock_calls, self._deny_port_call_list())

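# _get_chaos_object is referenced above but not defined in this section.
# A hedged sketch, assuming each chaos source (Net, Kill, ...) exposes a
# list of chaos entries with a command_str attribute; the get_chaos()
# accessor name here is an assumption, not the confirmed API:
def _get_chaos_object(self, chaos_source, command_str):
    # Hypothetical helper: return the chaos entry matching command_str.
    for chaos in chaos_source.get_chaos():
        if chaos.command_str == command_str:
            return chaos
    self.fail('{} not found'.format(command_str))
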
def test_acquire_lock(self):
    with temp_dir() as directory:
        expected_file = os.path.join(directory, 'chaos_runner.lock')
        expected_pid = str(os.getpid())
        runner = Runner(directory, None)
        runner.acquire_lock()
        self.assertTrue(os.path.exists(expected_file))
        with open(expected_file, 'r') as lock_file:
            pid = lock_file.read()
    self.assertEqual(pid, expected_pid)

def run_scenarios():
    # important: without this, the celery worker's sys.argv confuses it
    configuration = Configuration(command_args='')
    configuration.scenarios_dir = app.config['SCENARIOS_DIR']
    configuration.plugins_dir = app.config['PLUGINS_DIR']
    configuration.format = ['pretty']
    configuration.verbose = True
    runner = Runner(configuration)
    failed = runner.run()

def test_verify_lock_bad_pid_in_lock_file(self):
    with temp_dir() as directory:
        expected_file = os.path.join(directory, 'chaos_runner.lock')
        with open(expected_file, 'w') as lock_file:
            lock_file.write('bad_pid')
        runner = Runner(directory, None)
        runner.workspace_lock = True
        runner.lock_file = expected_file
        with self.assertRaisesRegexp(NotFound, 'Unexpected pid:'):
            runner.verify_lock()

def test_replay_commands(self):
    with patch('utility.check_output', autospec=True) as mock:
        with temp_dir() as directory:
            runner = Runner(directory, ChaosMonkey.factory())
            with NamedTemporaryFile() as temp_file:
                self._write_command_list_to_file(temp_file)
                args = Namespace(replay=temp_file.name, restart=False)
                runner.replay_commands(args)
    expected = self._deny_port_call_list()
    expected.extend(self._deny_port_call_list('17017'))
    self.assertEqual(mock.mock_calls, expected)

def test_run_command_select_restart_unit(self):
    chaos = self._get_chaos_object(Kill(), Kill.restart_cmd)
    with patch('utility.check_output', autospec=True) as mock:
        with patch(
                'runner.random.choice', autospec=True, return_value=chaos):
            with patch('runner.Init', autospec=True) as ri_mock:
                with temp_dir() as directory:
                    runner = Runner(directory, ChaosMonkey.factory())
                    runner._run_command(enablement_timeout=0)
    self.assertEqual(mock.mock_calls, [call(['shutdown', '-r', 'now'])])
    ri_mock.upstart.assert_called_once_with()

def test_filter_command_include_command_exclude_group(self):
    include_command = 'deny-all,deny-incoming'
    exclude_group = 'net'
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        runner.filter_commands(exclude_group=exclude_group,
                               include_command=include_command)
    self.assertEqual(len(runner.chaos_monkey.chaos), 2)
    self.assertEqual(runner.chaos_monkey.chaos[0].command_str, 'deny-all')
    self.assertEqual(runner.chaos_monkey.chaos[1].command_str,
                     'deny-incoming')

def test_filter_commands_include_group_and_exclude_command(self):
    include_group = 'net'
    exclude_command = 'deny-all'
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        runner.filter_commands(include_group=include_group,
                               exclude_command=exclude_command)
    self.assertGreaterEqual(len(runner.chaos_monkey.chaos), 1)
    self.assertTrue(all(c.group == 'net'
                        for c in runner.chaos_monkey.chaos))
    self.assertTrue(all(c.command_str != 'deny-all'
                        for c in runner.chaos_monkey.chaos))

def test_save_replay_command_list(self):
    commands = [['deny-state-server', 1], ['deny-api-server', 1]]
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        with NamedTemporaryFile(
                suffix=runner.replay_filename_ext) as temp_file:
            args = Namespace(replay=temp_file.name.split('.')[0],
                             restart=False)
            runner._save_command_list(commands, args)
            file_content = temp_file.read()
    expected = yaml.dump(commands)
    self.assertItemsEqual(file_content, expected)

def manageJob(self, jobFile, baseDir):
    # we will want to capture the process exit code and SGE number
    jobNumberSGE = -1

    # load the job file
    jsonData = open(jobFile)
    fileData = json.load(jsonData)
    logging.debug('%s - %s' % (getTimestamp(), fileData))

    # create the output folder
    outputFolder = '%s/%s' % (fileData['sample_folder'], fileData['name'])
    logging.debug('%s - Creating output folder %s' % (getTimestamp(), outputFolder))
    fileData['output_folder'] = outputFolder

    # see if it exists first
    if not os.path.exists(outputFolder):
        os.makedirs(outputFolder)

    # write the job template
    fileData['json_file'] = jobFile
    templateWriter = TemplateWriter(outputFolder, self.__softwareDirectory)
    analysisFile = templateWriter.writeTemplate(fileData)

    # now we can pass the job to be executed over to the job runner
    runner = Runner("CommandLine")
    logging.info('%s - Starting %s' % (getTimestamp(), analysisFile))
    fileData['status'] = 'submitted'
    fileData['output_folder'] = outputFolder

    # if the --requeue option was specified, update the queue
    if options.requeue:
        fileData['analysis']['settings']['queue'] = options.requeue

    # update the json
    self.__updateJSON(jobFile, fileData)

    # submit the job to SGE
    sgeJobID = runner.submitToSGE('%s/job.sh' % (outputFolder), fileData)
    fileData['status'] = 'queued'
    fileData['sge_job_id'] = sgeJobID
    logging.info('%s - Submitted to SGE (%i)' % (getTimestamp(), sgeJobID))

    # update the json
    self.__updateJSON(jobFile, fileData)

class fsm():
    states = [
        {'name': 'idle'},
        {'name': 'run_state'},
        {'name': 'config_state'}
    ]
    transitions = [
        {'trigger': 'trigger_run',
         'source': ['idle', 'config_state'],
         'dest': 'run_state'},
        {'trigger': 'trigger_config',
         'source': ['idle', 'run_state'],
         'dest': 'config_state'}
    ]

    def __init__(self):
        self.name = 'temp_runner'
        # Initialize the state machine
        self.machine = Machine(model=self, states=fsm.states,
                               transitions=fsm.transitions, initial='idle')
        self.runner = Runner()
        self.config = Config()

    def on_enter_run_state(self):
        print('setup_run_state')
        self.runner.run(0)

    def on_exit_run_state(self):
        print('quit_run_state')
        self.runner.stop()

    def on_enter_config_state(self):
        print('setup config state')
        time.sleep(5)
        self.config.run()

    def on_exit_config_state(self):
        print('quit config state')
        self.config.stop()

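# A minimal usage sketch for the fsm class above. With the transitions
# library, each trigger name in the transition table becomes a method on
# the model, and the on_enter_*/on_exit_* callbacks fire automatically:
if __name__ == '__main__':
    machine = fsm()
    print(machine.state)      # 'idle'
    machine.trigger_run()     # idle -> run_state; on_enter_run_state() runs
    machine.trigger_config()  # run_state -> config_state; exit and enter callbacks run
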
def runner(request):
    offline = "offline" in request.node.keywords
    r = Runner(not offline)
    # create repo locally only
    reponame = util.randrepo()
    localdir = f"{reponame}"
    os.mkdir(localdir)
    r.cdrel(localdir)
    r.reponame = None if offline else reponame
    yield r
    # cleanup
    r.cleanup()
    r.logout()

def test_filter_commands_exclude_groups_and_exclude_commands(self):
    exclude_group = '{},net'.format(Kill.group)
    exclude_command = 'deny-all,{}'.format(Kill.jujud_cmd)
    with temp_dir() as directory:
        runner = Runner(directory, ChaosMonkey.factory())
        runner.filter_commands(exclude_group=exclude_group,
                               exclude_command=exclude_command)
    add_fake_group(runner.chaos_monkey.chaos)
    self.assertGreaterEqual(len(runner.chaos_monkey.chaos), 1)
    self.assertTrue(any(c.group == 'fake_group'
                        for c in runner.chaos_monkey.chaos))
    self.assertTrue(any(c.group != Kill.group
                        for c in runner.chaos_monkey.chaos))
    self.assertTrue(any(c.group != 'net'
                        for c in runner.chaos_monkey.chaos))
    self.assertTrue(all(c.command_str != 'deny-all'
                        for c in runner.chaos_monkey.chaos))
    self.assertTrue(all(c.command_str != Kill.jujud_cmd
                        for c in runner.chaos_monkey.chaos))

def main():
    print(header)
    stream = open(args.config, 'r')
    default = open('./configs/default.yaml', 'r')
    parameters = load(stream)
    default_parameters = load(default)

    if args.command == 'train':
        parameters = merge(default_parameters, parameters)
        print("Training parameters\n-------")
        print_dic(parameters)
        runner = Runner(**parameters)
        runner.run()
    else:
        parameters = merge(merge(default_parameters, parameters), {
            'deterministic_evaluation': args.det,
            'load_dir': args.load_dir
        })
        evaluator = Evaluator(**parameters)
        evaluator.evaluate()

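# merge is used above but not defined in this section. A minimal sketch,
# assuming it performs a recursive dict merge in which the second
# argument's values take precedence over the defaults:
def merge(base, override):
    result = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = merge(result[key], value)  # merge nested sections
        else:
            result[key] = value  # the override wins for scalar values
    return result
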
def test_run_program_io(self):
    path_source = os.path.join(self.PATH_FIXTURES, "reverser.cpp")
    path_executable = os.path.join(config.PATH_SANDBOX, "reverser.o")
    status = Compiler.compile(config.LANGUAGE_CPP, path_source, path_executable)
    self.assertEqual(status, "")

    run_result = Runner.run_program(sandbox=Sandbox(),
                                    executable_path=path_executable,
                                    memory_limit=32000000,
                                    timeout=1.0,
                                    input_bytes=b"espr1t")
    self.assertEqual(run_result.exit_code, 0)
    self.assertEqual(run_result.output.decode().strip(), "t1rpse")

def main():
    if _debug:
        print("Main = %s" % _message)

    if b4.state != "ON":
        print("START = %s" % "Runner")
        r1 = Runner(name="r1", debug=True, control_pin=b4, control_thread=th1)
    else:
        print("STOP = %s" % "Runner")

def test_web_security_group_ingress(self):
    self.assertTrue(
        Runner.finder(
            self.result["root_modules"]["aws_security_group.sgrp"],
            "ingress",
            {
                'from_port': '443',
                'to_port': '443',
                'Protocol': 'tcp',
                'Cidr_blocks': '10.1.0.0/16'
            }))

def main(rank, world_size, arg):
    logger = Logger(arg.save_dir)
    setup(rank, world_size)
    print(rank)

    scaled_lr = arg.lr * arg.batch_size / 256
    arg.batch_size = int(arg.batch_size / world_size)
    num_workers = int(arg.num_workers / world_size)

    net, res = get_model(arg, classes=arg.num_classes)
    logger.will_write(str(arg) + "\n")

    net.to(rank)
    net = nn.parallel.DistributedDataParallel(net, device_ids=[rank])

    if not arg.dali:
        train_loader, val_loader = get_loaders(arg.root, arg.batch_size, res,
                                               num_workers, arg.val_batch_size,
                                               color_jitter=arg.color_jitter,
                                               pca=arg.pca, crop_pct=arg.crop_pct)
    else:
        train_loader, val_loader = get_loaders_dali(arg.root, arg.batch_size,
                                                    res, rank, world_size,
                                                    num_workers)

    # net = nn.DataParallel(net).to(torch_device)
    loss = nn.CrossEntropyLoss()

    if not arg.no_filter_bias:
        parameters = add_weight_decay(net, weight_decay=arg.decay)
        weight_decay = 0
        print('filter out bias, bn and other 1d params from weight decay')
    else:
        parameters = net.parameters()
        weight_decay = arg.decay

    optim = {
        # "adam": lambda: torch.optim.Adam(net.parameters(), lr=arg.lr, betas=arg.beta, weight_decay=arg.decay),
        "sgd": lambda: torch.optim.SGD(parameters, lr=scaled_lr, momentum=arg.momentum,
                                       nesterov=True, weight_decay=weight_decay),
        "rmsproptf": lambda: RMSpropTF(parameters, lr=scaled_lr, momentum=arg.momentum,
                                       eps=arg.eps, weight_decay=weight_decay),
        "rmsprop": lambda: torch.optim.RMSprop(parameters, lr=scaled_lr, momentum=arg.momentum,
                                               eps=arg.eps, weight_decay=weight_decay)
    }[arg.optim]()

    scheduler = get_scheduler(optim, arg.scheduler, int(1.0 * len(train_loader)),
                              arg.epoch * len(train_loader),
                              warmup_t=int(arg.warmup * len(train_loader)),
                              warmup_lr_init=0.1 * scaled_lr)

    arg.epoch = arg.epoch + arg.cool_down if arg.cool_down > 0 else arg.epoch

    model = Runner(arg, net, optim, rank, loss, logger, scheduler, world_size)

    if arg.profiler:
        model.profiler(train_loader, val_loader, train_loader.sampler)
    elif arg.test is False:
        if not arg.dali:
            model.train(train_loader, val_loader, train_loader.sampler)
        else:
            model.train(train_loader, val_loader)

    cleanup()

def test_run_program_output_limit(self):
    path_source = os.path.join(self.PATH_FIXTURES, "outputlimit.cpp")
    path_executable = os.path.join(config.PATH_SANDBOX, "outputlimit.o")
    status = Compiler.compile(config.LANGUAGE_CPP, path_source, path_executable)
    self.assertEqual(status, "")

    run_result = Runner.run_program(sandbox=Sandbox(),
                                    executable_path=path_executable,
                                    memory_limit=64000000,
                                    timeout=1.0,
                                    input_bytes=None)
    self.assertEqual(run_result.exit_code, 0)
    self.assertEqual(len(run_result.output.decode()), config.MAX_EXECUTION_OUTPUT)

def measurement__write_measurement(self, identifier):
    #
    # VARIABLES
    #
    from environment import Environment
    from measurement import Measurement
    from runner import Runner

    #
    # CODE
    #
    # we need the environment
    environment_directory = 'environments/'
    measurement_directory = 'measurements/'
    environment = Environment(identifier)
    environment.read_environment_file("./" + environment_directory + identifier + ".xml")
    environment.initialize()

    # and we need to create an exogenous network
    environment.network.create_exogenous_network(
        environment.parameter.network_density)

    # and also a measurement
    measurement = Measurement()

    for agent in environment.network.network.nodes():
        F0s = 1.0
        F1s = 1.0
        # we need the previous decisions of neighbors
        neighbors_previous_decision = []
        for neighbor in environment.network.network.neighbors(agent):
            neighbors_previous_decision.append(neighbor.previous_x)
        agent.compute_decision(neighbors_previous_decision,
                               environment.parameter.num_agents, F0s, F1s)

    runner = Runner(identifier, environment, measurement)
    print runner
    runner.do_run()
    print runner

    measurement.write_measurement(measurement_directory + identifier)

def setUpClass(self):
    self.snippet = """
        provider "aws" {
            region = "eu-west-2"
            access_key = "foo"
            secret_key = "bar"
            profile = "foo"
            skip_credentials_validation = true
            skip_get_ec2_platforms = true
            skip_requesting_account_id = true
        }

        module "my_module" {
            source = "./mymodule"
        }
    """
    self.runner = Runner(self.snippet)
    self.result = self.runner.result

def setUpClass(self):
    self.snippet = """
        provider "aws" {
            region = "eu-west-2"
            profile = "foo"
            skip_credentials_validation = true
            skip_get_ec2_platforms = true
        }

        module "root_modules" {
            source = "./mymodule"
            providers = {aws = "aws"}
            appsvpc_id = "1234"
            dq_lambda_subnet_cidr = "10.1.42.0/24"
            apps_vpc_id = "vpc-12345"
            naming_suffix = "apps-preprod-dq"
        }
    """
    self.runner = Runner(self.snippet)
    self.result = self.runner.result

def main():
    if not path.exists("Runners"):
        try:
            mkdir("Runners")
        except OSError:
            print("Error Creating Runner Directory")

    runnersDict = {}
    for runner in listdir("%s/Runners" % getcwd()):
        runnersDict[runner] = Runner(runner)

    myApplicationManager(runnersDict).start()

def test_commandline_generation(self):
    runner = Runner("keyboardlayout.exe", [
        "run_instance.py",
        "test_instance --param1 4",
        "instance_info",
        "cutoff_time",
        "cutoff_length",
        "seed",
        "--first_extra_arg",
        "--second_extra_arg"
    ])
    self.assertEqual(
        runner.cmd,
        "keyboardlayout.exe --smac --test test_instance --param1 4 "
        "--instance_info instance_info --cutoff_time cutoff_time "
        "--cutoff_length cutoff_length --seed seed "
        "--first_extra_arg --second_extra_arg"
    )

def test_security_group_egress(self):
    self.assertTrue(
        Runner.finder(
            self.result["root_modules"]["aws_security_group.sgrp"],
            "egress",
            {
                'from_port': '0',
                'to_port': '0',
                'Protocol': '-1',
                'Cidr_blocks': '0.0.0.0/0'
            }))

def setUp(self):
    self.snippet = """
        provider "aws" {
            region = "eu-west-2"
            access_key = "foo"
            secret_key = "bar"
            profile = "foo"
            skip_credentials_validation = true
            skip_get_ec2_platforms = true
            skip_requesting_account_id = true
        }

        resource "aws_instance" "foo" {
            ami = "foo"
            instance_type = "t2.micro"
        }
    """
    self.runner = Runner(self.snippet)
    self.result = self.runner.result

def searchBranch(self):
    self.checkFileExist()
    cmd = []
    cmd.append('cd ' + self.workdir)
    cmd.append(' git branch -a ')
    return Runner([{'conn': 'local'}]).run('raw', ' && '.join(cmd))

def run(self, file):
    try:
        instance = graph.read_file(file)
    except FileNotFoundError:
        print("Cannot find file {}, skipping.".format(file))
        return ""

    logfile_name = "_".join([os.path.basename(file), str(floor(time()))])
    logger = Logger(logfile_name, log_to_stdout=self.log_to_stdout)

    if self.cpu_count > 1:
        run_tasks = Queue(self.cpu_count)
        finished_queue = Queue()

        def generate_run_tasks():
            for algorithm_config in self.algorithm_configs:
                runner = Runner(instance, logger, self.iterations,
                                algorithm_config)
                for run_nr in range(self.run_count):
                    # using the same runner for each thread is fine,
                    # because run_algorithm does not mutate the runner instance
                    yield (runner, run_nr)
            for _ in range(self.cpu_count):
                yield 'STOP'

        for _ in range(self.cpu_count):
            Process(target=worker, args=(run_tasks, finished_queue)).start()

        for run_task in generate_run_tasks():
            run_tasks.put(run_task)

        for _ in range(len(self.algorithm_configs) * self.run_count):
            lines = finished_queue.get()
            logger.write(lines)
    else:
        for algorithm_config in self.algorithm_configs:
            runner = Runner(instance, logger, self.iterations,
                            algorithm_config)
            for run_nr in range(self.run_count):
                runner.run_algorithm(run_nr)

    logger.close()
    return logfile_name

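# The worker target passed to Process above is not shown. A hedged
# sketch, assuming each worker drains (runner, run_nr) tasks until the
# 'STOP' sentinel and that run_algorithm returns the log lines for the
# run (the real return value may differ):
def worker(run_tasks, finished_queue):
    for task in iter(run_tasks.get, 'STOP'):
        runner, run_nr = task
        lines = runner.run_algorithm(run_nr)  # assumed to return log lines
        finished_queue.put(lines)
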
def manageJob(self, id, name, jsonString):
    # we will want to capture the process exit code and SGE number
    jobNumberSGE = -1
    exitStatus = 1

    # parse the json string
    parameters = json.loads(jsonString)

    # determine which type of job we are running
    analysisType = parameters["analysis_type"]

    # build the command-line call
    commandLine = ""

    # see if the output directory exists; if not, create it
    output_directory = "%s/%s" % (BASE_OUTPUT_DIRECTORY, id)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    # single sample transposon
    if analysisType == "transposon_single_sample":
        read_coverage_minimum = parameters["settings"]["read_coverage_minimum"]
        read_size_minimum = parameters["settings"]["read_size_minimum"]
        maximum_distance = parameters["settings"]["maximum_distance"]
        output_file = "%s.xls" % (name)
        commandLine = "/Volumes/HD/mattdyer/Documents/Work/LAM/programming/transposon_single_sample.pl -uniprot_file \"%s\" -background_file \"%s\" -min_coverage %s -min_size %s -max_distance %s -output_directory \"%s\" -output_file \"%s\"" % (UNIPROT_FILE, BACKGROUND_FILE, read_coverage_minimum, read_size_minimum, maximum_distance, output_directory, output_file)

        # now add the files
        for file in parameters["files"]:
            commandLine += " -file=\"%s\"" % (file)

    # now we can pass the job to be executed over to the job runner
    runner = Runner("CommandLine")
    self.setStatus(id, "Running", jobNumberSGE)
    exitStatus = runner.runCommandLine(commandLine)

    # will update this when we cut over to SGE on the Torrent Server
    if not exitStatus == 0:
        self.setStatus(id, "Failed", jobNumberSGE)
    else:
        self.setStatus(id, "Complete", jobNumberSGE)

class ReadAfterDoneTest(unittest.TestCase):
    _runner = None

    def setUp(self):
        self._runner = Runner()
        self._runner.add("echo", "echo -n")

    def test_read_after_done(self):
        self._runner.start("echo", with_args=["test"])
        self.addCleanup(lambda: self._runner.terminate("echo"))
        chan = self._runner.get_channel("echo")
        self.assertEqual(_readall(chan), b'test')

    def test_buffered_read_after_done(self):
        self._runner.start("echo", with_args=["test"], buffering='line')
        self.addCleanup(lambda: self._runner.terminate("echo"))
        chan = self._runner.get_channel("echo")
        self.assertEqual(_readall(chan), b'test')

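# _readall is used by both tests above but not defined in this section.
# A minimal sketch, assuming the channel exposes a read() that returns
# b'' once the process output is exhausted:
def _readall(chan):
    data = b''
    while True:
        chunk = chan.read()
        if not chunk:  # an empty read signals end of output
            return data
        data += chunk
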
def new(size: Size) -> Runner:
    name = 'attrs'
    if size == Size.Small:
        unp = SMALL
        pac = data.SMALL
        cls = Small
    elif size == Size.Medium:
        unp = MEDIUM
        pac = data.MEDIUM
        cls = Medium
    return Runner(name, unp, None, partial(de, cls, pac), None, None)

def test_overhead(self):
    # Create a ~50MB byte array as input
    input_bytes = os.urandom(50000000)

    # Execute a very light command which shouldn't take any time and measure the overhead
    start_time = perf_counter()
    stdout_bytes, stderr_bytes = Runner.run(sandbox=Sandbox(),
                                            command="pwd",
                                            input_bytes=input_bytes)
    self.assertEqual(stderr_bytes.decode().strip(), "")
    self.assertEqual(stdout_bytes.decode().strip(), "/home")

    # The overhead for getting a sandbox, passing the input, and getting
    # the output shouldn't be more than 0.2s
    self.assertLess(perf_counter() - start_time, 0.2)

def modem_manager_start_in_debug_mode(cls):
    dbg_mode = cls.modem_manager_in_debug_mode()
    if not dbg_mode:
        Runner.run_cmd('sudo stop modemmanager')
        time.sleep(2)
        Runner.run_cmd('/usr/sbin/ModemManager --debug')
        time.sleep(5)
        # Get all modem info again.
        cls.modem_info()
        # Ensure debug mode is True
        dbg_mode = cls.modem_manager_in_debug_mode()
        if not dbg_mode:
            Results.add_error(
                '/usr/sbin/ModemManager --debug',
                'Modem manager cannot be started in debug mode.')
    assert dbg_mode is True
    return dbg_mode

def runner():
    r = Runner()
    reponame = util.randrepo()
    os.mkdir(reponame)
    r.cdrel(reponame)
    r.runcommand("gin", "init")
    r.repositories[r.cmdloc] = reponame
    yield r
    r.runcommand("gin", "annex", "uninit")

def configure(runner: Runner, app_config):
    config_path = app_config.syscalls_tests_config
    config.scope_path = app_config.scope_path
    logging.info(f"Reading syscalls tests config from file {config_path}")
    with open(config_path, "r") as f:
        syscalls_tests_config = json.load(f)

    for location in syscalls_tests_config["locations"]:
        enabled = location.get("enabled", True)
        home_dir = location["home"]
        if not enabled:
            logging.debug(f"Location {home_dir} is ignored")
            continue
        included_tests = location.get("include_tests", [])
        excluded_tests = location.get("exclude_tests", [])
        tests = locate_tests(home_dir, included_tests, excluded_tests)
        runner.add_tests(tests)

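# locate_tests is called above but not defined in this section. A hedged
# sketch, assuming it walks home_dir for test files and applies the
# include list first (when non-empty) and then the exclude list by name;
# the matching rule is an assumption:
def locate_tests(home_dir, included_tests, excluded_tests):
    tests = []
    for root, _dirs, files in os.walk(home_dir):
        for name in files:
            if included_tests and name not in included_tests:
                continue  # an include list was given: keep only listed tests
            if name in excluded_tests:
                continue  # drop explicitly excluded tests
            tests.append(os.path.join(root, name))
    return tests
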
def setUpClass(self):
    self.snippet = """
        provider "aws" {
            region = "eu-west-2"
            #profile = "foo"
            skip_credentials_validation = true
            skip_get_ec2_platforms = true
        }

        module "dailytasks" {
            source = "./mymodule"
            # providers = {aws = "aws"}
            path_module = "./"
            namespace = "notprod"
            naming_suffix = "notprod-dq"
        }
    """
    self.runner = Runner(self.snippet)
    self.result = self.runner.result

def run_specific_task_using_python(task):
    print("OK")
    service_setup = task.service_setup
    node = service_setup.deployment.node
    #print(task.service_setup.deployment.node)
    target = node.node_info.node_name
    runner = Runner('ansible_compute.yml', 'multinode',
                    {'extra_vars': {'target': target},
                     'tags': ['install', 'uninstall']},
                    task.task_display_name, True, None, None, None)
    # Equivalent CLI:
    # ansible-playbook ansible_compute.yml --extra-vars "target=target other_variable=foo" \
    #     --tags "install, uninstall" --start-at-task=task.task_display_name --step
    print(runner.variable_manager)
    log_run = runner.run()
    print(log_run)

def test_filter_commands_gets_options_from_random_chaos(self):
    with patch('runner.Runner._run_command', autospec=True):
        with patch('runner.Runner.filter_commands',
                   autospec=True) as f_mock:
            with temp_dir() as directory:
                runner = Runner(directory, ChaosMonkey.factory())
                runner.random_chaos(
                    run_timeout=1, enablement_timeout=1,
                    include_group='net,{}'.format(Kill.group),
                    exclude_group=Kill.group,
                    include_command='deny-all',
                    exclude_command='deny-incoming')
    expected = {'include_group': 'net,{}'.format(Kill.group),
                'exclude_group': Kill.group,
                'include_command': 'deny-all',
                'exclude_command': 'deny-incoming'}
    call_params = f_mock.call_args_list[0][1:]
    for k, v in call_params[0].items():
        self.assertEqual(expected[k], v)