def dir_remotes(remoteloc):
    """Create two clones of one fresh repository sharing a directory remote.

    Uses two Runner instances so tests can push conflicting changes from
    each clone and exercise merge-conflict handling.

    :param remoteloc: object with a ``name`` attribute identifying the
        directory backing the ``dir:`` remote (presumably a temp dir —
        TODO confirm with callers).
    :returns: tuple ``(loca, locb)`` of the two Runner instances.
    """
    # Use 2 runner instances to checkout two clones and create merge
    # conflicts
    loca = Runner(False)
    reponame = util.randrepo()
    os.mkdir(reponame)
    loca.cdrel(reponame)
    # Create repo in A and push it to the shared directory remote.
    loca.runcommand("gin", "init")
    loca.runcommand("gin", "add-remote", "--create", "--default",
                    "origin", f"dir:{remoteloc.name}")
    loca.runcommand("gin", "upload")
    # Register the repository location for cleanup bookkeeping.
    loca.repositories[loca.cmdloc] = None
    # Init in B and download what A uploaded.
    locb = Runner(False)
    # NOTE(review): a second mkdir with the same name presumably succeeds
    # because each Runner works in its own directory — confirm.
    os.mkdir(reponame)
    locb.cdrel(reponame)
    locb.runcommand("gin", "init")
    locb.runcommand("gin", "add-remote", "--default", "origin",
                    f"dir:{remoteloc.name}")
    locb.runcommand("gin", "download")
    locb.repositories[locb.cmdloc] = None
    return (loca, locb)
def run_kmeans():
    """Parse CLI arguments and run K-means over a sweep of cluster counts.

    Usage: ``K num_iterations path_to_input [random_seed]``

    Fix: K, num_iterations and random_seed were parsed and validated but
    then ignored — the runners were built with hard-coded literals
    (9, 3/4/5, 10). They now honour the CLI arguments while keeping the
    original three-run sweep (K, K+1, K+2), which reproduces the old
    behaviour exactly for the historical invocation K=3, 10 iterations,
    seed 9.
    """
    if len(argv) < 4:
        print('Not enough arguments provided. Please provide 3 arguments: K, num_iterations, path_to_input')
        exit(1)
    k = int(argv[1])
    num_iterations = int(argv[2])
    input_path = argv[3]
    # Optional fourth argument: RNG seed (defaults to 9).
    if len(argv) == 5:
        random_seed = int(argv[4])
    else:
        random_seed = 9
    if k <= 1 or num_iterations <= 0:
        print('Please provide correct parameters')
        exit(1)
    if not os.path.exists(input_path):
        print('Input file does not exist')
        exit(1)
    points = load_data(input_path)
    if k >= len(points):
        print('Please set K less than size of dataset')
        exit(1)
    print("K max min mean")
    # Sweep three consecutive cluster counts so the summary rows can be
    # compared. NOTE(review): Runner argument order assumed to be
    # (seed, k, points, iterations), inferred from the original literal
    # calls Runner(9, 3, points, 10) — confirm against Runner.__init__.
    runner_list = [Runner(random_seed, kk, points, num_iterations)
                   for kk in range(k, k + 3)]
    for runner in runner_list:
        runner.start_running()
def gdrive():
    """Flask view: search a user's Drive files and transfer ownership.

    GET renders the search form. POST validates the target user against
    LDAP, then either lists files matching the search query or, when a
    new owner is supplied and valid, transfers the ticked files to them.

    Fix: when the proposed new owner failed LDAP validation, the warning
    message interpolated ``user_id`` (the searched-for owner) instead of
    the invalid ``new_owner`` value.
    """
    try:
        cookie = request.elfowl_cookie
        user = cookie.user
    except AttributeError:
        # No auth cookie available (e.g. local development).
        user = "******"
    log.info("Accessed by: %s" % user)
    form = FilesOwnershipTransferForm()
    user_info = None
    files = None
    if request.method == "POST":
        user_id = form.data["USER_ID"]
        user_is_valid = ldap_client.is_valid_user(user_id)
        if user_is_valid:
            user_info = ldap_client.get_user_info(user_id)
            file_search = form.data["FILE_SEARCH"]
            new_owner = form.data["NEW_OWNER"]
            files = []
            runner = Runner(user=user_id)
            if new_owner:
                new_owner_is_valid_user = ldap_client.is_valid_user(new_owner)
                if new_owner_is_valid_user:
                    files = runner.drive_api.search_files_list(
                        drive_query=file_search)
                    runner_new_owner = Runner(user=new_owner)
                    # Both accounts must be unsuspended for the transfer.
                    runner.suspend_user(False)
                    runner_new_owner.suspend_user(False)
                    for file in files:
                        # Only transfer files ticked in the submitted form.
                        if file["id"] in request.form.keys():
                            result = str(
                                runner.drive_api.transfer_file_owner(
                                    file["id"],
                                    runner_new_owner.user_email))
                            file["chown"] = result
                            log.info("File Transfer: %s" % (file["chown"]))
                    # Re-suspend the original owner only; presumably the
                    # new owner is meant to stay active — TODO confirm.
                    runner.suspend_user(True)
                else:
                    # BUG FIX: report the invalid new owner, not user_id.
                    flash("WARNING: %s is not a valid LDAP user." % new_owner)
                    return redirect(url_for('gdrive'))
            elif file_search:
                files = runner.drive_api.search_files_list(
                    drive_query=file_search)
        else:
            flash("WARNING: %s is not a valid LDAP user." % user_id)
            return redirect(url_for('gdrive'))
    return render_template("gdrive.html", form=form,
                           users=json.dumps(ldap_users), user=user,
                           user_info=user_info,
                           ldap_fields=config["ldap"]["fields"],
                           files=files)
def pool_run_args(argses, super_dirname, output_every, t_upto, resume):
    """Build one Runner per argument set and execute them as a pool.

    When ``resume`` is set and an output directory already holds files,
    the existing run is resumed; otherwise a fresh model is built and the
    directory is cleared before running up to time ``t_upto``.
    """
    def _runner_for(model_args):
        # One output directory per argument set, nested under the super dir.
        dirpath = join(super_dirname, make_output_dirname(model_args))
        if resume and get_filenames(dirpath):
            return Runner(dirpath, output_every)
        fresh = Runner(dirpath, output_every, model=AgentModel(**model_args))
        fresh.clear_dir()
        return fresh

    pool_run([_runner_for(args) for args in argses], t_upto)
def test_filter_command_include_command(self):
    """Filtering with a single include command keeps only that command."""
    with temp_dir() as directory:
        monkey_runner = Runner(directory, ChaosMonkey.factory())
        monkey_runner.filter_commands(include_command='deny-all')
        remaining = monkey_runner.chaos_monkey.chaos
        self.assertEqual(len(remaining), 1)
        self.assertEqual(remaining[0].command_str, 'deny-all')
def test_random_enablement_zero(self):
    """random_chaos with a zero enablement timeout still executes commands."""
    with patch('utility.check_output', autospec=True) as co_mock, \
            temp_dir() as directory:
        chaos_runner = Runner(directory, ChaosMonkey.factory())
        chaos_runner.random_chaos(run_timeout=1,
                                  enablement_timeout=0,
                                  exclude_command=Kill.restart_cmd)
        self.assertEqual(co_mock.called, True)
def test_random_assert_run_command_method_called(self):
    """random_chaos delegates to Runner._run_command with the run timeout."""
    with patch('utility.check_output', autospec=True), \
            patch('runner.Runner._run_command', autospec=True) as run_mock, \
            temp_dir() as directory:
        chaos_runner = Runner(directory, ChaosMonkey.factory())
        chaos_runner.random_chaos(run_timeout=1, enablement_timeout=1)
        run_mock.assert_called_with(chaos_runner, 1)
def test_absolute_or_relative_comparison(self):
    """validate_output accepts float answers within 1e-9 absolute or
    relative tolerance and rejects those outside it.

    Refactor: the four copy-pasted assertion stanzas are now a single
    data-driven loop over (output, solution, accepted) fixtures.
    """
    runner = Runner(self.evaluator)
    fixture_dir = 'unit_tests/fixtures/FloatComparison/'
    cases = [
        # (output file, solution file, expected to be accepted?)
        ('FloatAbsoluteOK.out', 'FloatAbsolute.sol', True),   # |diff| < 1e-9
        ('FloatAbsoluteWA.out', 'FloatAbsolute.sol', False),  # |diff| > 1e-9
        ('FloatRelativeOK.out', 'FloatRelative.sol', True),   # rel diff < 1e-9
        ('FloatRelativeWA.out', 'FloatRelative.sol', False),  # rel diff > 1e-9
    ]
    for out_name, sol_name, accepted in cases:
        message, score = runner.validate_output(
            fixture_dir + out_name, fixture_dir + sol_name)
        if accepted:
            self.assertEqual('', message)
            self.assertEqual(1.0, score)
        else:
            self.assertNotEqual('', message)
            self.assertEqual(0.0, score)
def runner():
    """Set up a fresh unit-e git checkout and return a Runner bound to it."""
    checkout_runner = Runner("unit-e")
    checkout_runner.checkout_unit_e_clone(label="urls")
    return checkout_runner
def test1(self):
    """Running soln.py on input2 reproduces the recorded expected output."""
    solution = Runner('python3 soln.py')
    with open('test_runner_output2') as recorded:
        want = recorded.read().strip()
    got = solution.run('test_runner_input2')['output']
    self.assertEqual(got, want)
def main():
    """Train a segmentation model as described by the CLI-supplied config.

    Reads TRAIN and MAIN sections from the config file, builds datasets,
    loaders and a Runner, copies the config next to the weight dumps for
    reproducibility, then fits the model.
    """
    args = parse_args()
    # Fixed seed for reproducible runs.
    set_global_seeds(666)
    config = read_config(args.config, "TRAIN")
    config_main = read_config(args.config, "MAIN")
    pprint(config)
    factory = Factory(config['train_params'])
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    callbacks = create_callbacks(config['train_params']['name'],
                                 config['dumps'])
    trainer = Runner(stages=config['stages'], factory=factory,
                     callbacks=callbacks, device=device)
    # 'crop'-type training uses the crop-specific augmentation pipelines.
    aug_train = (AUGMENTATIONS_TRAIN_CROP
                 if config['train_params']['type'] == 'crop'
                 else AUGMENTATIONS_TRAIN)
    aug_test = (AUGMENTATIONS_TEST_CROP
                if config['train_params']['type'] == 'crop'
                else AUGMENTATIONS_TEST)
    train_dataset = SegmentationDataset(
        data_folder=config_main['path_to_data'], transforms=aug_train,
        phase='train', activation=config_main['activation'],
        fold=config['fold'],
        empty_mask_params=config['data_params']['empty_mask_increase'])
    val_dataset = SegmentationDataset(
        data_folder=config_main['path_to_data'], transforms=aug_test,
        phase='val', fold=config['fold'],
        activation=config_main['activation'])
    train_loader = DataLoader(train_dataset,
                              batch_size=config['batch_size'], shuffle=True,
                              num_workers=16, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=config['batch_size'],
                            shuffle=False, num_workers=16)
    # Keep a copy of the config alongside the dumped weights so a run can
    # always be traced back to its exact configuration.
    os.makedirs(os.path.join(config['dumps']['path'],
                             config['dumps']['weights'],
                             config['train_params']['name']), exist_ok=True)
    shutil.copy(args.config,
                os.path.join(config['dumps']['path'],
                             config['dumps']['weights'],
                             config['train_params']['name'],
                             args.config.split('/')[-1]))
    trainer.fit(train_loader, val_loader)
def test_filter_commands_exclude_incorrect_group(self):
    """Excluding a misspelled group name is rejected with BadRequest."""
    with temp_dir() as directory:
        monkey_runner = Runner(directory, ChaosMonkey.factory())
        # NOTE(review): assertRaisesRegexp is the deprecated alias of
        # assertRaisesRegex (removed in Python 3.12); kept as-is in case
        # this suite still runs under Python 2 — confirm target version.
        with self.assertRaisesRegexp(
                BadRequest, "Invalid value given on command line: killl"):
            monkey_runner.filter_commands(exclude_group='net,killl')
def test_acquire_lock_fails_when_existing_lockfile(self):
    """acquire_lock exits when chaos_runner.lock already exists."""
    with temp_dir() as directory:
        lockfile = os.path.join(directory, 'chaos_runner.lock')
        # Pre-create an empty lock file to simulate a concurrent run.
        open(lockfile, 'a').close()
        with self.assertRaises(SystemExit):
            Runner(directory, None).acquire_lock()
@classmethod
def setUpClass(cls):
    """Render the haproxy-instance terraform fixture once per TestCase.

    Fix: unittest invokes ``setUpClass`` directly on the class with no
    arguments, so the override must be a classmethod; the undecorated
    one-argument version raises TypeError before any test runs.
    """
    cls.snippet = """
        provider "aws" {
            region = "eu-west-2"
            skip_credentials_validation = true
            skip_get_ec2_platforms = true
        }
        module "haproxy-instance" {
            source = "./mymodule"
            providers = {
                aws = "aws"
            }
            haproxy_subnet_cidr_block = "1.2.3.0/24"
            peeringvpc_id = "1234"
            haproxy_private_ip = "1.2.3.4"
            haproxy_private_ip2 = "1.2.3.4"
            name_prefix = "dq-"
            SGCIDRs = ["1.2.3.0/24"]
            az = "foo"
            route_table_id = "1234"
            s3_bucket_name = "abcd"
            s3_bucket_acl = "private"
            log_archive_s3_bucket = "abcd"
            naming_suffix = "peering-preprod-dq"
            namespace = "notprod"
        }
    """
    cls.runner = Runner(cls.snippet)
    cls.result = cls.runner.result
def change_events_ownership(runner, grantee_user_id):
    """Move calendar ownership and all events from runner's user to grantee.

    Results are published on the session's subscription queue as a dict
    mapping each event to an HTML-decorated outcome.

    Fix: the failure branch tested ``v is False`` where ``v`` is an
    undefined name — any unsuccessful move raised NameError. It now tests
    ``result``, matching the success branch.
    """
    # Lazily create the per-session queue used to stream progress.
    if session_id not in subscriptions:
        subscriptions[session_id] = Queue()
        log.info("adding new queue to map: %s" % subscriptions)
    sub = subscriptions[session_id]
    runner_grantee = Runner(user=grantee_user_id)
    # The grantee account must be active to receive ownership.
    if runner_grantee.is_suspended_user:
        runner_grantee.suspend_user(False)
    moved_events = {}
    calendar_id = runner.calendar_api.get_calendar_id()
    grantee_calendar_id = runner_grantee.calendar_api.get_calendar_id()
    if calendar_id is not None and grantee_calendar_id is not None:
        # Swap calendar ownership in both directions, then move each event.
        runner.calendar_api.move_calendar_ownership(grantee_calendar_id)
        runner_grantee.calendar_api.move_calendar_ownership(calendar_id)
        events = runner.calendar_api.list_events(calendar_id)
        if events:
            for event in events:
                result = runner.calendar_api.move_event(
                    event, grantee_calendar_id)
                if result is True:
                    value = "<span class='text-success'>%s</span>" % result
                elif result is False:  # BUG FIX: was undefined name `v`
                    value = "<span class='text-danger'>%s</span>" % result
                else:
                    value = result
                moved_events[event] = value
    # Always notify subscribers, even when nothing could be moved.
    sub.put({"change_events_ownership": moved_events})
    log.info("adding to queue: %s" % moved_events)
@classmethod
def setUpClass(cls):
    """Render the root_modules terraform fixture once per TestCase.

    Fix: unittest invokes ``setUpClass`` directly on the class with no
    arguments, so the override must be a classmethod; the undecorated
    one-argument version raises TypeError before any test runs.
    """
    cls.snippet = """
        provider "aws" {
            region = "eu-west-2"
            profile = "foo"
            skip_credentials_validation = true
            skip_get_ec2_platforms = true
        }
        module "root_modules" {
            source = "./mymodule"
            providers = {aws = aws}
            acp_prod_ingress_cidr = "10.5.0.0/16"
            dq_ops_ingress_cidr = "10.2.0.0/16"
            dq_internal_dashboard_subnet_cidr = "10.1.12.0/24"
            peering_cidr_block = "1.1.1.0/24"
            apps_vpc_id = "vpc-12345"
            naming_suffix = "apps-preprod-dq"
            s3_archive_bucket = "bucket-name"
            s3_archive_bucket_key = "1234567890"
            s3_archive_bucket_name = "bucket-name"
            s3_httpd_config_bucket = "s3-bucket-name"
            s3_httpd_config_bucket_key = "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
            haproxy_private_ip = "1.2.3.4"
            haproxy_private_ip2 = "1.2.3.5"
            environment = "prod"
            security_group_ids = "sg-1234567890"
            lambda_subnet = "subnet-1234567890"
            lambda_subnet_az2 = "subnet-1234567890"
            rds_enhanced_monitoring_role = "arn:aws:iam::123456789:role/rds-enhanced-monitoring-role"
        }
    """
    cls.runner = Runner(cls.snippet)
    cls.result = cls.runner.result
def __init__(self, dna):
    """Build an agent from a DNA genome.

    The genome's value is loaded as weights into a fresh 3-[10,10,10]-3
    network; fitness starts at zero and the agent is initially idle.
    """
    net = Network(3, [10, 10, 10], 3)
    net.import_weights(dna.value)
    self.dna = dna
    self.network = net
    self.runner = Runner()
    self.is_running = False
    self.fitness = 0
def create_runner():
    """Build the bluetooth-master robot and run its two missions in order.

    Both missions share border avoidance, red-colour detection, ultrasound
    dodging and a default drive action; they differ only in which lake
    colours trigger DontDrownAction (the second drops blue).
    """
    # MAC_ADDRESS / PORT and sensor_map_master are module-level config.
    r = Robot(SensorMap(sensor_map_master),
              bluetooth=BluetoothMaster(MAC_ADDRESS, PORT))
    # NOTE(review): higher priority number presumably wins — confirm
    # against the Mission/Action implementation.
    mission_Mission = Mission([
        BorderAction(priority=5),
        ColorDetAction(colors=[ColorSensor.COLOR_RED], priority=4),
        DontDrownAction(lakes=[
            ColorSensor.COLOR_RED, ColorSensor.COLOR_YELLOW,
            ColorSensor.COLOR_BLUE
        ], priority=3),
        UltrasoundAction(rotate_degrees=0.5, dodge_rocks=False, priority=2),
        DriveAction(priority=1)
    ])
    # Second mission: blue is no longer treated as a lake.
    mission_Mission2 = Mission([
        BorderAction(priority=5),
        ColorDetAction(colors=[ColorSensor.COLOR_RED], priority=4),
        DontDrownAction(
            lakes=[ColorSensor.COLOR_RED, ColorSensor.COLOR_YELLOW],
            priority=3),
        UltrasoundAction(rotate_degrees=0.5, dodge_rocks=False, priority=2),
        DriveAction(priority=1)
    ])
    Runner(r, [mission_Mission, mission_Mission2]).run()
@classmethod
def setUpClass(cls):
    """Render the tableau terraform fixture once per TestCase.

    Fix: unittest invokes ``setUpClass`` directly on the class with no
    arguments, so the override must be a classmethod; the undecorated
    one-argument version raises TypeError before any test runs.
    """
    cls.snippet = """
        provider "aws" {
            region = "eu-west-2"
            skip_credentials_validation = true
            skip_get_ec2_platforms = true
        }
        module "tableau" {
            source = "./mymodule"
            providers = {
                aws = aws
            }
            vpc_subnet_cidr_block = "10.2.1.0/24"
            az = "eu-west-2a"
            naming_suffix = "ops-preprod-dq"
            tableau_dev_ip = "1.2.3.4"
            tableau_deployment_ip = "1.2.3.5"
            opsvpc_id = "1234"
            tableau_subnet_cidr_block = "10.2.1.0/24"
            route_table_id = "1234"
            ops_config_bucket = "s3-dq-ops-config"
            apps_aws_bucket_key = "1234"
            namespace = "NOTPROD"
            dq_pipeline_ops_readwrite_database_name_list = ["api_input"]
            dq_pipeline_ops_readonly_database_name_list = ["api_input"]
            dq_pipeline_ops_readwrite_bucket_list = ["s3-bucket-name"]
            dq_pipeline_ops_readonly_bucket_list = ["s3-bucket-name"]
        }
    """
    cls.runner = Runner(cls.snippet)
    cls.result = cls.runner.result
def selectNew_go(self):
    """Create a Runner for the name in newRunnerName and register it in the
    list, the dict, and the selection combobox."""
    name = self.newRunnerName
    runnerObj = Runner(self.newRunnerName)
    # Avoid duplicate entries in the combobox list.
    if name not in self.runnersList:
        self.runnersList.append(name)
    # NOTE(review): the dict entry is (re)written even when the name was
    # already registered, replacing any previous Runner — confirm intended.
    self.runnersDict[name] = runnerObj
    self.cbb_select_selector["values"] = self.runnersList
def test_acquire_lock_fails_without_workspace(self):
    """acquire_lock exits when its workspace directory no longer exists."""
    with temp_dir() as directory:
        chaos_runner = Runner(directory, None)
    # temp_dir has cleaned up `directory` by this point, so acquiring the
    # lock must fail because the workspace is gone.
    with self.assertRaises(SystemExit):
        chaos_runner.acquire_lock()
def __init__(self, **kwargs):
    """Build the squats-exercise screen: instructions, a 30-rep counter,
    a progress Runner, and a start button."""
    super().__init__(**kwargs)
    self.next_screen = False
    # Scrollable instruction text on the left.
    instr = ScrollLabel(txt_sits, size_hint=(0.3, 1), textcolor='#FFFFFF')
    # Label text is Russian for "Do 30 squats".
    lbl1 = ScrollLabel('Сделайте 30 приседаний', textcolor='#FFFFFF')
    self.lbl_sits = Sits(30, textcolor='#FFFFFF')
    # Progress widget: 30 steps at 1.5 s each; fires `finished` when done.
    self.run = Runner(total=30, steptime=1.5, size_hint=(0.4, 1),
                      lcolor=(0.44, 0.44, 0.44, 1))
    self.run.bind(finished=self.run_finished)
    line = ColoredLayout(lcolor=(0, 0.5, 0.01, 1))
    vlay = BoxLayout(orientation='vertical', size_hint=(0.3, 1))
    vlay.add_widget(lbl1)
    vlay.add_widget(self.lbl_sits)
    line.add_widget(instr)
    line.add_widget(vlay)
    line.add_widget(self.run)
    # Button text is Russian for "Start".
    self.btn = Button(text='Начать', size_hint=(0.3, 0.2),
                      pos_hint={'center_x': 0.5})
    self.btn.background_color = btn_color
    self.btn.on_press = self.next
    outer = BoxLayout(orientation='vertical', padding=8, spacing=8)
    outer.add_widget(line)
    outer.add_widget(self.btn)
    self.add_widget(outer)
@classmethod
def setUpClass(cls):
    """Render the root_modules (ssh) terraform fixture once per TestCase.

    Fix: unittest invokes ``setUpClass`` directly on the class with no
    arguments, so the override must be a classmethod; the undecorated
    one-argument version raises TypeError before any test runs.
    """
    cls.snippet = """
        provider "aws" {
            region = "eu-west-2"
            profile = "foo"
            skip_credentials_validation = true
            skip_get_ec2_platforms = true
        }
        module "root_modules" {
            source = "./mymodule"
            providers = {aws = "aws"}
            namespace = "dq-test"
            naming_suffix = "preprod"
            path_module = "unset"
            ip_address = "10.1.1.1"
            instance_id = [
                "i-1234567890",
                "i-1234567890"
            ]
            ssh_user = "******"
            command = "uname -a"
            lambda_subnet = "10.1.1.1/24"
            lambda_subnet_az2 = "10.1.1.1/24"
            security_group_ids = "sg-1234567890"
            count_tag = "1"
        }
    """
    cls.result = Runner(cls.snippet).result
def test_runner_initializes_with_players():
    """Runner stores the two players it is constructed with."""
    first = Player('1', 'X')
    second = Player('2', 'O')
    game_runner = Runner(first, second)
    assert game_runner._player1 == first
    assert game_runner._player2 == second
def test_add_score_to_winner_none():
    """A cats game (draw) awards a win to neither player."""
    game_runner = Runner(Player('1', 'X'), Player('2', 'O'))
    game_runner._end_game(Game_Results.cats_game)
    assert game_runner._player1.wins == 0
    assert game_runner._player2.wins == 0
def process_tests(self):
    """Submit every test to the shared executor, wait for completion, and
    return accumulated internal-error text (empty string on success)."""
    start_time = perf_counter()
    runner = Runner(self)
    errors = ""
    test_futures = []
    # Fan out: schedule all tests before waiting on any of them.
    # NOTE(review): `executor` is a module/global-level pool — confirm.
    for test in self.tests:
        test_futures.append([test, executor.submit(runner.run, test)])
    for test_future in test_futures:
        test, future = test_future
        try:
            # Wait for the test to be executed
            future.result()
        except ValueError as ex:
            # Internal judging error: record it and stop collecting —
            # remaining futures are abandoned (still running in the pool).
            errors += "Internal error on test " + test[
                "inpFile"] + "(" + test["inpHash"] + "): " + str(ex)
            self.logger.error("[Submission {}] {}".format(
                self.id, str(ex)))
            break
        except Exception as ex:
            # Any other failure is logged but does not abort the batch.
            self.logger.error("[Submission {}] Got exception: {}".format(
                self.id, str(ex)))
    self.logger.info(
        "[Submission {}] -- executed {} tests in {:.3f}s.".format(
            self.id, len(self.tests), perf_counter() - start_time))
    return errors
@classmethod
def setUpClass(cls):
    """Render the fms terraform fixture once per TestCase.

    Fix: unittest invokes ``setUpClass`` directly on the class with no
    arguments, so the override must be a classmethod; the undecorated
    one-argument version raises TypeError before any test runs.
    """
    cls.snippet = """
        provider "aws" {
            region = "eu-west-2"
            skip_credentials_validation = true
            skip_get_ec2_platforms = true
        }
        module "fms" {
            source = "./mymodule"
            providers = {
                aws = "aws"
            }
            appsvpc_id = "1234"
            opssubnet_cidr_block = "1.2.3.0/24"
            fms_cidr_block = "10.1.40.0/24"
            fms_cidr_block_az2 = "10.1.41.0/24"
            peering_cidr_block = "1.1.1.0/24"
            az = "eu-west-2a"
            az2 = "eu-west-2b"
            naming_suffix = "apps-preprod-dq"
            environment = "prod"
            rds_enhanced_monitoring_role = "arn:aws:iam::123456789:role/rds-enhanced-monitoring-role"
        }
    """
    cls.runner = Runner(cls.snippet)
    cls.result = cls.runner.result
def create_runner():
    """Build the bluetooth-master robot and run two missions, each ending
    with its own celebration.

    The first mission drives fast while avoiding borders, red markers and
    all three lake colours; the second additionally pushes one rock.
    """
    # MAC_ADDRESS / PORT and sensor_map_master are module-level config.
    r = Robot(SensorMap(sensor_map_master),
              bluetooth=BluetoothMaster(MAC_ADDRESS, PORT))
    # NOTE(review): higher priority number presumably wins — confirm
    # against the Mission/Action implementation.
    mission_Name24 = Mission([
        BorderAction(rotate_degrees=0.3, priority=4),
        ColorDetAction(colors=[ColorSensor.COLOR_RED], priority=3),
        DontDrownAction(lakes=[
            ColorSensor.COLOR_RED, ColorSensor.COLOR_YELLOW,
            ColorSensor.COLOR_BLUE
        ], priority=2),
        DriveAction(speed=3, priority=1)
    ], SpeakCelebration('weeeeeeeeeeeeeeeeeee'))
    # Second mission swaps colour detection for pushing a single rock.
    mission_Name52 = Mission([
        BorderAction(priority=4),
        DontDrownAction(lakes=[
            ColorSensor.COLOR_RED, ColorSensor.COLOR_YELLOW,
            ColorSensor.COLOR_BLUE
        ], priority=3),
        PushRockAction(number_of_rocks=1, priority=2),
        DriveAction(speed=3, priority=1)
    ], DanceCelebration())
    Runner(r, [mission_Name24, mission_Name52]).run()
def main(conf):
    '''Train and then evaluate a deep-learning model under the configured
    distribution strategy.

    :param conf: configuration object providing ``gpu_conf.strategy`` and
        sample/step bookkeeping via ``set_num_samples_and_steps_per_epoch``.
    '''
    log = Logger()
    # All model/data construction happens inside the strategy scope so
    # variables are placed per the distribution strategy.
    with conf.gpu_conf.strategy.scope():  # TODO: remove this line
        model = Model(conf, log).load()
        log.info('model created')
        model.summary()
        data = Dataset(conf, log).load()
        log.info('data loaded')
        # TODO: How to hide this dirty code?
        # Dataset sizes are only known after loading, so the config is
        # back-filled here.
        conf.set_num_samples_and_steps_per_epoch(
            data.num_train_samples, data.num_test_samples,
            data.num_validation_samples
        )
        runner = Runner(model, conf, log)
        log.info('runner created')
        runner.train(data)
        log.info('training done')
        runner.test(data)
        log.info('test done')
def before_all(context):
    """behave hook: start the fake translator server and the system under
    test before any scenario runs.

    Everything is attached to ``context`` so steps can reach the
    translator state, its recorded messages, and the Runner.
    """
    # Scratch directory, removed automatically at interpreter exit.
    context.dir = mkdtemp()
    atexit.register(shutil.rmtree, context.dir)
    context.tr_socket = os.path.join(context.dir, "tr_socket")
    # Translation tables and a log of messages seen by the fake translator.
    context.translations = {'pa2human': {}, 'human2pa': {}}
    context.tr_messages = []
    context.translator = TranslatorServer(context.tr_socket, ('0.0.0.0', 0),
                                          context.translations,
                                          context.tr_messages)
    # Daemon thread so a hung server cannot block test-process exit.
    context.tr_thread = Thread(target=context.translator.run_forever,
                               daemon=True)
    context.tr_thread.start()
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    # Busy-wait until the server has bound its unix socket.
    while not os.path.exists(context.tr_socket):
        _LOGGER.info("waiting for %s", context.tr_socket)
        time.sleep(1)
    # Launch the system under test with line-buffered output.
    context.runner = Runner()
    context.runner.add("main", command="sbcl --script run.lisp",
                       buffering="line")
    # FIXME: using hardcoded addresses here
    context.alt_sockaddr = ('0.0.0.0', 18011)
    context.router_addr = ('0.0.0.0', 18012)