def test_arg_parser_error(mock_parser, args, capsys, parser_out):
    """parse_args must exit and print the expected parser error message."""
    # Overlay the per-case overrides on top of the shared defaults.
    merged = {**default_args, **args}
    mock_parser.return_value = argparse.Namespace(**merged)
    with pytest.raises(SystemExit):
        parse_args('', '')
    _, err = capsys.readouterr()
    if parser_out not in err:
        pytest.fail(err)
def test_local_path(mock_is_symlink, mock_exists, mock_parser, args, exists, is_symlink, parser_out, capsys):
    """parse_args must exit with the expected error for bad local paths."""
    merged = {**default_args, **args}
    mock_parser.return_value = argparse.Namespace(**merged)
    # Simulate the filesystem state for this case.
    mock_exists.return_value = exists
    mock_is_symlink.return_value = is_symlink
    with pytest.raises(SystemExit):
        parse_args('', '')
    _, err = capsys.readouterr()
    if parser_out not in err:
        pytest.fail(err)
def main():
    """Dump demo images for the train/test datasets, then deliberately stop.

    Seeds the RNGs, loads both dataset splits, writes their object images
    into demo_data/{train,test}, and raises to halt before any training.
    """
    args = parse_args()
    # Deterministic runs: seed both Python and torch RNGs from the CLI seed.
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    logging.info('Reading dataset metadata.')
    train_dataset, test_dataset = get_dataset(args)
    train_obj2image, test_obj2image = train_dataset.final_obj2image, test_dataset.final_obj2image
    train_demo, test_demo = 'demo_data/train', 'demo_data/test'
    os.makedirs(train_demo, exist_ok=True)
    os.makedirs(test_demo, exist_ok=True)
    save_image_to_dir(train_obj2image, train_dataset.time_to_clip_ind_image_adr, train_demo)
    save_image_to_dir(test_obj2image, test_dataset.time_to_clip_ind_image_adr, test_demo)
    # NOTE(review): deliberate hard stop once the demo data is written —
    # everything below this raise is unreachable debug scaffolding. Confirm
    # whether the model-construction/training path should be restored or
    # deleted.
    raise RuntimeError("finish making demo")
    logging.info('Constructing model.')
    model, loss, restarting_epoch = get_model_and_loss(args)
    print("Debug model construction finished!")
    optimizer = model.optimizer()
    # NOTE(review): train_dataset is passed as the test_dataset keyword —
    # looks intentional for the demo, but verify against train_one_data.
    train_one_data(model=model, loss=loss, optimizer=optimizer, test_dataset=train_dataset, args=args)
def main():
    """Entry point: dispatch on args.mode to train, evaluate, or dump forces.

    Modes:
      train       -- train for args.epochs, checkpoint every save_frequency
                     epochs and keep the best model by validation error.
      test        -- evaluate; with args.reload_dir set, evaluate every
                     'model_state_*.pytar' checkpoint in epoch order.
      testtrain   -- like test, but evaluates on the training loader.
      savegtforce -- dump ground-truth forces for the training set.

    Raises:
      NotImplementedError: for any unsupported args.mode.
    """
    args = parse_args()
    # Deterministic runs: seed both Python and torch RNGs from the CLI seed.
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    logging.info('Reading dataset metadata.')
    train_loader, val_loader = get_dataset(args)
    logging.info('Constructing model.')
    model, loss, restarting_epoch = get_model_and_loss(args)
    print("Model construction finished!")
    lowest_val_error = np.inf
    if args.mode == 'train':
        optimizer = model.optimizer()
        for i in range(restarting_epoch, args.epochs):
            print('Epoch[', i, ']')
            train.train_one_epoch(model, loss, optimizer, train_loader,
                                  i + 1, args)
            # Periodic checkpoint; save_frequency == -1 disables it.
            if args.save_frequency != -1 and i % args.save_frequency == 0:
                torch.save(
                    model.state_dict(),
                    os.path.join(args.save,
                                 'model_state_{:02d}.pytar'.format(i + 1)))
            val_err = test.test_one_epoch(model, loss, val_loader, i + 1, args)
            # Keep the best model by validation error.
            if val_err < lowest_val_error:
                lowest_val_error = val_err
                # Fixed: the filename is constant — the original called
                # 'best_model.pytar'.format(i + 1), a no-op .format().
                torch.save(model.state_dict(),
                           os.path.join(args.save, 'best_model.pytar'))
    elif args.mode == 'test' or args.mode == 'testtrain':
        if args.mode == 'testtrain':
            val_loader = train_loader
        if args.reload_dir is not None:
            # Fixed: restrict the scan to epoch checkpoints. The directory
            # may also contain 'best_model.pytar' (written above), whose
            # name would previously crash int() below.
            all_saved_models = [
                f for f in os.listdir(args.reload_dir)
                if f.startswith('model_state_') and f.endswith('.pytar')
            ]
            all_indices = [
                f.split('_')[-1].replace('.pytar', '')
                for f in all_saved_models
            ]
            int_indices = sorted(int(f) for f in all_indices)
            # Evaluate every checkpoint in epoch order.
            for epoch in int_indices:
                args.reload = os.path.join(
                    args.reload_dir, 'model_state_{:02d}.pytar'.format(epoch))
                print('Loaded ', args.reload, 'epoch', epoch)
                model, loss, restarting_epoch = get_model_and_loss(args)
                test.test_one_epoch(model, loss, val_loader, epoch, args)
        else:
            test.test_one_epoch(model, loss, val_loader, 0, args)
    elif args.mode == 'savegtforce':
        save_gt_force.save_gt_force(model, loss, train_loader, 0, args)
    else:
        raise NotImplementedError("Unsupported mode {}".format(args.mode))
def test_original():
    """Benchmark batched force application across subprocess physics envs.

    Spins up a SubprocPhysicsEnv pool, builds one synthetic batch of initial
    states plus contact forces, then times repeated calls to
    batch_init_locations_and_apply_force and prints total and per-call time.
    """
    args = parse_args()
    random.seed(args.seed)
    root_dir = args.data
    object_list = args.object_list
    # Resolve each object name to its URDF file inside the dataset tree.
    object_paths = {
        obj: os.path.join(root_dir, 'objects_16k', obj, 'google_16k',
                          'textured.urdf')
        for obj in object_list
    }
    nproc = 11
    nenvs = 44
    phy_env = SubprocPhysicsEnv(args=args,
                                object_paths=object_paths,
                                context='spawn',
                                nproc=nproc,
                                nenvs=nenvs)
    phy_env.reset()
    # generate test data.
    force_data = [(-0.0047, -0.0841, -0.0801) for _ in range(5)]
    state_data = {
        'object_name': '005_tomato_soup_can',
        'position': (-0.2421, 0.0213, 0.9691),
        'rotation': (0.3126, -0.5557, 0.4300, -0.6392),
        'velocity': (0.0, 0.0, 0.0),
        'omega': (0., 0., 0.)
    }
    # Initially, one cannot use the list version as input.
    list_of_contact_points = [[-0.0934, -0.0214, 0.0495],
                              [-0.0651, -0.0909, -0.0848],
                              [-0.0042, 0.0523, -0.0092],
                              [0.0637, -0.0939, -0.0402],
                              [0.0107, 0.0885, 0.0101]]
    one_data = {
        'forces': force_data,
        'initial_state': state_data,
        'object_num': None,
        'list_of_contact_points': list_of_contact_points
    }
    batch_test_data = [one_data for _ in range(nenvs)]
    print("Test infer time.")
    n_calls = 1024
    bt = time.time()
    for _ in range(n_calls):
        phy_env.batch_init_locations_and_apply_force(
            batch_data=batch_test_data)
    total_time = time.time() - bt
    print("Consuming time: ", total_time)
    # Fixed: the loop performs n_calls (1024) calls, but the original report
    # divided by a stale hard-coded 2000, misreporting the per-call time.
    print("Time per call: ", total_time / n_calls / nenvs)
def test_arg_parser_success(mock_exists, mock_parser, args, res):
    """parse_args output must match the expected values (ignoring 'file')."""
    # Case args overlay the defaults; expected values overlay the case args.
    merged_args = {**default_args, **args}
    expected = {**merged_args, **res}
    mock_exists.return_value = True
    mock_parser.return_value = argparse.Namespace(**merged_args)
    actual = vars(parse_args('', ''))
    for key, want in expected.items():
        if key != 'file' and actual[key] != want:
            pytest.fail(f'{key}: {actual[key]} != {want}')
def main():
    """Seed the RNGs, then generate the synthetic train and test datasets."""
    args = parse_args()
    # Deterministic generation: seed both Python and torch RNGs.
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    # Build each split in turn; clean_dataset wipes any stale output first.
    for split_name, is_train in (("train", True), ("test", False)):
        dataset = get_dataset(args=args, train=is_train)
        logging.info("Generating synthetic dataset for %s." % split_name)
        generate_data(args=args, dataset=dataset, clean_dataset=True)
# NOTE(review): the three statements below are the tail of a routine whose
# definition lies outside this chunk — they persist the final test losses
# and write the closing log entry.
logs.write_to_log_file(string)
logs.append_test_loss(test_losses)
logs.create_log(
    args,
    decoder=decoder,
    encoder=encoder,
    optimizer=optimizer,
    final_test=True,
    test_losses=test_losses,
)


if __name__ == "__main__":
    # Script entry point: parse CLI args and set up the experiment logger.
    args = arg_parser.parse_args()
    logs = logger.Logger(args)
    if args.GPU_to_use is not None:
        logs.write_to_log_file("Using GPU #" + str(args.GPU_to_use))
    # Load the dataset splits together with the location/velocity extrema
    # (presumably used for normalisation downstream — continues past this chunk).
    (
        train_loader,
        valid_loader,
        test_loader,
        loc_max,
        loc_min,
        vel_max,
        vel_min,
    ) = data_loader.load_data(args)
    # NOTE(review): the closing paren below belongs to a publish call whose
    # start lies outside this chunk.
        )
        log.info('Image successfully published')

    def rmi(self):
        """Remove Docker image from the host machine"""
        # Look the image up by its configured name and force-remove it
        # through the Docker API client.
        image = self.docker_api.client.images.get(self.image_name)
        self.docker_api.client.images.remove(image.short_id, force=True)


if __name__ == '__main__':
    # Script entry point: parse args, route logs into a per-tag directory,
    # and construct the CI launcher. The enclosing try continues past this
    # chunk.
    started_time = timeit.default_timer()
    exit_code = ExitCode.success
    try:
        product_name = 'Intel(R) Distribution of OpenVINO(TM) toolkit'
        des = f'DockerHub CI framework for {product_name}'
        args = parse_args(name=os.path.basename(__file__), description=des)
        # Log directory is derived from the first image tag, with path
        # separators sanitised so it is a single folder name.
        logdir: pathlib.Path = pathlib.Path(os.path.realpath(
            __file__), ).parent / 'logs' / args.tags[0].replace(
                '/', '_').replace(':', '_')
        if not logdir.parent.exists():
            logdir.parent.mkdir()
        logfile = logger.init_logger(logdir)
        # Default the image metadata dump location when the arg exists but
        # was left empty.
        if hasattr(args, 'image_json_path') and not args.image_json_path:
            args.image_json_path = logdir / 'image_data.json'
        launcher = Launcher(product_name, args, logdir)
        log.info(logger.LINE_DOUBLE)
        log.info(f'{des} v{__version__}')
        log.info(logger.LINE_DOUBLE)
        log.info(f'Log: {logfile}')
        log.info(f'Command: {" ".join(sys.argv)}')
def test_hostname_if_no_args():
    """Calling the parser with no arguments must exit with an error."""
    with pytest.raises(SystemExit):
        arg_parser.parse_args([])
def test_hostname():
    """The positional hostname argument is parsed correctly."""
    parsed = arg_parser.parse_args(['-s', '8.8.8.8', 'google.com'])
    assert parsed.hostname == 'google.com'
# NOTE(review): tail of a routine whose definition lies outside this chunk.
count += 1


def run_MGAN(args, device):
    """Build and run the MGAN model with parameters drawn from the CLI args."""
    MGAN_params = {
        'data': args.data,
        'epochs': args.MGAN_epochs,
        'pixel_blockSize': args.image_size,
        'netS_weight': args.MGAN_netS_weight,
        'model_folder': 'MGAN/',
        'device': device
    }
    # Output folder is <data>/MGAN/ — created up front so the model can
    # save into it.
    create_folder(MGAN_params['data'] + MGAN_params['model_folder'])
    model = MGAN(MGAN_params)
    model.run()


if __name__ == '__main__':
    args = parse_args()
    device = torch.device(args.device)
    # Each model family is gated by its own CLI flag; several may run in
    # sequence within one invocation.
    if args.MDAN:
        run_MDAN(args, device)
    if args.AG:
        run_AG(args, device)
    if args.MGAN:
        run_MGAN(args, device)
async def c_braillefy(self, msg: twitchirc.ChannelMessage):
    """Chat command: convert the image at a url into braille art.

    Parses command arguments from the message text, validates the
    sensitivity/size options, downloads the image and renders it to braille
    (frame by frame for gifs). Returns the braille text, an error string, or
    a hastebin link when the output is too long or explicitly requested.
    Rate-limited through main.do_cooldown.
    """
    cd_state = main.do_cooldown('braille', global_cooldown=0, local_cooldown=60, msg=msg)
    if cd_state:
        # On cooldown: drop the command silently.
        return
    try:
        args = arg_parser.parse_args(msg.text.split(' ', 1)[1], {
            'url': str,
            'sensitivity_r': float,
            'sensitivity_g': float,
            'sensitivity_b': float,
            'sensitivity_a': float,
            'size_percent': float,
            'max_y': int,
            'pad_y': int,
            'reverse': bool,
            'hastebin': bool
        }, strict_escapes=True, strict_quotes=True)
    except arg_parser.ParserError as e:
        return f'Error: {e.message}'
    missing_args = arg_parser.check_required_keys(args, ['url'])
    if missing_args:
        return (
            f'Error: You are missing the {",".join(missing_args)} '
            f'argument{"s" if len(missing_args) > 1 else ""} to run this command.'
        )
    # Unset arguments come back as Ellipsis; r/g/b must be given
    # all-or-none, alpha defaults to 1.
    num_defined = sum(
        [args[f'sensitivity_{i}'] is not Ellipsis for i in 'rgb'])
    alpha = args['sensitivity_a'] if args[
        'sensitivity_a'] is not Ellipsis else 1
    if num_defined == 3:
        sens: typing.Tuple[float, float, float,
                           float] = (args['sensitivity_r'],
                                     args['sensitivity_g'],
                                     args['sensitivity_b'], alpha)
        is_zero = bool(sum([args[f'sensitivity_{i}'] == 0 for i in 'rgba']))
        if is_zero:
            return f'Error: Sensitivity cannot be zero. MEGADANK'
    elif num_defined == 0:
        sens = (1, 1, 1, 1)
    else:
        return f'Error: you need to define either all sensitivity fields (r, g, b, a) or none.'
    if args['size_percent'] is not ... and args['max_y'] is not ...:
        return f'Error: you cannot provide the size percentage and maximum height at the same time.'
    # Default geometry: 60-wide / max_y-high (default 60) unless an explicit
    # size percentage was requested, in which case both limits are disabled.
    max_x = 60 if args['size_percent'] is Ellipsis else None
    max_y = (args['max_y'] if args['max_y'] is not Ellipsis else 60) if args['size_percent'] is Ellipsis else None
    size_percent = None if args['size_percent'] is Ellipsis else args[
        'size_percent']
    img = await braille.download_image(args['url'])
    img: Image.Image
    if img.format.lower() != 'gif':
        # Static image: crop/pad once, then render a single braille frame.
        img, o = await braille.crop_and_pad_image(
            True, img, max_x, max_y, '',
            (60, args['pad_y'] if args['pad_y'] is not Ellipsis else 60),
            size_percent)
        o += await braille.to_braille_from_image(
            img,
            reverse=True if args['reverse'] is not Ellipsis else False,
            size_percent=size_percent,
            max_x=max_x,
            max_y=max_y,
            sensitivity=sens,
            enable_padding=True,
            pad_size=(60, args['pad_y'] if args['pad_y'] is not Ellipsis else 60),
            enable_processing=False)
    else:
        # Gif: rendering every frame is expensive, so it is permission-gated.
        missing_permissions = main.bot.check_permissions(
            msg, ['cancer.braille.gif'], enable_local_bypass=False)
        if missing_permissions:
            o = 'Note: missing permissions to convert a gif. \n'
        else:
            o = ''
        frame = -1
        start_time = time.time()
        while 1:
            # Image.seek raises EOFError once past the last gif frame.
            try:
                img.seek(frame + 1)
            except EOFError:
                break
            frame += 1
            o += f'\nFrame {frame}\n'
            frame_start = time.time()
            o += await braille.to_braille_from_image(
                img.copy(),
                reverse=True if args['reverse'] is not Ellipsis else False,
                size_percent=size_percent,
                max_x=max_x,
                max_y=max_y,
                sensitivity=sens,
                enable_padding=True,
                pad_size=(60, (args['pad_y'] if args['pad_y'] is not Ellipsis else 60)),
                enable_processing=True)
            time_taken = round(time.time() - start_time)
            # NOTE(review): frame_time is rounded to whole seconds, so a
            # sub-second frame rounds to 0 and 1/frame_time below can raise
            # ZeroDivisionError — the newer variant of this command guards
            # against exactly that. Confirm.
            frame_time = round(time.time() - frame_start)
            # Periodic progress message while long conversions run.
            if frame % self.status_every_frames == 0 and time_taken > self.time_before_status:
                speed = round(1 / frame_time)
                # NOTE(review): speed is frames/second, so multiplying the
                # remaining frame count by speed for the eta looks
                # dimensionally wrong — presumably should divide. Confirm.
                main.bot.send(
                    msg.reply(
                        f'@{msg.user}, ppCircle Converted {frame} frames in '
                        f'{time_taken} seconds, speed: {round(speed)} fps, '
                        f'eta: {(img.n_frames - frame) * speed} seconds.'
                    ))
            # Yield to the event loop between frames.
            await asyncio.sleep(0)
    # Drop everything before the first newline (the leading frame header)
    # and flatten the output to a single chat line.
    sendable = ' '.join(o.split('\n')[1:])
    if args['hastebin'] is not Ellipsis or len(sendable) > 500:
        return (
            f'{"This braille was too big to be posted." if not args["hastebin"] is not Ellipsis else ""} '
            f'Here\'s a link to a hastebin: '
            f'{plugin_hastebin.hastebin_addr}'
            f'{await plugin_hastebin.upload(o)}')
    else:
        return sendable
async def c_braillefy(self, msg: twitchirc.ChannelMessage):
    """Chat command: convert an image (by url or emote name) into braille art.

    Parses command arguments from the message text, resolves either a url
    (permission-gated) or an emote name to an image, renders it to braille
    (frame by frame for gifs, also permission-gated), and returns either the
    braille text or a hastebin link when the output is too long or
    explicitly requested. Error paths return (CommandResult.OTHER_FAILED,
    message) tuples.
    """
    try:
        args = arg_parser.parse_args(
            msg.text.split(' ', 1)[1],
            {
                'url': str,
                'emote': str,
                'sensitivity_r': float,
                'sensitivity_g': float,
                'sensitivity_b': float,
                'sensitivity_a': float,
                'size_percent': float,
                'max_y': int,
                'pad_x': int,
                'reverse': bool,
                'hastebin': bool,
                'sobel': bool,
                'resize': bool
            },
            defaults={
                'resize': True,
                'sensitivity_r': 2,
                'sensitivity_g': 2,
                'sensitivity_b': 2,
                'sensitivity_a': 1,
                'url': ...,
                'emote': ...,
                'size_percent': ...,
                'max_y': ...,
                'pad_x': ...,
                'reverse': False,
                'hastebin': False,
                'sobel': False,
            },
            strict_escapes=True,
            strict_quotes=True
        )
    except arg_parser.ParserError as e:
        return main.CommandResult.OTHER_FAILED, f'Error: {e.message}'
    # region argument parsing
    # Exactly one of url/emote must be supplied.
    missing_args = []
    if args['url'] is ... and args['emote'] is ...:
        missing_args.append('url or emote')
    if missing_args:
        return (main.CommandResult.OTHER_FAILED,
                f'Error: You are missing the {",".join(missing_args)} '
                f'argument{"s" if len(missing_args) > 1 else ""} to run this command.')
    if args['url'] is not ... and args['emote'] is not ...:
        return main.CommandResult.OTHER_FAILED, f'@{msg.user}, cannot provide both an emote name and a url.'
    is_zero = bool(sum([args[f'sensitivity_{i}'] == 0 for i in 'rgba']))
    if is_zero:
        return main.CommandResult.OTHER_FAILED, f'Error: Sensitivity cannot be zero. MEGADANK'
    if args['size_percent'] is not ... and args['max_y'] is not ...:
        return (main.CommandResult.OTHER_FAILED,
                f'Error: you cannot provide the size percentage and maximum height at the same time.')
    # Default geometry: 60-wide / max_y-high (default 60) unless an explicit
    # size percentage was requested, in which case both limits are disabled.
    max_x = 60 if args['size_percent'] is Ellipsis else None
    max_y = (args['max_y'] if args['max_y'] is not Ellipsis else 60) if args['size_percent'] is Ellipsis else None
    size_percent = None if args['size_percent'] is Ellipsis else args['size_percent']
    # Raw urls are permission-gated; emote lookups are open to everyone.
    if args['url'] is not ...:
        missing_perms = await main.bot.acheck_permissions(msg, ['cancer.braille.url'], enable_local_bypass=False)
        if missing_perms:
            return (main.CommandResult.OTHER_FAILED,
                    f'@{msg.user}, You are missing the "cancer.braille.url" permission to use the url argument.')
    url = args['url'] if args['url'] is not ... else None
    if url and url.startswith('file://'):
        return main.CommandResult.OTHER_FAILED, f'@{msg.user}, you can\'t do this BabyRage'
    if args['emote'] is not ...:
        channel_id = None
        if isinstance(msg, twitchirc.ChannelMessage):
            channel_id = msg.flags['room-id']
        emote = args['emote']
        # "#channel:emote" syntax: resolve the emote in another channel,
        # looking the channel id up locally first, then via the ivr.fi API.
        if emote.startswith('#') and emote.count(':') == 1:
            channel, emote = emote.split(':')
            channel = channel.lstrip('#')
            users = main.User.get_by_name(channel)
            if users:
                u = users[0]
                channel_id = u.twitch_id
            else:
                async with aiohttp.request('get', f'https://api.ivr.fi/twitch/resolve/{channel}') as req:
                    if req.status == 404:
                        return main.CommandResult.OTHER_FAILED, f'@{msg.user}, {channel}: channel not found'
                    data = await req.json()
                    channel_id = data['id']
        emote_found = await plugin_emotes.find_emote(emote, channel_id=channel_id)
        if emote_found:
            url = emote_found.get_url('3x')
        else:
            return (main.CommandResult.OTHER_FAILED,
                    f'@{msg.user}, Invalid url, couldn\'t find an emote matching this.')
    # endregion
    img = await braille.download_image(url)
    img: Image.Image
    if img.format.lower() != 'gif':
        # Static image: render a single braille frame.
        o = await self._single_image_to_braille(
            args, img, max_x, max_y,
            (
                args['sensitivity_r'],
                args['sensitivity_g'],
                args['sensitivity_b'],
                args['sensitivity_a'],
            ),
            size_percent
        )
    else:
        # region gifs
        # Gif conversion is expensive, so it is permission-gated.
        missing_permissions = await main.bot.acheck_permissions(msg, ['cancer.braille.gif'], enable_local_bypass=False)
        if missing_permissions:
            o = 'Note: missing permissions to convert a gif. \n'
        else:
            o = ''
        frame = -1
        start_time = time.time()
        while 1:
            # Image.seek raises EOFError once past the last gif frame.
            try:
                img.seek(frame + 1)
            except EOFError:
                break
            frame += 1
            o += f'\nFrame {frame}\n'
            frame_start = time.time()
            o += await self._single_image_to_braille(
                args, img, max_x, max_y,
                (
                    args['sensitivity_r'],
                    args['sensitivity_g'],
                    args['sensitivity_b'],
                    args['sensitivity_a'],
                ),
                size_percent
            )
            time_taken = round(time.time() - start_time)
            frame_time = time.time() - frame_start
            # Only round frame times above one second so sub-second frames
            # keep a nonzero fractional value.
            if frame_time > 1:
                frame_time = round(frame_time)
            # avoid division by zero
            # Periodic progress message while long conversions run.
            if frame % self.status_every_frames == 0 and time_taken > self.time_before_status:
                speed = round(1 / frame_time, 1)
                # NOTE(review): speed is frames/second, so multiplying the
                # remaining frame count by speed for the eta looks
                # dimensionally wrong — presumably should divide. Confirm.
                speed_msg = (f'@{msg.user}, Converted {frame} frames in '
                             f'{time_taken} seconds, speed: {speed} fps, '
                             f'eta: {(img.n_frames - frame) * speed} seconds.')
                # Route the status to the channel only where spam is allowed;
                # otherwise whisper it (half as often).
                if main.check_spamming_allowed(msg.channel):
                    await main.bot.send(msg.reply(speed_msg))
                else:
                    if frame % (self.status_every_frames * 2) == 0:
                        await main.bot.send(msg.reply_directly(speed_msg))
            # Yield to the event loop between frames.
            await asyncio.sleep(0)
        # endregion
    # Drop everything before the first newline (the leading frame header)
    # and flatten the output to a single chat line.
    sendable = ' '.join(o.split('\n')[1:])
    if args['hastebin'] or len(sendable) > 500:
        return (f'{"This braille was too big to be posted." if not args["hastebin"] else ""} '
                f'Here\'s a link to a hastebin: '
                f'{plugin_hastebin.hastebin_addr}'
                f'{await plugin_hastebin.upload(o)}')
    else:
        return sendable