def get_parser():
    """Build the top-level ``ambry`` command-line parser.

    Assembles the global options (library name, config path, single-config
    mode, exception tracing) and then hands the subcommand registry to each
    per-command parser module.

    Returns:
        argparse.ArgumentParser: fully configured parser, ready for
        ``parse_args()``.
    """
    parser = argparse.ArgumentParser(
        prog='ambry',
        description='Ambry {}. Management interface for ambry, libraries '
                    'and repositories. '.format(ambry._meta.__version__))

    parser.add_argument('-l', '--library', dest='library_name',
                        default="default",
                        help="Name of library, from the library secton of the config")
    parser.add_argument('-c', '--config',
                        default=os.getenv(AMBRY_CONFIG_ENV_VAR),
                        action='append',
                        help="Path to a run config file. Alternatively, set the AMBRY_CONFIG env var")
    parser.add_argument('--single-config', default=False, action="store_true",
                        help="Load only the config file specified")
    parser.add_argument('-E', '--exceptions', default=False,
                        action="store_true",
                        help="Show full exception trace on all exceptions")

    cmd = parser.add_subparsers(title='commands', help='command help')

    # Imports are deferred so merely importing this module stays cheap.
    from .library import library_parser
    from .warehouse import warehouse_parser
    from .remote import remote_parser
    from test import test_parser
    from config import config_parser
    from source import source_parser
    from bundle import bundle_parser
    from root import root_parser

    # Registration order matters for --help output; keep it stable.
    registrars = (library_parser, warehouse_parser, source_parser,
                  remote_parser, test_parser, config_parser,
                  bundle_parser, root_parser)
    for register in registrars:
        register(cmd)

    return parser
def load_config(self):
    """Declare the zabbix-agent-ng options, parse them, and init logging.

    Populates ``self.options`` from the parsed configuration and turns on
    logging via the parser's own ``init_logging`` hook.
    """
    parser = config.config_parser('zabbix-agent-ng')

    # One (flags, kwargs) row per option keeps the declarations scannable.
    option_table = (
        (('--update-interval',), dict(type=int, default=120,
                                      help='items update interval')),
        (('--server',), dict(help='zabbix feeder server')),
        (('--port',), dict(type=int, default=10051,
                           help='zabbix feeder port')),
        (('--pid-file',), dict(default='/var/run/zabbix-agent-ng.pid',
                               help='path to pid file')),
        (('--zabbix-conf-dir',), dict(default='/etc/zabbix',
                                      help='path to zabbix config')),
        (('--daemonize',), dict(type=int, default=0,
                                help='daemonize after start')),
        (('--stop',), dict(type=int, default=0, help='stop after start')),
        (('--protocol',), dict(default='1.8',
                               help='feeder protocol version')),
        (('--hosts',), dict(default='',
                            help='virtual hosts list (separated by commas)')),
    )
    for flags, kwargs in option_table:
        parser.add_argument(*flags, **kwargs)

    parser.parse()
    self.options = parser.options
    parser.init_logging()
def client(self):
    """Return an OAuth client authorized with this object's user token.

    Consumer credentials come from the ``auth`` section of the shared
    config; the per-user token/secret come from ``self.user_token`` and
    ``self.user_secret``.
    """
    settings = config.config_parser()
    consumer = oauth.Consumer(settings.get('auth', 'consumer_key'),
                              settings.get('auth', 'consumer_secret'))
    access_token = oauth.Token(key=self.user_token,
                               secret=self.user_secret)
    return oauth.Client(consumer, access_token)
def test():
    """Smoke-test QueryManager against the last week of Hubble data."""
    from data_load.sql import HubbleBase
    from config import config_parser, config_get

    cfg = config_parser()
    connection_uri = config_get(cfg, 'HubbleConnection', 'uri')
    session = start_session(connection_uri, HubbleBase)

    # Query window: the seven days leading up to now.
    window_start = datetime.now() - timedelta(days=7)
    window_end = datetime.now()
    manager = QueryManager(session, window_start, window_end)
    o = manager.query_parts()
    print('done')
def __load_config(self, cfgpath):
    """Load the crawler configuration and create its working directories.

    Args:
        cfgpath: Path to an ini-style config file, or ``None`` to fall
            back to ``<module dir>/conf/crawler.ini``.

    Side effects:
        Sets ``self.__base_dir``, ``self.__cfgparser``, ``self.__proxy``,
        ``self.__data_dir`` and ``self.__log_dir``; creates the data and
        log directories (including parents) if they do not exist.
    """
    self.__base_dir = os.path.dirname(os.path.realpath(__file__))
    cfgfile = cfgpath
    if cfgfile is None:
        cfgfile = os.sep.join((self.__base_dir, "conf", "crawler.ini"))
    # NOTE(review): config_parser here is the project's wrapper whose
    # .get() takes a third fallback argument — confirm against its module.
    self.__cfgparser = config_parser(cfgfile)
    proxy = self.__cfgparser.get("CRAWL_SETTING", "proxy", "")
    self.__proxy = proxy.strip(" \t\n")
    self.__data_dir = self.__cfgparser.get(
        "DEPLOY_INFO", "data_path", os.sep.join((self.__base_dir, "data")))
    self.__log_dir = self.__cfgparser.get(
        "DEPLOY_INFO", "log_path", os.sep.join((self.__base_dir, "log")))
    # Fix: replace os.system("mkdir -p %s %s") with os.makedirs — portable
    # (works off POSIX) and immune to shell injection via config-supplied
    # paths containing spaces or metacharacters. exist_ok=True preserves
    # the idempotent "mkdir -p" behavior.
    for directory in (self.__data_dir, self.__log_dir):
        os.makedirs(directory, exist_ok=True)
src_cameras.append(src_camera) src_rgbs = np.stack(src_rgbs, axis=0) src_cameras = np.stack(src_cameras, axis=0) depth_range = torch.tensor([depth_range[0] * 0.9, depth_range[1] * 1.5]) return {'camera': torch.from_numpy(camera), 'rgb_path': '', 'src_rgbs': torch.from_numpy(src_rgbs[..., :3]), 'src_cameras': torch.from_numpy(src_cameras), 'depth_range': depth_range } if __name__ == '__main__': parser = config_parser() args = parser.parse_args() args.distributed = False # Create ibrnet model model = IBRNetModel(args, load_scheduler=False, load_opt=False) eval_dataset_name = args.eval_dataset extra_out_dir = '{}/{}'.format(eval_dataset_name, args.expname) print('saving results to {}...'.format(extra_out_dir)) os.makedirs(extra_out_dir, exist_ok=True) projector = Projector(device='cuda:0') assert len(args.eval_scenes) == 1, "only accept single scene" scene_name = args.eval_scenes[0] out_scene_dir = os.path.join(extra_out_dir, '{}_{:06d}'.format(scene_name, model.start_step), 'videos')
def train():
    """Train a coarse+fine NeRF pair on a Blender dataset.

    Flow: parse config -> build embedders, networks, optimizer and renderer
    -> optionally resume from a checkpoint -> load blender data -> run the
    optimization loop with periodic logging, checkpointing and validation
    renders. All hyperparameters come from the config parser's args.
    """
    # ==================== setup config ==========================
    parser = config_parser()
    args = parser.parse_args()
    setup_runtime(args)
    log_config(args)

    # ==================== create NeRF model =====================
    # 5 output channels when hierarchical sampling is on (extra channel
    # presumably carries raw density for the importance pass — TODO confirm
    # against the NeRF class), otherwise RGBA (4).
    output_ch = 5 if args.N_importance > 0 else 4
    # Positional encodings for 3D positions and for view directions.
    embed_pos, ch_pos = get_embedder(args.multires)
    embed_dir, ch_dir = get_embedder(args.multires_views)
    net_coarse = NeRF(layers=args.netdepth,
                      hidden_dims=args.netwidth,
                      input_ch=ch_pos,
                      input_ch_views=ch_dir,
                      output_ch=output_ch,
                      use_viewdirs=True)
    net_fine = NeRF(layers=args.netdepth_fine,
                    hidden_dims=args.netwidth_fine,
                    input_ch=ch_pos,
                    input_ch_views=ch_dir,
                    output_ch=output_ch,
                    use_viewdirs=True)
    # Single optimizer over both networks' parameters.
    params = list(net_coarse.parameters())
    params += list(net_fine.parameters())
    optimizer = torch.optim.Adam(params=params,
                                 lr=args.lrate,
                                 betas=(0.9, 0.999))
    neural_renderer = Renderer(embed_pos, embed_dir, net_coarse, net_fine,
                               cfg=args)
    mem_coarse = cal_model_params(net_coarse)
    mem_fine = cal_model_params(net_fine)
    print(
        f'memory usage: net_coarse:{mem_coarse:.4f} MB net_fine:{mem_fine:.4f} MB'
    )

    # ==================== load pretrained model =========================================================
    start = 0
    if args.checkpoint is not None:
        start = load_checkpoint(args.checkpoint, net_coarse, net_fine,
                                optimizer)
    # A checkpoint found in the log dir overrides an explicit --checkpoint:
    # the latest (lexicographically last) *.tar wins.
    log_dir = os.path.join(args.basedir, args.expname)
    ckpts = [
        os.path.join(log_dir, f) for f in sorted(os.listdir(log_dir))
        if 'tar' in f
    ]
    if len(ckpts) > 0:
        print('Found checkpoints', ckpts[-1])
        start = load_checkpoint(ckpts[-1], net_coarse, net_fine, optimizer)

    # ==================== load data ========================================================================
    images, poses, render_poses, hwf, i_split = load_blender_data(
        args.datadir, args.half_res, args.testskip)
    print('Loaded blender images={} render_poses={} intrinsics={}'.format(
        images.shape, render_poses.shape, hwf))
    i_train, i_val, i_test = i_split
    # Cast intrinsics to right types (H/W must be ints for rendering).
    H, W, focal = hwf
    H, W = int(H), int(W)
    hwf = [H, W, focal]
    if args.render_test:
        render_poses = np.array(poses[i_test])
    # NOTE(review): `device` is a module-level name not visible in this
    # block — presumably the CUDA device; confirm at module top.
    render_poses = torch.Tensor(render_poses).to(device)
    images = torch.Tensor(images).to(device)
    poses = torch.Tensor(poses).to(device)

    # ==================== train ===========================================================================
    global_step = start
    for i in trange(start, args.num_iter):
        # One random training view per iteration.
        img_i = np.random.choice(i_train)
        target = images[img_i]
        pose = poses[img_i, :3, :4]
        rgb, disp, acc, extras = neural_renderer.render(H,
                                                        W,
                                                        focal,
                                                        c2w=pose,
                                                        target_img=target)
        img_loss = mse(rgb, extras['target_rgb'])
        loss = img_loss
        psnr = mse2psnr(img_loss)
        # Add the coarse network's loss when hierarchical sampling produced
        # a coarse prediction ('rgb0').
        if 'rgb0' in extras:
            img_loss0 = mse(extras['rgb0'], extras['target_rgb'])
            loss = loss + img_loss0
            # NOTE(review): psnr0 is computed but never used/logged.
            psnr0 = mse2psnr(img_loss0)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # update learning rate: exponential decay, factor 0.1 every
        # lrate_decay*1000 steps.
        decay_rate = 0.1
        decay_steps = args.lrate_decay * 1000
        new_lr = args.lrate * (decay_rate**(global_step / decay_steps))
        for param_group in optimizer.param_groups:
            param_group['lr'] = new_lr

        if global_step % args.i_print == 0:
            # NOTE(review): torch.cuda.max_memory_cached() is deprecated in
            # favor of max_memory_reserved() in recent torch — confirm the
            # pinned torch version before changing.
            mem = torch.cuda.max_memory_cached() / (1024**2)
            tqdm.write(
                f"[TRAIN] iter{global_step}: loss:{loss.item()} PSNR:{psnr.item()} lr:{new_lr} mem:{mem} MB"
            )
        if global_step % args.i_weights == 0:
            # i == global_step here (both advance together), so the file
            # name matches the saved 'global_step' value.
            path = os.path.join(args.basedir, args.expname,
                                '{:06d}.tar'.format(i))
            torch.save(
                {
                    'global_step': global_step,
                    'net_coarse': net_coarse.state_dict(),
                    'net_fine': net_fine.state_dict(),
                    'optimizer': optimizer.state_dict()
                }, path)
            print('Saved checkpoint at', path)
        if global_step % args.i_img == 0:
            # Render one random validation view (no gradients) and dump it.
            img_i = np.random.choice(i_val)
            pose = poses[img_i, :3, :4]
            with torch.no_grad():
                rgb, disp, acc, extras = neural_renderer.render(H,
                                                                W,
                                                                focal,
                                                                c2w=pose)
            rgb_img = to8b(rgb.cpu().numpy())
            imageio.imwrite(
                os.path.join(args.basedir, args.expname,
                             f"{global_step}.png"), rgb_img)
        global_step += 1
def main(argsv = None, ext_logger=None):
    """Entry point for the databundles CLI.

    Args:
        argsv: Optional list of argument strings; ``None`` means use
            ``sys.argv``. When given, the list is re-split with shlex.
        ext_logger: Optional externally supplied logger; replaces the
            module-global ``logger`` used by the commands.

    Side effects: rebinds the module-global ``logger`` and dispatches to
    the per-command handler; exits the process for ``--version``.
    """
    ##
    ## Hack -- set up the parser twice, so 'ambry --version' will work with no following command
    ##
    _first_arg_parse(argsv)

    ##
    ## Do it again.
    ##
    parser = argparse.ArgumentParser(
        prog='python -mdatabundles',
        description='Databundles {}. Management interface for ambry, libraries and repositories. '.format(
            __version__),
        prefix_chars='-+')

    parser.add_argument('-l', '--library', dest='library_name',
                        default="default",
                        help="Name of library, from the library secton of the config")
    parser.add_argument('-c', '--config', default=None, action='append',
                        help="Path to a run config file")
    parser.add_argument('-v', '--version', default=None, action="store_true",
                        help="Display version")
    parser.add_argument('--single-config', default=False, action="store_true",
                        help="Load only the config file specified")

    cmd = parser.add_subparsers(title='commands', help='command help')

    # Deferred imports: each module registers its own subparser below.
    from .library import library_parser, library_command
    from .warehouse import warehouse_command, warehouse_parser
    from .remote import remote_parser,remote_command
    from test import test_parser, test_command
    from config import config_parser, config_command
    from ckan import ckan_parser, ckan_command
    from source import source_command, source_parser
    from bundle import bundle_command, bundle_parser
    from root import root_command, root_parser

    library_parser(cmd)
    warehouse_parser(cmd)
    ckan_parser(cmd)
    source_parser(cmd)
    remote_parser(cmd)
    test_parser(cmd)
    config_parser(cmd)
    bundle_parser(cmd)
    root_parser(cmd)

    # Re-split a caller-supplied argv so quoted arguments survive intact.
    argsv = shlex.split(' '.join(argsv)) if argsv else None

    args = parser.parse_args(argsv)

    if args.version:
        import ambry
        import sys
        print ("Ambry {}".format(ambry.__version__))
        sys.exit(0)

    # Resolve the run-config path: --single-config demands exactly one -c;
    # a single -c is unwrapped from its list; otherwise pass through as-is.
    if args.single_config:
        if args.config is None or len(args.config) > 1:
            raise Exception("--single_config can only be specified with one -c")
        else:
            rc_path = args.config
    elif args.config is not None and len(args.config) == 1:
        rc_path = args.config.pop()
    else:
        rc_path = args.config

    # Dispatch table: subcommand name -> handler function.
    funcs = {
        'bundle':bundle_command,
        'library':library_command,
        'warehouse':warehouse_command,
        'remote':remote_command,
        'test':test_command,
        'ckan':ckan_command,
        'source': source_command,
        'config': config_command,
        'root': root_command,
    }

    f = funcs.get(args.command, False)

    # 'config install' runs before any config exists, so skip loading it.
    if args.command == 'config' and args.subcommand == 'install':
        rc = None
    else:
        rc = get_runconfig(rc_path)

    global logger
    if ext_logger:
        logger = ext_logger
    else:
        logger = get_logger("{}.{}".format(args.command, args.subcommand),
                            template="%(message)s")
    logger.setLevel(logging.INFO)

    if not f:
        fatal("Error: No command: "+args.command)
    else:
        try:
            f(args, rc)
        except KeyboardInterrupt:
            # Ctrl-C is a normal way to leave long-running commands.
            prt('\nExiting...')
            pass
def main(argsv=None, ext_logger=None):
    """Entry point for the ambry CLI.

    Args:
        argsv: Unused here; arguments are read from ``sys.argv`` by
            ``parser.parse_args()``.
        ext_logger: Optional externally supplied logger; replaces the
            module-global ``global_logger`` used by the commands.

    Side effects: rebinds the module globals ``global_logger`` and
    ``global_run_config``, then dispatches to the per-command handler.
    """
    import ambry._meta
    import os
    import sys

    parser = argparse.ArgumentParser(
        prog='ambry',
        description='Ambry {}. Management interface for ambry, libraries and repositories. '.format(
            ambry._meta.__version__))

    parser.add_argument(
        '-l',
        '--library',
        dest='library_name',
        default="default",
        help="Name of library, from the library secton of the config")
    parser.add_argument(
        '-c',
        '--config',
        default=os.getenv(AMBRY_CONFIG_ENV_VAR),
        action='append',
        help="Path to a run config file. Alternatively, set the AMBRY_CONFIG env var")
    parser.add_argument(
        '--single-config',
        default=False,
        action="store_true",
        help="Load only the config file specified")
    parser.add_argument(
        '-E',
        '--exceptions',
        default=False,
        action="store_true",
        help="Show full exception trace on all exceptions")

    cmd = parser.add_subparsers(title='commands', help='command help')

    # Deferred imports: each module registers its own subparser below.
    from .library import library_parser, library_command
    from .warehouse import warehouse_command, warehouse_parser
    from .remote import remote_parser, remote_command
    from test import test_parser, test_command
    from config import config_parser, config_command
    from ckan import ckan_parser, ckan_command
    from source import source_command, source_parser
    from bundle import bundle_command, bundle_parser
    from root import root_command, root_parser
    from ..dbexceptions import ConfigurationError

    library_parser(cmd)
    warehouse_parser(cmd)
    ckan_parser(cmd)
    source_parser(cmd)
    remote_parser(cmd)
    test_parser(cmd)
    config_parser(cmd)
    bundle_parser(cmd)
    root_parser(cmd)

    args = parser.parse_args()

    # Resolve the run-config path: --single-config demands exactly one -c;
    # a single -c is unwrapped from its list; otherwise pass through as-is.
    if args.single_config:
        if args.config is None or len(args.config) > 1:
            raise Exception(
                "--single_config can only be specified with one -c")
        else:
            rc_path = args.config
    elif args.config is not None and len(args.config) == 1:
        rc_path = args.config.pop()
    else:
        rc_path = args.config

    # Dispatch table: subcommand name -> handler function.
    funcs = {
        'bundle': bundle_command,
        'library': library_command,
        'warehouse': warehouse_command,
        'remote': remote_command,
        'test': test_command,
        'ckan': ckan_command,
        'source': source_command,
        'config': config_command,
        'root': root_command,
    }

    global global_logger

    if ext_logger:
        global_logger = ext_logger
    else:
        name = "{}.{}".format(args.command, args.subcommand)
        global_logger = get_logger(name, template="%(levelname)s: %(message)s")

    global_logger.setLevel(logging.INFO)

    f = funcs.get(args.command, False)

    # 'config install' runs before any config exists, so skip loading it.
    if args.command == 'config' and args.subcommand == 'install':
        rc = None
    else:
        try:
            rc = get_runconfig(rc_path)
        except ConfigurationError:
            # NOTE(review): if fatal() does not exit the process, `rc` is
            # unbound below — presumably fatal() calls sys.exit; confirm.
            fatal(
                "Could not find configuration file at {}\nRun 'ambry config install; to create one ",
                rc_path)

    global global_run_config
    global_run_config = rc

    if not f:
        fatal("Error: No command: " + args.command)
    else:
        try:
            f(args, rc)
        except KeyboardInterrupt:
            # Ctrl-C is a normal way to leave long-running commands.
            prt('\nExiting...')
            pass
        except ConfigurationError as e:
            # -E/--exceptions re-raises for a full traceback; otherwise
            # print a one-line fatal message.
            if args.exceptions:
                raise
            fatal("{}: {}".format(str(e.__class__.__name__), str(e)))
# Quick manual check: parse the shared config file and show the result
# returned by config.config_parser (a project helper — not stdlib).
import config

conf_path = "../config/conf.ini"
key_file = config.config_parser(conf_path)
# Fix: use the parenthesized print call instead of the Python-2-only
# `print key_file` statement. With a single operand the output is
# identical under Python 2, and the script now also runs under Python 3.
print(key_file)