def test_it_reads_given_config_file_from_environment_var(self, configparser):
    fixture_config = os.path.join(APP_BASEDIR, 'tests', 'fixtures', 'custom.conf.test')
    with patch.dict(os.environ, {'CONFIG_FILE': fixture_config}):
        generate_config()
        configparser.return_value.read.assert_called_once_with(fixture_config)
def generate_solr_configs(self, core_admin_conf=None):
    if len(self.indexes) > 1:
        generate_multicore_schema(core_admin_conf, **self.indexes)
    else:
        # dict views are not indexable in Python 3, so materialize first
        index = list(self.indexes.values())[0]
        index.solr_schema.generate()
        generate_config(index.solr_config)
def parse_args():
    parser = argparse.ArgumentParser(description='Train face network')
    # general
    parser.add_argument('--dataset', default=default.dataset, help='dataset config')
    parser.add_argument('--network', default=default.network, help='network config')
    parser.add_argument('--loss', default=default.loss, help='loss config')
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset, args.loss)
    parser.add_argument('--models-root', default=default.models_root, help='root directory to save model.')
    parser.add_argument('--pretrained', default=default.pretrained, help='pretrained model to load')
    parser.add_argument('--pretrained-epoch', type=int, default=default.pretrained_epoch, help='pretrained epoch to load')
    parser.add_argument('--ckpt', type=int, default=default.ckpt,
                        help='checkpoint saving option. 0: discard saving. 1: save when necessary. 2: always save')
    parser.add_argument('--verbose', type=int, default=default.verbose,
                        help='do verification testing and model saving every verbose batches')
    parser.add_argument('--lr', type=float, default=default.lr, help='start learning rate')
    parser.add_argument('--lr-steps', type=str, default=default.lr_steps, help='steps of lr changing')
    parser.add_argument('--wd', type=float, default=default.wd, help='weight decay')
    parser.add_argument('--mom', type=float, default=default.mom, help='momentum')
    parser.add_argument('--frequent', type=int, default=default.frequent, help='')
    parser.add_argument('--kvstore', type=str, default=default.kvstore, help='kvstore setting')
    args = parser.parse_args()
    return args
def parse_args():
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
    # general
    parser.add_argument('--network', help='network name', default=default.network, type=str)
    parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset)
    parser.add_argument('--image_set', help='image_set name', default=default.test_image_set, type=str)
    parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
    parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
    # testing
    parser.add_argument('--prefix', help='model to test with', default=default.rcnn_prefix, type=str)
    parser.add_argument('--epoch', help='model to test with', default=default.rcnn_epoch, type=int)
    parser.add_argument('--gpu', help='GPU device to test with', default=0, type=int)
    # rcnn
    parser.add_argument('--vis', help='turn on visualization', action='store_true')
    parser.add_argument('--thresh', help='valid detection threshold', default=1e-3, type=float)
    parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
    parser.add_argument('--has_rpn', help='generate proposals on the fly', action='store_true')
    parser.add_argument('--proposal', help='can be ss for selective search or rpn', default='rpn', type=str)
    args = parser.parse_args()
    return args
def parse_args():
    parser = argparse.ArgumentParser(description="Train face network")
    # general
    parser.add_argument("--network", default=default.network, help="network config")
    parser.add_argument("--loss", default=default.loss, help="loss config")
    parser.add_argument("--dataset", default=default.dataset, help="dataset")
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset, args.loss)
    args = parser.parse_args()
    return args
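Most of the parse_args variants in this collection share a two-phase argparse pattern: parse_known_args() reads only the config-selecting flags, generate_config() mutates the shared default/config objects, and the add_argument() calls registered afterwards pick up the updated defaults. A minimal self-contained sketch of that pattern; the stub generate_config and all names here are illustrative, not taken from any of these codebases:

import argparse

class _Defaults:
    """Stand-in for the shared `default` config object."""
    network = 'r100'
    dataset = 'emore'
    lr = 0.1

default = _Defaults()

def generate_config(network, dataset):
    # Hypothetical: real versions rewrite fields of `default`/`config`
    # based on the chosen network/dataset combination.
    if network == 'r100':
        default.lr = 0.01

parser = argparse.ArgumentParser()
parser.add_argument('--network', default=default.network)
parser.add_argument('--dataset', default=default.dataset)
args, rest = parser.parse_known_args()
generate_config(args.network, args.dataset)
# Arguments registered after generate_config() see the updated defaults,
# so --lr defaults to 0.01 once --network r100 has been selected.
parser.add_argument('--lr', type=float, default=default.lr)
args = parser.parse_args()
print(args)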
def parse_args():
    parser = argparse.ArgumentParser(description='Test a Region Proposal Network')
    # general
    parser.add_argument('--network', help='network name', default=default.network, type=str)
    parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset)
    parser.add_argument('--image_set', help='image_set name', default=default.test_image_set, type=str)
    parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
    parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
    # testing
    parser.add_argument('--prefix', help='model to test with', default=default.rpn_prefix, type=str)
    parser.add_argument('--epoch', help='model to test with', default=default.rpn_epoch, type=int)
    # rpn
    parser.add_argument('--gpu', help='GPU device to test with', default=0, type=int)
    parser.add_argument('--vis', help='turn on visualization', action='store_true')
    parser.add_argument('--thresh', help='rpn proposal threshold', default=0, type=float)
    parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
    args = parser.parse_args()
    return args
def main(args):
    # given program arguments, generate a config file
    config = cfg.generate_config(args)

    # if given a best state then we load its config
    if args.state:
        logging.info('loading config from {}'.format(args.state))
        best_state = torch.load(args.state)
        config = best_state['config']

    # create a checkpoint directory
    model_dir = utl.generate_experiment_dir(args.model_dir, config, prefix_str='S3DIS-hilbert')

    # configure logger
    utl.configure_logger(model_dir, args.loglevel.upper())

    # get Tensorboard writer object
    writer = utl.get_tensorboard_writer(log_dir=model_dir)

    train(config=config, model_dir=model_dir, writer=writer)

    # close Tensorboard writer
    writer.close()
def test_it_source_config_from_given_config_file(self):
    fixture_config = os.path.join(APP_BASEDIR, 'tests', 'fixtures', 'custom.conf.test')
    with patch.dict(os.environ, {'CONFIG_FILE': fixture_config}):
        config = generate_config()
        assert config['github_user'] == 'someuser'
        assert config['github_proxy'] == 'https://ghproxy.github.com'
        assert config['github_status_text'] == "Label requirements not satisfied"
        assert config['github_status_url'] == "https://somewhere_with_more_information.com"
def init_arguments():
    """Initialize arguments, replacing the default ones."""
    parser = argparse.ArgumentParser(description='Main script-launcher for training of ZSL models')

    # general configs
    parser.add_argument('--model', default=config.model,
                        help='Name of model to use for ZSL training.')
    parser.add_argument('--datasets', default=config.datasets,
                        help='Name of datasets to use for ZSL training.')
    args, rest = parser.parse_known_args()
    # datasets = args.datasets.split(',')  # for multiple datasets
    generate_config(parsed_model=args.model, parsed_datasets=args.datasets)
    # place other arguments that are not in config.py here, if necessary

    return parser
def parse_args():
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
    # general
    parser.add_argument('--network', help='network name', default=default.network, type=str)
    parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset)
    # testing
    parser.add_argument('--prefix', help='model to test with', default=default.rcnn_prefix, type=str)
    parser.add_argument('--epoch', help='model to test with', default=default.rcnn_epoch, type=int)
    parser.add_argument('--gpu', help='GPU device to test with', default=0, type=int)
    # rcnn
    parser.add_argument('--vis', help='turn on visualization', action='store_true')
    parser.add_argument('--thresh', help='valid detection threshold', default=1e-3, type=float)
    parser.add_argument('--image_name', help='image file path', type=str)
    args = parser.parse_args()
    return args
def test_inference_vs_train(self):
    self.assertTrue(False)  # disable and auto fail this test for now
    tf.reset_default_graph()
    with tf.Session() as sess:
        conf = config.generate_config(keep_prob=1.0)
        conf['batch_size'] = 1
        data = data_pipe.Data('./example_data/', conf['batch_size'], conf['max_word_len'], conf['max_line_len'])
        model, free_model = train.build_model(data, conf)
        data.initialize(sess, data.datadir + '*')
        sess.run(tf.tables_initializer())
        sess.run(tf.global_variables_initializer())
        (out_logits_4,
         src_sentence_3,
         src_sent_len_1,
         trg_sentence_3,
         trg_sent_len_1) = sess.run([model.out_logits_4,
                                     data.src.to_tensor(-1),
                                     data.src_sentence_len,
                                     data.trg.to_tensor(-1),
                                     data.trg_sentence_len])
        src = data.array_to_strings(src_sentence_3)[0].replace(data.go_stop_token, '')
        trg = data.array_to_strings(trg_sentence_3)[0].replace(data.go_stop_token, '')
        # trg is the concatenation of itself with src. Restore the stop word that delimits them
        trg = trg[len(src):]
        trg = src + ' ' + data.go_stop_token + ' ' + trg.strip()  # recombine src and trg
        print(src)
        print(trg)
        feed = {data.src_place: src, data.trg_place: trg}
        (free_logits_4,
         src_sentence_inference,
         trg_sentence_inference) = sess.run([free_model.out_logits_4,
                                             data.src_inference.to_tensor(-1),
                                             data.trg_inference.to_tensor(-1)],
                                            feed_dict=feed)
        # Get the first batch line and trim potential batch padding from the model's logits
        out_logits_3 = out_logits_4[0, :free_logits_4.shape[1], :free_logits_4.shape[2], :]
        # Check that the model's outputs are the same regardless of what data pipeline is used
        self.assertTrue((np.abs(out_logits_3 - free_logits_4[0]) < 1e-5).all())
        # Run the inference model as though generating one char at a time, and check the outputs
        feed = {data.src_place: src, data.trg_place: ''}  # Start with no input
        free_logits_4 = sess.run(free_model.out_logits_4, feed_dict=feed)
        self.assertTrue((np.abs(free_logits_4[0, 0, 0, :] - out_logits_3[0, 0, :]) <= 1e-5).all())
        trg = trg.split()
        trg_so_far = ''
        for word_idx, trg_word in enumerate(trg):
            for chr_num in range(len(trg_word)):
                trg_so_far += trg_word[chr_num]
                feed = {data.src_place: src, data.trg_place: trg_so_far}
                free_logits_4 = sess.run(free_model.out_logits_4, feed_dict=feed)
                # print (free_logits_4[0, word_idx, chr_num + 1, :] - out_logits_3[word_idx, chr_num + 1, :]) < 1e-4
                self.assertTrue((np.abs(free_logits_4[0, word_idx, chr_num + 1, :] -
                                        out_logits_3[word_idx, chr_num + 1, :]) <= 1e-5).all())
            trg_so_far += ' '
def sess_setup(datadir, restore_dir, batch_size=1):
    conf = config.generate_config(keep_prob=1.0, noise_level=0)
    data = data_pipe.Data(datadir, batch_size, conf['max_word_len'], conf['max_line_len'], eval_mode=True)
    model, free_model = train.build_model(data, conf)
    sess = tf.Session()
    saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=1)
    sess.run(tf.tables_initializer())
    sess.run(tf.global_variables_initializer())
    data.initialize(sess, data.datadir + '*')
    saver.restore(sess, restore_dir)
    return model, free_model, data, sess
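A hypothetical call to sess_setup above; the data directory and checkpoint path are placeholders, not paths from the original project:

model, free_model, data, sess = sess_setup('./example_data/', './checkpoints/model-best')
# run the free-running (inference) model on a raw string pair
logits = sess.run(free_model.out_logits_4,
                  feed_dict={data.src_place: 'hello world', data.trg_place: ''})
sess.close()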
async def start(_):
    conf = config.generate_config(
        {
            'unis': 'http://localhost:8888',
            'remote': 'http://localhost:8888',
            'topology': 'Local Topology',
            'domain': 'ZOF Domain',
            'of_port': 6653
        }, APP.args.__dict__)
    APP.SDN = SDN_Handler(runtime_url=conf['unis'], domain_name=conf['domain'])
    APP.website = None
    warnings.filterwarnings('ignore')
    APP.logger.info("\nStarting ZOF Topology Controller\n \
        Local UNIS: " + conf['unis'] + "\n \
        Remote UNIS: " + conf['remote'] + "\n \
        Remote Topology Name: " + conf['topology'] + "\n\
        Domain Name: " + conf['domain'])

    # Registration check/update domain/topology resources.
    RegHandler = RegistrationHandler(APP.SDN.rt, conf['remote'])
    APP.logger.info("Checking Local Topology and Domain")
    local_topology = RegHandler.check_local_topology("Local Topology")
    local_domain = RegHandler.check_local_domain(conf['domain'], local_topology)
    APP.logger.info("Attempting to register domain to remote topology")
    remote_topology = RegHandler.register_remote(conf['topology'], local_domain)
    if not local_topology or not local_domain or not remote_topology:
        APP.logger.info("ERROR - problem with startup local or remote registration.")
    else:
        APP.logger.info("Successfully registered topology and domain")
    RegHandler.clean_up()
    APP.SDN.domain_name = APP.args.domain
    APP.SDN.local_domain = local_domain
def create_app():
    """Creates the primary flask app.

    Responsible for
    1. Getting the configurations
    2. Initializing the database
    3. Adding an ES instance
    4. Registering sub app blueprints

    Returns
    -------
    Flask App
        The actual whole app
    """
    # Set main flask app
    logging.info('Initializing application')
    app = Flask(__name__)

    # Set up app configs
    logging.info('Initializing configurations')
    new_configs = config.generate_config()
    app.config.update(new_configs)
    app.secret_key = app.config['FLASK_SECRET']
    app.json_encoder = config.BetterEncoder

    logging.info('Registering blueprints')
    from .api import api as api_blueprint
    app.register_blueprint(api_blueprint, url_prefix='/api/v1')

    @app.errorhandler(404)
    def page_not_found(e):
        return jsonify(error=404, text=str(e)), 404

    @app.errorhandler(500)
    def server_error(e):
        return jsonify(error=500, text=str(e)), 500

    return app
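A minimal sketch of how an application factory like create_app is typically driven; the package name is an assumption, not from the original project:

# run_dev.py -- hypothetical development entry point
from myservice import create_app  # assumed package name

app = create_app()

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)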
def parse_args():
    parser = argparse.ArgumentParser(description='Train parallel face network')
    # general
    parser.add_argument('--dataset', default=default.dataset, help='dataset config')
    parser.add_argument('--network', default=default.network, help='network config')
    parser.add_argument('--loss', default=default.loss, help='loss config')
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset, args.loss)
    parser.add_argument('--models-root', default=default.models_root, help='root directory to save model.')
    parser.add_argument('--pretrained', default=default.pretrained, help='pretrained model to load')
    parser.add_argument('--ckpt', type=int, default=default.ckpt,
                        help='checkpoint saving option. 0: discard saving. 1: save when necessary. 2: always save')
    parser.add_argument('--verbose', type=int, default=default.verbose,
                        help='do verification testing and model saving every verbose batches')
    parser.add_argument('--max-steps', type=int, default=config.max_steps, help='max training batches')
    parser.add_argument('--lr', type=float, default=default.lr, help='start learning rate')
    parser.add_argument('--lr-steps', type=str, default=default.lr_steps, help='steps of lr changing')
    parser.add_argument('--wd', type=float, default=default.wd, help='weight decay')
    parser.add_argument('--mom', type=float, default=default.mom, help='momentum')
    parser.add_argument('--frequent', type=int, default=default.frequent, help='')
    parser.add_argument('--per-batch-size', type=int, default=default.per_batch_size, help='batch size in each context')
    parser.add_argument('--kvstore', type=str, default=default.kvstore, help='kvstore setting')
    parser.add_argument('--worker-id', type=int, default=0, help='worker id for dist training, starts from 0')
    parser.add_argument('--margin-policy', type=str, default='fixed', help='margin_m policy [fixed, step, linear]')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train face alignment')
    # general
    parser.add_argument('--network', help='network name', default=default.network, type=str)
    parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset)
    parser.add_argument('--prefix', default=default.prefix, help='directory to save model.')
    parser.add_argument('--pretrained', default=default.pretrained, help='')
    parser.add_argument('--optimizer', default='nadam', help='')
    parser.add_argument('--lr', type=float, default=default.lr, help='')
    parser.add_argument('--wd', type=float, default=default.wd, help='')
    parser.add_argument('--per-batch-size', type=int, default=default.per_batch_size, help='')
    parser.add_argument('--lr-step', help='learning rate steps (in epoch)', default=default.lr_step, type=str)
def parse_args():
    parser = argparse.ArgumentParser(description='Train face network')
    # general
    # default dataset configuration for training
    parser.add_argument('--dataset', default=default.dataset, help='dataset config')
    # default network architecture
    parser.add_argument('--network', default=default.network, help='network config')
    # default loss function
    parser.add_argument('--loss', default=default.loss, help='loss config')
    # parse the known arguments first
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset, args.loss)
    # directory where models are saved
    parser.add_argument('--models-root', default=default.models_root, help='root directory to save model.')
    # pretrained model to load
    parser.add_argument('--pretrained', default=default.pretrained, help='pretrained model to load')
    # epoch of the pretrained model to load
    parser.add_argument('--pretrained-epoch', type=int, default=default.pretrained_epoch, help='pretrained epoch to load')
    # whether to save checkpoint files
    parser.add_argument('--ckpt', type=int, default=default.ckpt,
                        help='checkpoint saving option. 0: discard saving. 1: save when necessary. 2: always save')
    # run verification once every `verbose` batches
    parser.add_argument('--verbose', type=int, default=default.verbose,
                        help='do verification testing and model saving every verbose batches')
    # learning rate
    parser.add_argument('--lr', type=float, default=default.lr, help='start learning rate')
    parser.add_argument('--lr-steps', type=str, default=default.lr_steps, help='steps of lr changing')
    # weight decay
    parser.add_argument('--wd', type=float, default=default.wd, help='weight decay')
    # momentum for gradient descent
    parser.add_argument('--mom', type=float, default=default.mom, help='momentum')
    parser.add_argument('--frequent', type=int, default=default.frequent, help='')
    # number of samples per batch on each GPU
    parser.add_argument('--per-batch-size', type=int, default=default.per_batch_size, help='batch size in each context')
    # key-value store setting
    parser.add_argument('--kvstore', type=str, default=default.kvstore, help='kvstore setting')
    args = parser.parse_args()
    return args
import sys
import os
sys.path.append(os.path.join('recognition', 'symbol'))
sys.path.append(os.path.join('recognition'))
import fmobilenetV3
from config import config, default, generate_config

network = 'mv3'
dataset = 'emore'
loss = 'arcface'
generate_config(network, dataset, loss)
fc1 = eval(config.net_name).get_symbol()
def parse_args():
    parser = argparse.ArgumentParser(description='Train Faster R-CNN network')
    # general
    parser.add_argument('--network', help='network name', default=default.network, type=str)
    parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset)
    parser.add_argument('--image_set', help='image_set name', default=default.image_set, type=str)
    parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
    parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
    # training
    parser.add_argument('--frequent', help='frequency of logging', default=default.frequent, type=int)
    parser.add_argument('--kvstore', help='the kv-store type', default=default.kvstore, type=str)
    parser.add_argument('--work_load_list', help='work load for different devices', default=None, type=list)
    parser.add_argument('--no_flip', help='disable flip images', action='store_true')
    parser.add_argument('--no_shuffle', help='disable random shuffle', action='store_true')
    parser.add_argument('--resume', help='continue training', action='store_true')
    # e2e
    parser.add_argument('--gpus', help='GPU device to train with', default='0', type=str)
    parser.add_argument('--pretrained', help='pretrained model prefix', default=default.pretrained, type=str)
    parser.add_argument('--pretrained_epoch', help='pretrained model epoch', default=default.pretrained_epoch, type=int)
    parser.add_argument('--prefix', help='new model prefix', default=default.e2e_prefix, type=str)
    parser.add_argument('--begin_epoch', help='begin epoch of training, use with resume', default=0, type=int)
    parser.add_argument('--end_epoch', help='end epoch of training', default=default.e2e_epoch, type=int)
    parser.add_argument('--lr', help='base learning rate', default=default.e2e_lr, type=float)
    parser.add_argument('--lr_step', help='learning rate steps (in epoch)', default=default.e2e_lr_step, type=str)
    parser.add_argument('--no_ohem', help='disable online hard mining', action='store_true')
    args = parser.parse_args()
    return args
def parse_args():
    parser = argparse.ArgumentParser(description='Test a Faster R-CNN network')
    # general
    parser.add_argument('--network', help='network name', default=default.network, type=str)
    parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset)
    parser.add_argument('--image_set', help='image_set name', default=default.test_image_set, type=str)
    parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
    parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
    parser.add_argument('--method_name', help='method name for official WIDER toolbox', default='ESSH-R50', type=str)
    # testing
    parser.add_argument('--prefix', help='model to test with', default='model/essh-r50', type=str)
    parser.add_argument('--epoch', help='model to test with', default=0, type=int)
    parser.add_argument('--gpu', help='GPU device to test with', default=0, type=int)
    parser.add_argument('--output', help='output folder', default=os.path.join('./output', 'essh-r50'), type=str)
    parser.add_argument('--pyramid', help='enable pyramid test', action='store_true')
    # rcnn
    parser.add_argument('--vis', help='turn on visualization', action='store_true')
    parser.add_argument('--thresh', help='valid detection threshold', default=0.02, type=float)
    parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
    parser.add_argument('--has_rpn', help='generate proposals on the fly', action='store_true', default=True)
    parser.add_argument('--proposal', help='can be ss for selective search or rpn', default='rpn', type=str)
    args = parser.parse_args()
    return args
def test_it_source_config_from_environment_with_missing_conf(self):
    with patch.dict(os.environ, {'CONFIG_FILE': '/conf/notexist', 'GITHUB_USER': 'ghuser'}):
        config = generate_config()
        assert config['github_user'] == 'ghuser'
def test_it_source_config_from_given_config_file(self):
    fixture_config = os.path.join(APP_BASEDIR, 'tests', 'fixtures', 'custom.conf.test')
    with patch.dict(os.environ, {'CONFIG_FILE': fixture_config}):
        config = generate_config()
        assert config['github_user'] == 'someuser'
def parse_args():
    parser = argparse.ArgumentParser(description='Train face network')
    # general
    parser.add_argument('--dataset', default=default.dataset, help='dataset config')
    parser.add_argument('--network', default=default.network, help='network config')
    parser.add_argument('--loss', default=default.loss, help='loss config')
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset, args.loss)
    parser.add_argument('--models-root', default=default.models_root, help='root directory to save model.')
    parser.add_argument('--pretrained', default='', help='pretrained model to load')
    parser.add_argument('--ckpt', type=int, default=default.ckpt,
                        help='checkpoint saving option. 0: discard saving. 1: save when necessary. 2: always save')
    parser.add_argument('--verbose', type=int, default=default.verbose,
                        help='do verification testing and model saving every verbose batches')
    parser.add_argument('--max-steps', type=int, default=0, help='max training batches')
    parser.add_argument('--end-epoch', type=int, default=100000, help='training epoch size.')
    parser.add_argument('--lr', type=float, default=default.lr, help='start learning rate')
    parser.add_argument('--lr-steps', type=str, default=default.lr_steps, help='steps of lr changing')
    parser.add_argument('--wd', type=float, default=default.wd, help='weight decay')
    parser.add_argument('--mom', type=float, default=default.mom, help='momentum')
    parser.add_argument('--frequent', type=int, default=default.frequent, help='')
    parser.add_argument('--fc7-wd-mult', type=float, default=1.0, help='weight decay mult for fc7')
    parser.add_argument('--fc7-lr-mult', type=float, default=1.0, help='lr mult for fc7')
    parser.add_argument('--fc7-no-bias', default=False, action='store_true', help='fc7 no bias flag')
    parser.add_argument('--per-batch-size', type=int, default=default.per_batch_size, help='batch size in each context')
    parser.add_argument('--rand-mirror', type=int, default=1, help='if do random mirror in training')
    parser.add_argument('--cutoff', type=int, default=0, help='cut off aug')
    parser.add_argument('--color', type=int, default=0, help='color jittering aug')
    parser.add_argument('--images-filter', type=int, default=0, help='minimum images per identity filter')
    parser.add_argument('--ce-loss', default=False, action='store_true', help='if output ce loss')
    args = parser.parse_args()
    return args
def test_it_reads_given_config_file_from_environment_var(self, configparser):
    with patch.dict(os.environ, {'CONFIG_FILE': '/someconf'}):
        generate_config()
        configparser.return_value.read.assert_called_once_with('/someconf')
def parse_args():
    parser = argparse.ArgumentParser(description='Train face network')
    # general
    parser.add_argument('--dataset', default=default.dataset, help='dataset config')
    parser.add_argument('--network', default=default.network, help='network config')
    parser.add_argument('--loss', default=default.loss, help='loss config')
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset, args.loss)
    # custom
    parser.add_argument('--models-root', default=default.models_root, help='root directory to save model.')
    parser.add_argument('--pretrained', default=default.pretrained, help='pretrained model to load')
    parser.add_argument('--pretrained-epoch', type=int, default=default.pretrained_epoch, help='pretrained epoch to load')
    parser.add_argument('--ckpt', type=int, default=default.ckpt,
                        help='checkpoint saving option. 0: discard saving. 1: save when necessary. 2: always save')
    parser.add_argument('--verbose', type=int, default=default.verbose,
                        help='do verification testing and model saving every verbose batches')
    parser.add_argument('--num-workers', type=int, default=default.num_workers, help='number of workers for data loading')
    parser.add_argument('--cos-lr', action='store_true', help='whether to use cosine lr schedule.')
    parser.add_argument('--lr', type=float, default=default.lr, help='start learning rate')
    parser.add_argument('--lr-steps', type=str, default=default.lr_steps, help='steps of lr changing')
    parser.add_argument('--end-epoch', type=int, default=default.end_epoch, help='number of training epochs (default: 120)')
    parser.add_argument('--frequent', type=int, default=default.frequent, help='Number of batches to wait before logging.')
    parser.add_argument('--per-batch-size', type=int, default=default.per_batch_size, help='batch size in each context')
    parser.add_argument('--kvstore', type=str, default=default.kvstore, help='kvstore setting')
    parser.add_argument('--opt', type=str, default=default.opt, help='optimizer name')
    parser.add_argument('--no-wd', action='store_true',
                        help='whether to remove weight decay on bias, and beta/gamma for batchnorm layers.')
    parser.add_argument('--selected-attributes', type=int, default=None)
    parser.add_argument('--last-gamma', action='store_true',
                        help='whether to init gamma of the last BN layer in each bottleneck to 0.')
    parser.add_argument('--freeze-block', type=int, default=0, help='whether to freeze the pre-layer for finetune')
    parser.add_argument('--label-smoothing', action='store_true',
                        help='use label smoothing or not in training. default is false.')
    parser.add_argument('--model-visual', action='store_true', help='visualize Neural Networks as computation graph.')
    args = parser.parse_args()
    return args
def test_it_reads_default_config_file(self, configparser, mock_os_path_exists):
    mock_os_path_exists.return_value = True
    generate_config()
    configparser.return_value.read.assert_called_once_with(
        os.path.join(APP_BASEDIR, 'custom.conf'))
def test_it_source_config_from_environment(self):
    with patch.dict(os.environ, {'GITHUB_USER': 'ghuser',
                                 'GITHUB_PROXY': 'https://ghproxy.github.com'}):
        config = generate_config()
        assert config['github_user'] == 'ghuser'
        assert config['github_proxy'] == 'https://ghproxy.github.com'
def config(self):
    import bpsmap
    import config
    import pysam
    import pandas as pd
    import numpy as np
    parser = argparse.ArgumentParser(description='Generate localhap config for each individual')
    parser.add_argument('-f', '--sv-file', dest='sv_file', required=True,
                        help='Individual SV file')
    parser.add_argument('-b', '--bam-file', dest='bam_file', required=True,
                        help='Individual BAM file')
    # parser.add_argument('-S', '--seeksv',
    #                     dest='is_seeksv',
    #                     action='store_true',
    #                     help='Whether seeksv results')
    parser.add_argument('-m', '--bps-map', dest='bps_map', required=True,
                        help='Breakpoint map file')
    # group.add_argument('-J', '--create-junc-db',
    #                    dest='new_junc_db',
    #                    help='New junction database')
    parser.add_argument('-j', '--junc-db', dest='junc_db', required=True,
                        help='Junction database')
    parser.add_argument('-d', '--depth-tabix', dest='depth_file', required=True,
                        help='Tabixed depth for counting supports')
    parser.add_argument('-C', '--chrom-info', dest='chrom_info', required=True,
                        help='Chromosome information')
    parser.add_argument('-s', '--sample-name', dest='sample_name', required=True,
                        help='Sample name')
    parser.add_argument('-r', '--region', dest='region', default=None, help='Region')
    parser.add_argument('-e', '--extension-bp', dest='ext', required=True, type=int,
                        help='Extended bp for normal junctions')
    parser.add_argument('-p', '--ploidy', dest='ploidy', required=True, default=2, type=int,
                        help='Ploidy')
    parser.add_argument('-c', '--out-config', dest='out_config', required=True,
                        help='Output path of config')
    parser.add_argument('-g', '--segment', dest='seg', required=True,
                        help='Output path of segment')
    parser.add_argument('-i', '--keep-imprecise', dest='keep_imprecise',
                        action='store_true', default=False, help='Keep imprecise SV')
    parser.add_argument('-I', '--keep-insertions', dest='keep_insertions',
                        action='store_true', default=False, help='Keep insertions')
    args = parser.parse_args(sys.argv[2:])

    # chrom = args.region.split(':')[0]
    # start, end = [int(i) for i in args.region.split(':')[1].split('-')]
    bps_map = pd.read_csv(args.bps_map, sep='\t')
    chrom_infos = pd.read_csv(args.chrom_info, sep='\t')

    print('Reading SV')
    # if not args.is_seeksv:
    sv = bpsmap.read_sv(args.sv_file)
    # print(sv)
    # sv = bpsmap.get_precise_sv_svaba(sv, chrom, start, end)
    # n, nc, sv = bpsmap.merge_sv_tgs2sgs(sv, sv, 10)
    # else:
    #     sv = bpsmap.get_precise_sv_seeksv(args.sv_file, chrom, start, end)
    sv = bpsmap.get_precise_sv(sv,
                               drop_imprecise=not args.keep_imprecise,
                               drop_insertions=not args.keep_insertions)
    print(bps_map)
    # config.map_bps_sv(sv, bps_map)  # not sure why, but it only works when this is commented out
    config.map_bps_chrom_infos(chrom_infos, bps_map)
    sv = config.dedup(sv)

    segs = pd.DataFrame()
    id_start = 1
    for row in chrom_infos.itertuples():
        seg, id_start = config.segmentation(sv, row.chrom, row.start, row.end, id_start,
                                            drop_imprecise=not args.keep_imprecise,
                                            drop_insertions=not args.keep_insertions)
        segs = segs.append(seg)
    # segs = config.segmentation(sv, chrom, start, end)
    segs.to_csv(args.seg, index=False, sep='\t')

    bam = pysam.AlignmentFile(args.bam_file)
    depth_tabix = pysam.TabixFile(args.depth_file)
    # print('Calculating avg depth')
    # avg_depth = config.get_avg_depth(depth_tabix, chrom, start, end)
    # avg_depth = 300
    print('Updating junc db')
    # if args.junc_db:
    #     junc_db = pd.read_table(args.junc_db)
    #     junc_db = config.update_junc_db_by_sv(sv, junc_db, depth_tabix)
    #     junc_db = config.update_junc_db_by_seg(segs, junc_db, depth_tabix, chrom, start, end, bam, args.ext)
    #     config.write_junc_db(args.junc_db, junc_db)
    # if args.new_junc_db:
    junc_db = pd.DataFrame(columns=['chrom_5p', 'pos_5p', 'strand_5p',
                                    'chrom_3p', 'pos_3p', 'strand_3p', 'count'])
    junc_db = config.update_junc_db_by_sv(sv, junc_db)
    # for chrom in segs.chrom.unique():
    #     chrom_info = chrom_infos.loc[lambda row: row.chrom == chrom].iloc[0]
    #     seg = segs.loc[lambda row: row.chrom == chrom]
    junc_db = config.update_junc_db_by_seg_in_chrom(segs, junc_db, bam, args.ext)
    config.write_junc_db(args.junc_db, junc_db)

    config.generate_config(args.out_config, args.sample_name, sv, segs,
                           depth_tabix, bam, ext=args.ext, ploidy=args.ploidy)
def test_it_reads_default_config_file(self, configparser):
    generate_config()
    configparser.return_value.read.assert_called_once_with(
        os.path.join(APP_BASEDIR, 'custom.conf'))
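Taken together, these tests suggest a generate_config that resolves an INI file (the CONFIG_FILE environment variable, falling back to APP_BASEDIR/custom.conf) and lets environment variables override file values. A minimal sketch under those assumptions; the key names and flat [DEFAULT]-section layout are guesses, not the project's actual implementation:

import configparser
import os

APP_BASEDIR = os.path.dirname(os.path.abspath(__file__))

def generate_config():
    parser = configparser.ConfigParser()
    default_path = os.path.join(APP_BASEDIR, 'custom.conf')
    config_file = os.environ.get('CONFIG_FILE', default_path)
    parser.read(config_file)  # configparser silently skips missing files
    config = dict(parser.defaults())  # assumes flat keys in the [DEFAULT] section
    # environment variables such as GITHUB_USER take precedence over file values
    for key in ('github_user', 'github_proxy', 'github_status_text', 'github_status_url'):
        if key.upper() in os.environ:
            config[key] = os.environ[key.upper()]
    return config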
def configure():
    from config import config, config_filename, generate_config
    if not config:
        logger.info('generating new config {}'.format(config_filename))
        generate_config(config_filename)
    click.edit(filename=config_filename)
def generate(n, index, seed=1994):
    table = generate_table(n, seed)
    row = table.iloc[index, :]
    generate_config(row, fout='config.dat')
    generate_input(row, fout='simulation.in')
    return
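A hypothetical invocation of the helper above, assuming generate_table, generate_config, and generate_input are importable from the same module; the output filenames come from the defaults in the snippet:

generate(n=100, index=7)  # writes 'config.dat' and 'simulation.in' for row 7 of the 100-row table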