def add_logging_arguments(ap: argparse.ArgumentParser):
    ap.add_argument('--loglevel', action='store', default='INFO',
                    help="Log level", env_var='LOG_LEVEL')
    ap.add_argument('--logformatter', action='store', default='text',
                    help="Log formatter", env_var='LOG_FORMATTER')
def add_fs_asset_store_arguments(ap: argparse.ArgumentParser):
    ap.add_argument('--asset-store-url-prefix', action='store',
                    metavar='PREFIX', env_var='ASSET_STORE_URL_PREFIX',
                    help='URL prefix of fs asset store '
                         '(only applicable for fs asset store)')
    ap.add_argument('--asset-store-secret', action='store', metavar='PATH',
                    env_var='ASSET_STORE_SECRET',
                    help='Secret for signing assets on fs asset store')
def add_asset_arguments(ap: argparse.ArgumentParser):
    ap.add_argument('--asset-store', action='store', metavar='(fs|s3|cloud)',
                    default='fs', env_var='ASSET_STORE',
                    help='Type of asset store')
    ap.add_argument('--asset-store-public', action='store_true',
                    help='Make assets publicly accessible',
                    env_var='ASSET_STORE_PUBLIC')
    add_fs_asset_store_arguments(ap)
    add_s3_asset_store_arguments(ap)
    add_cloud_asset_store_arguments(ap)
def add_s3_asset_store_arguments(ap: argparse.ArgumentParser):
    ap.add_argument('--asset-store-access-key', action='store', metavar='KEY',
                    env_var='ASSET_STORE_ACCESS_KEY',
                    help='Access key for s3 asset store')
    ap.add_argument('--asset-store-secret-key', action='store',
                    metavar='SECRET', env_var='ASSET_STORE_SECRET_KEY',
                    help='Secret key for s3 asset store')
    ap.add_argument('--asset-store-region', action='store', metavar='REGION',
                    env_var='ASSET_STORE_REGION',
                    help='Region for s3 asset store')
    ap.add_argument('--asset-store-bucket', action='store', metavar='BUCKET',
                    env_var='ASSET_STORE_BUCKET',
                    help='Bucket name for s3 asset store')
    ap.add_argument('--asset-store-s3-url-prefix', action='store',
                    metavar='PREFIX', env_var='ASSET_STORE_S3_URL_PREFIX',
                    help='URL prefix for s3 asset store')
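# A minimal usage sketch, not from the original source. The env_var keyword
# used above requires a configargparse parser, so the argparse.ArgumentParser
# annotations are nominal; assuming all the asset-store helpers live in one
# module, a parser that honors both flags and environment variables is:
import configargparse

parser = configargparse.ArgumentParser(description='asset store demo')
add_asset_arguments(parser)  # also registers the fs/s3/cloud sub-options

# configargparse precedence: command-line flags beat environment variables,
# which beat the declared defaults (here, ASSET_STORE would beat 'fs').
options = parser.parse_args(['--asset-store', 's3',
                             '--asset-store-bucket', 'my-bucket'])
print(options.asset_store, options.asset_store_bucket)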
def _mk_thickness_parser(p: ArgumentParser):
    p.add_argument("--xfm-csv", dest="xfm_csv", type=str,  # required=True,  # FIXME
                   help="CSV file containing at least 'source', 'xfm', "
                        "'target', and 'resampled' columns")
    p.add_argument("--label-mapping", dest="label_mapping", type=FileAtom,  # required=True,  # FIXME
                   help="CSV file containing structure information "
                        "(see minclaplace/wiki/LaplaceGrid)")
    p.add_argument("--atlas-fwhm", dest="atlas_fwhm", type=float,
                   required=True,  # default?
                   help="Blurring kernel (mm) for atlas")
    p.add_argument("--thickness-fwhm", dest="thickness_fwhm", type=float,
                   required=True,  # default?
                   help="Blurring kernel (mm) for cortical surfaces")
    return p
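# A minimal usage sketch (not from the original source; assumes the
# ArgumentParser in the annotation above is configargparse's):
from configargparse import ArgumentParser

thickness_parser = _mk_thickness_parser(ArgumentParser(description='thickness options'))
# Only the two fwhm flags are required while the FIXME'd required=True
# settings stay commented out:
opts = thickness_parser.parse_args(['--atlas-fwhm', '1.0',
                                    '--thickness-fwhm', '0.5'])
print(opts.atlas_fwhm, opts.thickness_fwhm)  # 1.0 0.5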
def add_static_asset_arguments(ap: argparse.ArgumentParser):
    ap.add_argument('--collect-assets', metavar='DIST', action='store',
                    help="Collect static assets to a directory")
    ap.add_argument('--force-assets', action='store_true',
                    help="Remove dist folder before proceeding")
    ap.add_argument('--serve-static-assets', action='store_true',
                    env_var='SERVE_STATIC_ASSETS',
                    help="Enable serving static assets from the plugin process")
    ap.add_argument('--ignore-public-html', action='store_true',
                    env_var='IGNORE_PUBLIC_HTML',
                    help="Ignore public_html directory for static assets")
def add_cloud_asset_store_arguments(ap: argparse.ArgumentParser):
    ap.add_argument('--cloud-asset-host', action='store', metavar='HOST',
                    env_var='CLOUD_ASSET_HOST',
                    help='Host of cloud asset store')
    ap.add_argument('--cloud-asset-token', action='store', metavar='TOKEN',
                    env_var='CLOUD_ASSET_TOKEN',
                    help='Token of cloud asset store')
    ap.add_argument('--cloud-asset-public-prefix', action='store',
                    metavar='PREFIX', env_var='CLOUD_ASSET_PUBLIC_PREFIX',
                    help='URL prefix of public assets on cloud asset store')
    ap.add_argument('--cloud-asset-private-prefix', action='store',
                    metavar='PREFIX', env_var='CLOUD_ASSET_PRIVATE_PREFIX',
                    help='URL prefix of private assets on cloud asset store')
def add_skygear_arguments(ap: argparse.ArgumentParser):
    ap.add_argument('--skygear-address', metavar='ADDR', action='store',
                    default='tcp://127.0.0.1:5555',
                    help="Binds to this socket for skygear",
                    env_var='SKYGEAR_ADDRESS')
    ap.add_argument('--skygear-endpoint', metavar='ENDPOINT', action='store',
                    default='http://127.0.0.1:3000',
                    help="Send to this address for skygear handlers",
                    env_var='SKYGEAR_ENDPOINT')
    ap.add_argument('--pubsub-url', action='store', default=None,
                    env_var='PUBSUB_URL',
                    help="The URL of the pubsub server; should start with "
                         "ws:// or wss:// and include the path")
def add_app_arguments(ap: argparse.ArgumentParser):
    ap.add_argument('--apikey', metavar='APIKEY', action='store', default=None,
                    help="API key of the application", env_var='API_KEY')
    ap.add_argument('--masterkey', metavar='MASTERKEY', action='store',
                    default=None, help="Master key of the application",
                    env_var='MASTER_KEY')
    ap.add_argument('--appname', metavar='APPNAME', action='store', default='',
                    help="Application name of the skygear daemon",
                    env_var='APP_NAME')
import logging
import sys

from configargparse import ArgumentParser

from nlp_architect.models.np2vec import NP2vec
from nlp_architect.utils.io import validate_existing_filepath, check_size

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    arg_parser = ArgumentParser(__doc__)
    arg_parser.add_argument(
        '--np2vec_model_file',
        default='conll2000.train.model',
        help='path to the file with the np2vec model to load.',
        type=validate_existing_filepath)
    arg_parser.add_argument(
        '--binary',
        help='boolean indicating whether the model to load has been stored in '
             'binary format.',
        action='store_true')
    arg_parser.add_argument(
        '--word_ngrams',
        default=0,
        type=int,
        choices=[0, 1],
        help='If 0, the model to load stores word information. If 1, the model '
             'to load stores subword (ngrams) information; note that subword '
             'information is relevant only to fasttext models.')
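    # Hedged usage sketch, not from the original file: NP2vec.load is assumed
    # to accept these keywords (check the installed nlp_architect version).
    args = arg_parser.parse_args()
    np2vec_model = NP2vec.load(
        args.np2vec_model_file,
        binary=args.binary,
        word_ngrams=args.word_ngrams)
    logger.info('np2vec model loaded from %s', args.np2vec_model_file)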
def add_debug_arguments(ap: argparse.ArgumentParser):
    ap.add_argument('--debug', action='store_true',
                    help='Enable debugging features', env_var='DEBUG')
def add_plugin_arguments(ap: argparse.ArgumentParser):
    ap.add_argument('--subprocess', dest='subprocess', action='store',
                    nargs='+',
                    metavar=('(init|op|hook|handler|timer)', 'name'),
                    help='Trigger a subprocess on every call, for debugging')
    ap.add_argument('--http', action='store_true',
                    help='Trigger http web server', env_var='HTTP')
    ap.add_argument('--http-addr', metavar='HTTP_ADDR', action='store',
                    default='0.0.0.0:8000',
                    help='Address the http web server listens on, in the '
                         'format {HOST}:{PORT}', env_var='HTTP_ADDR')
    ap.add_argument('--zmq-thread-pool', metavar='ZMQ_THREAD_POOL',
                    action='store', default=4, type=int,
                    help='Number of threads in ZMQTransport thread pool',
                    env_var='ZMQ_THREAD_POOL')
    ap.add_argument('--zmq-thread-limit', metavar='ZMQ_THREAD_LIMIT',
                    action='store', default=10, type=int,
                    help='Max number of threads in ZMQTransport thread pool',
                    env_var='ZMQ_THREAD_LIMIT')
    ap.add_argument('modules', nargs='*', default=[])  # env_var: LOAD_MODULES
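# A minimal sketch (not from the original source) of how the helpers above
# compose into one skygear plugin CLI; it assumes all add_*_arguments
# functions live in this module and that the parser is a
# configargparse.ArgumentParser (plain argparse rejects env_var):
import configargparse


def build_parser() -> configargparse.ArgumentParser:
    ap = configargparse.ArgumentParser(description='skygear plugin runner')
    add_logging_arguments(ap)
    add_debug_arguments(ap)
    add_app_arguments(ap)
    add_skygear_arguments(ap)
    add_asset_arguments(ap)
    add_static_asset_arguments(ap)
    add_plugin_arguments(ap)
    return ap


# e.g.: APP_NAME=myapp python plugin.py --http mymodule
options = build_parser().parse_args()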
import logging
import sys

from configargparse import ArgumentParser

from nlp_architect.models.np2vec import NP2vec
from nlp_architect.utils.io import check_size, validate_existing_filepath

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    arg_parser = ArgumentParser(__doc__)
    arg_parser.add_argument(
        '--corpus',
        default='train.txt',
        type=str,
        action=check_size(min=1),
        help='path to the corpus. By default, it is the training set of the '
             'CONLL2000 shared task dataset.')
    arg_parser.add_argument(
        '--corpus_format',
        default='conll2000',
        type=str,
        choices=['json', 'txt', 'conll2000'],
        help='format of the input marked corpus; txt, conll2000 and json '
             'formats are supported. For json format, the file should contain '
             'an iterable of sentences. Each sentence is a list of terms '
             '(unicode strings) that will be used for training.')
    arg_parser.add_argument(
import os
import time
from datetime import datetime, timezone

import docker
from configargparse import ArgumentParser
from dateutil import parser as time_parser
from prettylog import basic_config

parser = ArgumentParser(
    default_config_files=[os.path.join('/etc/docker-heal.conf')],
    auto_env_var_prefix='APP_',
)
parser.add_argument(
    '-l', '--label', type=str, default='autoheal',
    help='container label'
)
parser.add_argument(
    '-s', '--start-time', type=int, default=20,
    help='grace period (seconds) for a container to come up'
)
parser.add_argument(
    '-c', '--check-interval', type=int, default=30,
    help='check interval in seconds'
)
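# A hedged sketch (not the original implementation) of the heal loop these
# options configure; client.containers.list, container.attrs, and
# container.restart are standard docker-py API:
def heal_once(client: docker.DockerClient, label: str) -> None:
    # Restart any labelled container whose healthcheck reports unhealthy.
    for container in client.containers.list(filters={'label': label}):
        health = container.attrs['State'].get('Health', {}).get('Status')
        if health == 'unhealthy':
            container.restart()


if __name__ == '__main__':
    args = parser.parse_args()
    client = docker.from_env()
    time.sleep(args.start_time)  # grace period for containers still starting
    while True:
        heal_once(client, args.label)
        time.sleep(args.check_interval)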
        annot_dir (string): directory of annotations
        image_dir (string): directory of images
        root_dir (string): paths will be made relative to this directory
        ext (string, optional): image extension (default=.jpg)
    """
    records = []
    with open(index_file) as f:
        for img in f:
            tag = img.rstrip(os.linesep)
            image = os.path.join(image_dir, tag + '.jpg')
            annot = os.path.join(annot_dir, tag + '.json')
            assert os.path.exists(image), 'Path {} not found'.format(image)
            assert os.path.exists(annot), 'Path {} not found'.format(annot)
            records.append((os.path.relpath(image, root_dir),
                            os.path.relpath(annot, root_dir)))
    np.savetxt(manifest_path, records, fmt='%s,%s')


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--input_dir', required=True,
                        help='path to directory with vocdevkit data')
    parser.add_argument('--output_dir', required=True,
                        help='output directory')
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite files')
    args = parser.parse_args()
    ingest_pascal(args.input_dir, args.output_dir, overwrite=args.overwrite)
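# The manifest written by np.savetxt above is a plain CSV with one
# image/annotation pair per line, e.g. (hypothetical paths):
#
#   JPEGImages/000005.jpg,Annotations/000005.json
#   JPEGImages/000007.jpg,Annotations/000007.json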
    aeon_config['iteration_mode'] = "ONCE"
    aeon_config['shuffle_manifest'] = True
    aeon_config['shuffle_enable'] = True
    aeon_config['random_seed'] = random_seed
    aeon_config['augmentation'][0]["center"] = False
    aeon_config['augmentation'][0]["flip_enable"] = True
    return wrap_dataloader(AeonDataLoader(aeon_config))


def make_validation_loader(manifest_file, manifest_root, backend_obj, subset_pct=100):
    aeon_config = common_config(manifest_file, manifest_root,
                                backend_obj.bsz, subset_pct)
    return wrap_dataloader(AeonDataLoader(aeon_config))


def make_tuning_loader(manifest_file, manifest_root, backend_obj):
    aeon_config = common_config(manifest_file, manifest_root,
                                backend_obj.bsz, subset_pct=20)
    aeon_config['shuffle_manifest'] = True
    return wrap_dataloader(AeonDataLoader(aeon_config))


if __name__ == '__main__':
    from configargparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--out_dir', required=True,
                        help='Directory to write ingested files')
    parser.add_argument('--padded_size', type=int, default=40,
                        help='Size of image after padding')
    parser.add_argument('--overwrite', action='store_true', default=False,
                        help='Overwrite files')
    args = parser.parse_args()
    ingest_cifar10(args.out_dir, args.padded_size, overwrite=args.overwrite)
    fields = record.split(',')
    header_body = "FILE\t" * len(fields)
    header = "@" + header_body[:-1] + '\n'
    tmp_dest.write(header)
    record = record.replace(',', '\t')
    tmp_dest.write(record)
    for record in source:
        record = record.replace(',', '\t')
        tmp_dest.write(record)
    source.close()
    tmp_dest.close()
    if output_manifest is None:
        output_manifest = source_manifest
    if os.path.exists(output_manifest):
        os.remove(output_manifest)
    shutil.move(tmp_manifest, output_manifest)


if __name__ == '__main__':
    from configargparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--manifest_file', required=True,
                        help='Manifest to convert')
    parser.add_argument('--destination',
                        help='Converted manifest destination')
    args = parser.parse_args()
    convert_manifest(args.manifest_file, args.destination)
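    # Hedged usage sketch (filenames are hypothetical): the conversion turns a
    # comma-separated manifest into the tab-separated form aeon expects,
    # prepending a header with one FILE column per field:
    #
    #   input line:   img/a.png,lbl/a.txt
    #   output:       @FILE<TAB>FILE
    #                 img/a.png<TAB>lbl/a.txt
    #
    #   convert_manifest('train.csv', 'train.tsv')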
    aeon_config['random_seed'] = random_seed
    if noise_file is not None:
        aeon_config['audio']['noise_index_file'] = noise_file
        aeon_config['audio']['noise_root'] = manifest_root
        aeon_config['audio']['add_noise_probability'] = 0.5
        aeon_config['audio']['noise_level'] = [0.0, 0.5]
    return wrap_dataloader(AeonDataLoader(aeon_config, backend_obj))


def make_test_loader(manifest_file, manifest_root, backend_obj):
    aeon_config = common_config(manifest_file, manifest_root, backend_obj.bsz)
    aeon_config['type'] = 'audio'  # No labels provided
    aeon_config.pop('label', None)
    dl = AeonDataLoader(aeon_config, backend_obj)
    dl = TypeCast(dl, index=0, dtype=np.float32)
    return dl


if __name__ == '__main__':
    from configargparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--input_dir', required=True,
                        help='path to whale_data.zip')
    parser.add_argument('--out_dir', required=True,
                        help='destination path of extracted files')
    args = parser.parse_args()
    generated_files = ingest_whales(args.input_dir, args.out_dir)
    print("Manifest files written to:\n" + "\n".join(generated_files))
    aeon_config['image']['center'] = False
    aeon_config['image']['flip_enable'] = True
    return wrap_dataloader(AeonDataLoader(aeon_config, backend_obj))


def make_validation_loader(manifest_file, manifest_root, backend_obj, subset_pct=100):
    aeon_config = common_config(manifest_file, manifest_root,
                                backend_obj.bsz, subset_pct)
    return wrap_dataloader(AeonDataLoader(aeon_config, backend_obj))


def make_tuning_loader(manifest_file, manifest_root, backend_obj):
    aeon_config = common_config(manifest_file, manifest_root,
                                backend_obj.bsz, subset_pct=20)
    aeon_config['shuffle_manifest'] = True
    aeon_config['shuffle_every_epoch'] = True
    return wrap_dataloader(AeonDataLoader(aeon_config, backend_obj))


if __name__ == '__main__':
    from configargparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--out_dir', required=True,
                        help='path to extract files into')
    parser.add_argument('--input_dir', default=None,
                        help='unused argument')
    parser.add_argument('--padded_size', type=int, default=40,
                        help='Size of image after padding (each side)')
    args = parser.parse_args()
    generated_files = ingest_cifar10(args.out_dir, args.padded_size)
    print("Manifest files written to:\n" + "\n".join(generated_files))
    aeon_config['augmentation'].append(dict())
    aeon_config['augmentation'][0]['type'] = "audio"
    aeon_config['augmentation'][0]['noise_index_file'] = noise_file
    aeon_config['augmentation'][0]['noise_root'] = os.path.dirname(noise_file)
    aeon_config['augmentation'][0]['add_noise_probability'] = 0.5
    aeon_config['augmentation'][0]['noise_level'] = (0.0, 0.5)
    return wrap_dataloader(AeonDataLoader(aeon_config))


def make_test_loader(manifest_file, manifest_root, backend_obj):
    aeon_config = common_config(manifest_file, manifest_root, backend_obj.bsz)
    aeon_config['type'] = 'audio'  # No labels provided
    aeon_config.pop('label', None)
    dl = AeonDataLoader(aeon_config)
    dl = TypeCast(dl, index=0, dtype=np.float32)
    return dl


if __name__ == '__main__':
    from configargparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--input_dir', required=True,
                        help='path to whale_data.zip')
    parser.add_argument('--out_dir', required=True,
                        help='destination path of extracted files')
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite existing manifest files')
    args = parser.parse_args()
    generated_files = ingest_whales(args.input_dir, args.out_dir, args.overwrite)
    print("Manifest files written to:\n" + "\n".join(generated_files))
@app.route('/')
def aggregate_info():
    return jsonify({'service': 'aggregate'})


@app.errorhandler(requests.RequestException)
def http_error(error):
    response = jsonify({
        'status': 'error',
        'code': 500,
        'error': str(error)  # exceptions have no .message attribute in Python 3
    })
    response.status_code = 500
    return response


if __name__ == '__main__':
    parser = ArgumentParser(description='Runs the aggregate service.')
    parser.add_argument('--host',
                        help='Specifies the host for the application.',
                        default='127.0.0.1')
    parser.add_argument('--port', type=int,
                        help='Specifies the port for the application.',
                        default=5000)
    parser.add_argument('-u', '--users',
                        help='Specifies the address of the users service.',
                        required=True, env_var='USERS_URL')
    parser.add_argument('-i', '--ip',
                        help='Specifies the address of the ip service.',
                        required=True, env_var='IPDIAG_URL')
    arguments = parser.parse_args()
    users_endpoint = arguments.users
    ip_endpoint = arguments.ip
    app.run(host=arguments.host, port=arguments.port)
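# Hedged sketch of the service's observable behavior (not from the original
# source). Flask's test client can exercise the routes without a server:
#
#   with app.test_client() as client:
#       client.get('/').get_json()   # -> {'service': 'aggregate'}
#
# Any requests.RequestException raised while calling the users or ip services
# is rendered by http_error above as:
#   {'status': 'error', 'code': 500, 'error': '<exception text>'}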
def make_loader(manifest_file, manifest_root, backend_obj, subset_pct=100, random_seed=0):
    aeon_config = common_config(manifest_file, manifest_root,
                                backend_obj.bsz, subset_pct)
    aeon_config['shuffle_manifest'] = True
    aeon_config['shuffle_enable'] = True
    aeon_config['random_seed'] = random_seed
    aeon_config['augmentation'][0]['center'] = True
    aeon_config['augmentation'][0]['flip_enable'] = False
    return wrap_dataloader(AeonDataLoader(aeon_config))


if __name__ == '__main__':
    from configargparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-o', '--out_dir', type=str, required=True,
                        help='data path')
    parser.add_argument('-c', '--category', type=str, default="bedroom",
                        help='data category')
    parser.add_argument('-s', '--dset', type=str, default="train",
                        help='train, val or test')
    parser.add_argument('-t', '--tag', type=str, default="latest",
                        help='version tag')
    parser.add_argument('-w', '--overwrite', action='store_true',
                        help='overwrite existing data')
    parser.add_argument('-p', '--png', action='store_true',
                        help='conversion to PNG images')
    args = parser.parse_args()
    assert os.path.exists(args.out_dir), "Output directory does not exist"
    categories = lsun_categories(args.tag)
    assert args.category in categories, \
        "Unrecognized LSUN category: {}".format(args.category)
    # download and unpack LSUN data if not yet done so
    download_lsun(lsun_dir=args.out_dir, category=args.category,
                  dset=args.dset, tag=args.tag, overwrite=args.overwrite)
    # ingest LSUN data for AEON loader if not yet done so
    manifest_file = ingest_lsun(lsun_dir=args.out_dir, category=args.category,
                                dset=args.dset,
    # write SSD VAL CONFIG
    ssd_config_val = get_ssd_config(img_reshape, True)
    ssd_config_path_val = os.path.join(root_dir, 'kitti_ssd_{}_val.cfg'.format(hw))
    util.write_ssd_config(ssd_config_val, ssd_config_path_val, True)

    config_path = os.path.join(root_dir, 'kitti_{}.cfg'.format(hw))
    config = {'manifest': '[train:{}, val:{}]'.format(train_manifest, val_manifest),
              'manifest_root': root_dir,
              'epochs': 100,
              'height': img_reshape[0],
              'width': img_reshape[1],
              'ssd_config': '[train:{}, val:{}]'.format(ssd_config_path,
                                                        ssd_config_path_val)}
    util.write_config(config, config_path)


if __name__ == '__main__':
    from configargparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--input_dir', required=True,
                        help='path to dir with KITTI zip files.')
    parser.add_argument('--output_dir', required=True,
                        help='path to unzip data.')
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite files')
    parser.add_argument('--training_pct', type=int, default=90,
                        help='percentage of data used for training.')
    parser.add_argument('--skip_unzip', action='store_true',
                        help='skip unzip')
    args = parser.parse_args()
    ingest_kitti(args.input_dir, args.output_dir,
                 train_percent=args.training_pct,
                 overwrite=args.overwrite,
                 skip_unzip=args.skip_unzip)
              'epochs': 230,
              'height': height,
              'width': width,
              'ssd_config': '[train:{}, val:{}]'.format(ssd_config_path,
                                                        ssd_config_path_val)}
    util.write_config(config, config_path)

    # write annotation pickle
    if annot_save is not None:
        with open(annot_save, 'wb') as f:  # pickle needs a binary-mode file
            pickle.dump(data, f)


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--data_dir', required=True,
                        help='path to directory with vocdevkit data')
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite files')
    parser.add_argument('--height', type=int, default=512,
                        help='height of reshaped image')
    parser.add_argument('--width', type=int, default=512,
                        help='width of reshaped image')
    parser.add_argument('--train_fraction', type=float, default=0.9,
                        help='fraction of data used for training')
    parser.add_argument('--annot_save', type=str, default=None,
                        help='separately save annotations to this file.')
    args = parser.parse_args()
    cities = ['AOI_1_Rio', 'AOI_2_Vegas_Train', 'AOI_3_Paris_Train',
              'AOI_4_Shanghai_Train', 'AOI_5_Khartoum_Train']
    ingest_spacenet(cities=cities, data_dir=args.data_dir,
                    height=args.height, width=args.width,
                    overwrite=args.overwrite, annot_save=args.annot_save)
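    # Hedged round-trip sketch (hypothetical filename): annotations written
    # via --annot_save can be restored with pickle.load on a binary file:
    #
    #   with open('annotations.pkl', 'rb') as f:
    #       data = pickle.load(f)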