def test_flag_help_in_xml_space_separated_list(self):
    flags.DEFINE_spaceseplist(
        'dirs', 'src libs bin', 'Directories to search.', flag_values=self.fv)
    expected_separators = sorted(string.whitespace)
    expected_output = (
        '<flag>\n'
        '  <file>tool</file>\n'
        '  <name>dirs</name>\n'
        '  <meaning>Directories to search.</meaning>\n'
        '  <default>src libs bin</default>\n'
        '  <current>[\'src\', \'libs\', \'bin\']</current>\n'
        '  <type>whitespace separated list of strings</type>\n'
        'LIST_SEPARATORS'
        '</flag>\n').replace(
            'LIST_SEPARATORS',
            _list_separators_in_xmlformat(expected_separators, indent='  '))
    self._check_flag_help_in_xml('dirs', 'tool', expected_output)
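# A minimal, self-contained sketch of the behavior the test above documents:
# a spaceseplist flag splits its value on any run of whitespace. This uses
# only public absl APIs; the flag values here are illustrative.
from absl import flags

fv = flags.FlagValues()
flags.DEFINE_spaceseplist('dirs', 'src libs bin', 'Directories to search.',
                          flag_values=fv)
fv(['prog', '--dirs=src libs'])  # parse a command line
assert fv.dirs == ['src', 'libs']  # whitespace of any kind separates items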
if __name__ == '__main__':
    utils.setup_tf()
    flags.DEFINE_float('wd', 0.02, 'Weight decay.')
    flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
    flags.DEFINE_float('smoothing', 0.001, 'Label smoothing.')
    flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
    flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
    flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
    flags.DEFINE_bool('custom_dataset', True, 'True if using a custom dataset.')
    flags.DEFINE_integer('nclass', 42, 'Number of classes present in custom dataset.')
    flags.DEFINE_integer('img_size', 32, 'Size of images in custom dataset.')
    flags.DEFINE_string('train_record', 'sketch-train.tfrecord', 'Name of training tfrecord.')
    flags.DEFINE_string('test_record', 'sketch-test.tfrecord', 'Name of test tfrecord.')
    flags.DEFINE_spaceseplist('valid_size', ['1'], 'List of different validation sizes.')
    flags.DEFINE_string('augment', 'custom',
                        'Type of augmentation to use, as defined in libml.data.py')
    FLAGS.set_default('dataset', 'cifar10')
    FLAGS.set_default('batch', 64)
    FLAGS.set_default('lr', 0.002)
    FLAGS.set_default('train_kimg', 1 << 16)
    app.run(main)
import os

import numpy as np
from absl import flags, logging

import util.logger
from util.data_loader import DataLoader

################################################################################
FLAGS = flags.FLAGS

flags.DEFINE_string('input_name', '0_original', help='the name of input data')
flags.DEFINE_string('output_name', None, help='the name of output data')
flags.mark_flag_as_required('output_name')
flags.DEFINE_spaceseplist('features', None, help='the selected features')
flags.mark_flag_as_required('features')

################################################################################


def main(_):
    if FLAGS.log_dir:
        os.makedirs(FLAGS.log_dir, exist_ok=True)
        logging.get_absl_handler().use_absl_log_file(FLAGS.task, FLAGS.log_dir)

    logging.debug('Arguments:')
    for k, v in FLAGS.flag_values_dict().items():
        logging.debug(f'- {k}: {v}')
    plt.show()


PLOTS = {'plot': plot, 'image': ''}
CONTEXTS = ['paper', 'notebook', 'talk', 'poster']
STYLES = ['whitegrid', 'darkgrid', 'ticks', 'white', 'dark']
PALETTES = ['deep', 'muted', 'bright', 'pastel', 'dark', 'colorblind']

FLAGS = flags.FLAGS
flags.DEFINE_enum('plot', 'plot', PLOTS.keys(), 'Type of plot', short_name='p')
flags.DEFINE_string('xlabel', 'x', 'Name of the x-label', short_name='x')
flags.DEFINE_string('ylabel', 'y', 'Name of the y-label', short_name='y')
flags.DEFINE_string('title', '', 'Plot title', short_name='t')
flags.DEFINE_multi_string('src', None, 'Sources file', short_name='s')
flags.DEFINE_string('sep', ',', 'Elements separator in src')
flags.DEFINE_spaceseplist('names', None, 'Name of the columns', short_name='n',
                          comma_compat=True)
flags.DEFINE_enum('context', CONTEXTS[0], CONTEXTS, 'Context of the plot')
flags.DEFINE_enum('style', STYLES[0], STYLES, 'Style of the plot')
flags.DEFINE_string('font', 'sans-serif', 'Font of the plot')
flags.DEFINE_enum('palette', PALETTES[0], PALETTES, 'Color palette')
flags.DEFINE_string('rc', None, 'file with parameter mappings of seaborn')


def main(argv):
    PLOTS[FLAGS.plot]()
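# A hedged sketch of what comma_compat=True (used for --names above) does:
# commas are accepted as separators in addition to whitespace, so both
# delimiter styles parse to the same list. Public absl behavior; the flag
# instance below is illustrative, not part of the script.
from absl import flags

fv = flags.FlagValues()
flags.DEFINE_spaceseplist('names', None, 'Name of the columns',
                          comma_compat=True, flag_values=fv)
fv(['prog', '--names=a,b c'])
assert fv.names == ['a', 'b', 'c']  # commas and spaces both split items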
from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import copy
import json
import math
import os
import re

import data_support
from absl import app, flags
from nltk.tokenize import word_tokenize

FLAGS = flags.FLAGS
flags.DEFINE_spaceseplist("json_path", "data/furniture_raw_data.json",
                          "JSON containing the dataset")
flags.DEFINE_string("save_root", "data/",
                    "Folder path to save extracted api annotations")
flags.DEFINE_string(
    "metadata_path",
    "data/furniture_metadata.csv",
    "Path to the furniture metadata CSV",
)
flags.DEFINE_enum(
    "subtask",
    "dominant-action",
    ["dominant-action", "multi-action"],
    "Selects output format; dominant-action (single action) and multi-action",
)  # sub-tasks
flags.DEFINE_boolean(
    'custom_cosmo', False,
    'custom cosmology? If true, read in values for sigma8 and Omega_m, '
    'otherwise use Planck15 as default')
flags.DEFINE_float('Omega_m', 0.3089, 'total matter density',
                   lower_bound=0.1, upper_bound=0.5)
flags.DEFINE_float('sigma_8', 0.8158, 'amplitude of matter fluctuations',
                   lower_bound=0.4, upper_bound=1.3)
flags.DEFINE_boolean('PGD', False, 'whether to use PGD sharpening')
flags.DEFINE_integer('B', 2, 'force resolution factor')
flags.DEFINE_spaceseplist('zs_source', ['1.0'], 'source redshifts')
flags.DEFINE_boolean('interpolate', False,
                     'whether to interpolate between snapshots')
flags.DEFINE_boolean(
    'debug', True, 'debug mode allows to run repeatedly with the same settings')
flags.DEFINE_boolean(
    'save3D', False,
    'whether to dump the snapshots, requires interp to be set to False')
flags.DEFINE_boolean(
    'save3Dpower', False,
    'whether to measure and save the power spectra of the snapshots')
flags.DEFINE_boolean('vjp', False, 'whether to compute the vjp')
flags.DEFINE_boolean('jvp', False, 'whether to compute the jvp')
flags.DEFINE_boolean('forward', True, 'whether to run forward model')
flags.DEFINE_boolean('analyze', False, 'whether to print out resource usage')
    'shift-left', 'shift-right', 'riffle', 'unriffle', 'middle-char',
    'remove-last', 'remove-last-two', 'echo-alternating', 'echo-half',
    'length', 'echo-second-seq', 'echo-nth-seq', 'substring', 'divide-2',
    'dedup']

FLAGS = flags.FLAGS
flags.DEFINE_string(
    'models_dir', '',
    'Absolute path where results folders are found.')
flags.DEFINE_string(
    'exp_prefix', 'bf_rl_iclr',
    'Prefix for all experiment folders.')
flags.DEFINE_string(
    'max_npe', '5M',
    'String representation of max NPE of the experiments.')
flags.DEFINE_spaceseplist(
    'task_list', DEFAULT_TASKS,
    'List of task names separated by spaces. If empty string, defaults to '
    '`DEFAULT_TASKS`. These are the rows of the results table.')
flags.DEFINE_string(
    'model_types', str(DEFAULT_MODELS),
    'String representation of a python list of 2-tuples, each a model_type + '
    'job description pair. Descriptions allow you to choose among different '
    'runs of the same experiment. These are the columns of the results table.')
flags.DEFINE_string(
    'csv_file', '/tmp/results_table.csv',
    'Where to write results table. Format is CSV.')
flags.DEFINE_enum(
    'data', 'success_rates', ['success_rates', 'code'],
    'What type of data to aggregate.')


def make_csv_string(table):
from pathlib import Path
import os

import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from absl import flags, app
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

FLAGS = flags.FLAGS
flags.DEFINE_spaceseplist(
    'logdirs', [],
    'Space separated list of directories to plot results from.')
flags.DEFINE_string('output_file_name', 'out.pdf',
                    'Output file to generate plot.')
flags.DEFINE_integer('seeds', 5, 'Number of seeds per run')


def main(_):
    sns.color_palette()
    fig = plt.figure(figsize=(8, 4))
    ax = fig.gca()
    print(FLAGS.logdirs)
    for logdir in FLAGS.logdirs:
        print(logdir)
        samples = []
        rewards = []
        for seed in range(FLAGS.seeds):
            logdir_ = Path(logdir) / f'seed{seed}'
            logdir_ = logdir_ / 'val'
            event_acc = EventAccumulator(str(logdir_))
            event_acc.Reload()
            _, step_nums, vals = zip(*event_acc.Scalars('val-mean_reward'))
from typing import List

from absl import app
from absl import flags
from absl import logging

from valan.framework import hyperparam_flags
from valan.r2r import custom_flags

flags.DEFINE_integer('num_train_workers', 1,
                     'Number of workers for the train actor.')
flags.DEFINE_integer('actors_per_train_worker', 1,
                     'Number of actors to run on a single train worker.')
flags.DEFINE_spaceseplist(
    'eval_data_source', '',
    'A space-separated list of sources to read the data from. This is usually '
    'name(s) of the eval splits from which the actor reads the data, e.g., '
    '"val_seen val_unseen". NOTE: If each set of eval source contains '
    'multiple files, they can be separated by commas, e.g., '
    '"val_seen_1,val_seen2 val_unseen_1,val_unseen2"')
flags.DEFINE_spaceseplist(
    'num_eval_workers', '1',
    'Space-separated number of workers for each eval_data_source.')
flags.DEFINE_integer('actors_per_eval_worker', 2,
                     'Number of actors to run on a single eval worker.')

FLAGS = flags.FLAGS

# Task-specific dir where main functions (e.g., learner_main, actor_main) reside.
TASK_DIR = {
    'R2R': 'r2r',
    'NDH': 'r2r',
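# A hedged sketch (not from this file) of how the two-level convention in
# eval_data_source could be unpacked: absl has already split the flag on
# spaces, so each element still needs a comma split. The helper name is
# hypothetical.
def expand_sources(eval_data_source):
    # ['val_seen_1,val_seen2', 'val_unseen_1'] -> [['val_seen_1', 'val_seen2'], ['val_unseen_1']]
    return [source.split(',') for source in eval_data_source]

assert expand_sources(['val_seen_1,val_seen2', 'val_unseen_1']) == [
    ['val_seen_1', 'val_seen2'], ['val_unseen_1']]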
# Internal dependencies.

from absl import app
from absl import flags
from absl import logging

from dm_control.autowrap import binding_generator
from dm_control.autowrap import codegen_util

import six

_MJMODEL_H = "mjmodel.h"
_MJXMACRO_H = "mjxmacro.h"

FLAGS = flags.FLAGS

flags.DEFINE_spaceseplist(
    "header_paths", None,
    "Space-separated list of paths to MuJoCo header files.")

flags.DEFINE_string("output_dir", None,
                    "Path to output directory for wrapper source files.")


def main(unused_argv):
    special_header_paths = {}

    # Get the path to the mjmodel and mjxmacro header files.
    # These header files need special handling.
    for header in (_MJMODEL_H, _MJXMACRO_H):
        for path in FLAGS.header_paths:
            if path.endswith(header):
                special_header_paths[header] = path
import os
import re
import shutil

import boto3
import botocore
from botocore.exceptions import ClientError
from absl import app
from absl import flags

raw_bucket = 'recyclops'
verified_file_dir = 'verified'
image_base_dir = './images'
saved_model_dir = 'savedModels'

flags.DEFINE_spaceseplist(
    'categories_list',
    'aluminum compost glass paper plastic trash',
    'List of categories to download images from',
)
flags.DEFINE_bool(
    'get_latest_model',
    True,
    'Get the latest saved model from s3',
)


def create_output_dir(dir_name):
    if not os.path.isdir(dir_name) or not os.path.exists(dir_name):
        print('Creating output directory: %s' % dir_name)
        try:
            os.mkdir(dir_name)
        except OSError:
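# Worth noting, as a minimal sketch of absl behavior separate from this
# script: a spaceseplist default given as a single string is run through the
# flag's parser, so the flag value is list-typed either way.
from absl import flags

fv = flags.FlagValues()
flags.DEFINE_spaceseplist(
    'categories_list', 'aluminum compost glass paper plastic trash',
    'List of categories to download images from', flag_values=fv)
fv(['prog'])  # no override on the command line; default is parsed
assert fv.categories_list == [
    'aluminum', 'compost', 'glass', 'paper', 'plastic', 'trash']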
help="Where to save JSONL with episodes and agent summary.\ File should have jsonl extension.") flags.DEFINE_integer(name="episodes", default=500, help="Number of games that the agent plays.", lower_bound=1) flags.DEFINE_float( name="survived_step_reward", default=0.1, help="Whether and how much to reward the agent for each survived step.", lower_bound=0.0) flags.DEFINE_spaceseplist(name="mlp_hidden_units", default=["256", "256"], help="Number of units for each hidden layer.") flags.DEFINE_float(name="learning_rate", default=0.00005, help="Size of the optimization step.", lower_bound=0.0) flags.DEFINE_float(name="discount_factor", default=0.95, help="How much future rewards are taken into account.", lower_bound=0.0) flags.DEFINE_float(name="epsilon", default=0.8, help="Epsilon value for epsilon-greedy.")
flags.DEFINE_float('learning_rate', default=1e-4, help='learning rate')
flags.DEFINE_integer('batch_size', default=16, help='batch size')
flags.DEFINE_integer('max_steps', default=500000, help='training steps')
flags.DEFINE_integer(
    'n_steps', default=5000,
    help='number of training steps after which to perform the evaluation')
flags.DEFINE_enum('loss', 'AE', ['VAE', 'hybrid', 'AE'],
                  help='which objective to optimize')
flags.DEFINE_boolean('output_images', default=True,
                     help='whether to output image summaries')
flags.DEFINE_boolean('full_sigma', default=True,
                     help='whether to use constant or pixel-wise noise')
flags.DEFINE_boolean(
    'sigma_annealing', default=False,
    help='whether to run a scheduled beta annealing on the KL term (VAE only)')
flags.DEFINE_boolean('beta_VAE', default=True, help='whether to run a beta VAE')
flags.DEFINE_float('beta', default=120, help='beta parameter for beta VAE')
flags.DEFINE_boolean('free_bits', default=False,
                     help='whether to train a VAE with free bits')
flags.DEFINE_float('lambda', default=0, help='free bits parameter')
flags.DEFINE_boolean('C_annealing', default=True,
                     help='whether to reduce available kl with training')
flags.DEFINE_float('C', default=18, help='C parameter')
flags.DEFINE_spaceseplist(
    'augmentation', ['rot'],
    'data augmentation types. Must be one or a list of the following: '
    'None, rot, flip, crop, bright')
flags.DEFINE_float('rot_angle', 5.,
                   'maximum rotation in degrees for data augmentation')
flags.DEFINE_integer('latent_size', default=10,
                     help='dimensionality of latent space')
flags.DEFINE_string('activation', default='tanh', help='activation function')
flags.DEFINE_integer('n_samples', default=16,
                     help='number of samples for encoding')
flags.DEFINE_enum(
    'network_type', 'vae10',
    ['vae10', 'fully_connected', 'conv', 'infoGAN', 'resnet_fc', 'resnet_conv'],
    help='which type of network to use, currently supported: '
         'fully_connected and conv')
flags.DEFINE_integer(
    'n_filt', default=32,
    help='number of filters to use in the first convolutional layer')
flags.DEFINE_integer('dense_size', default=256,
                     help='number of connections in the fc resnet')
flags.DEFINE_integer('n_layers', default=4,
                     help='number of layers in the fc resnet')
flags.DEFINE_boolean('bias', default=False,
                     help='whether to use a bias in the convolutions')
flags.DEFINE_float('dropout_rate', default=0,
                   help='dropout rate used in infoGAN')
flags.DEFINE_float('sigma', default=0.1,
                   help='initial value of sigma in Gaussian likelihood')
flags.DEFINE_integer(
    'class_label', default=-1,
    help='number of specific class to train on. -1 for all classes')
flags.DEFINE_string(
    'tag', default='test',
    help='optional additional tag that is added to name of the run')
import cv2
from botocore.exceptions import ClientError
from signal import signal, SIGINT
from absl import app
from absl import flags

WINDOW_NAME = "Recyclops"
raw_bucket = 'recyclops'
verified_file_dir = 'verified'
file_type = '.jpg'
FONT_TYPE = cv2.FONT_HERSHEY_SIMPLEX
FONT_COLOR_DISPLAY = (81, 237, 14)
FONT_COLOR_CATEGORY = (237, 181, 14)

flags.DEFINE_spaceseplist(
    'input_categories_list',
    'aluminum compost glass paper plastic trash',
    'List of categories to clean files from',
)
flags.DEFINE_spaceseplist(
    'output_categories_list',
    'aluminum compost glass paper plastic trash invalid',
    'List of categories to add cleaned files to',
)


def exit_handler(signal_received, frame):
    print("Forced exit...")
    cv2.destroyAllWindows()
    exit(0)
    'experiment run and plot clustering results.')

# Flags for generating simulated clusters of LDSs.
flags.DEFINE_boolean('generate_diagonalizable_only', False,
                     'Whether to only generate diagonalizable LDSs.')
flags.DEFINE_integer('num_clusters', 2, 'Number of clusters in experiments.')
flags.DEFINE_integer('num_systems', 100,
                     'Number of dynamical systems to cluster.')
flags.DEFINE_integer('hidden_state_dim', 2, 'Hidden state dim in experiments.')
flags.DEFINE_integer('input_dim', 1, 'Input dim in experiments.')
flags.DEFINE_boolean(
    'hide_inputs', True,
    'Whether the inputs are observable to the clustering algorithm.')
flags.DEFINE_spaceseplist(
    'cluster_center_eigvalues', None,
    'Optional list of lists of eigenvalues for each cluster. The outer list '
    'is space separated, and the inner list is comma separated. E.g. '
    '`0.9,0.1 0.5,0.1`. When null, generate random cluster centers by '
    'drawing eigenvalues uniformly from [-1, 1].')
flags.DEFINE_float(
    'cluster_center_dist_lower_bound', 0.2,
    'Desired distance lower bound between cluster centers. Only applicable '
    'when cluster_center_eigvalues is None. Generate cluster centers until '
    'distance >= this val.')
flags.DEFINE_float('cluster_radius', 0.05,
                   'Radius of each dynamical system cluster.')
flags.DEFINE_integer('random_seed', 0, 'Random seed.')
flags.DEFINE_integer('num_repeat', 1,
                     'Number of repeated runs for each fixed seq len.')

# Flags for output sequences from LDSs.
flags.DEFINE_integer('min_seq_len', 10, 'Min seq len in experiments.')
flags.DEFINE_integer('max_seq_len', 1000, 'Max seq len in experiments.')
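# A hedged sketch of how the space-then-comma encoding documented for
# cluster_center_eigvalues could be decoded; the helper name is hypothetical,
# not from the original experiment code.
def parse_eigvalue_clusters(cluster_center_eigvalues):
    # absl yields ['0.9,0.1', '0.5,0.1'] for `0.9,0.1 0.5,0.1`;
    # split each cluster on commas and convert to floats.
    return [[float(v) for v in cluster.split(',')]
            for cluster in cluster_center_eigvalues]

assert parse_eigvalue_clusters(['0.9,0.1', '0.5,0.1']) == [[0.9, 0.1], [0.5, 0.1]]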
        repeat=FLAGS.repeat)
    model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)


if __name__ == '__main__':
    utils.setup_tf()
    flags.DEFINE_float('wd', 0.02, 'Weight decay.')
    flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
    flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
    flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
    flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
    flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
    flags.DEFINE_bool('custom_dataset', True, 'True if using a custom dataset.')
    flags.DEFINE_integer('nclass', 42, 'Number of classes present in custom dataset.')
    flags.DEFINE_integer('img_size', 32, 'Size of images in custom dataset.')
    flags.DEFINE_spaceseplist('label_size', ['250', '1000', '2000', '124994'],
                              'List of different labeled data sizes.')
    flags.DEFINE_spaceseplist('valid_size', ['1', '500'],
                              'List of different validation sizes.')
    flags.DEFINE_string('augment', 'custom',
                        'Type of augmentation to use, as defined in libml.data.py')
    FLAGS.set_default('dataset', 'cifar10.3@250-5000')
    FLAGS.set_default('batch', 64)
    FLAGS.set_default('lr', 0.002)
    FLAGS.set_default('train_kimg', 1 << 16)
    app.run(main)
FLAGS = flags.FLAGS
flags.DEFINE_enum('action', None,
                  ['boot', 'start', 'mini_boot', 'ping', 'kill', 'info'],
                  'The action to perform against the emulator images')
flags.DEFINE_string('skin', None,
                    '[BOOT ONLY] The skin parameter to pass to the emulator')
flags.DEFINE_string('density', None, '[bazel ONLY] Density of the lcd screen')
flags.DEFINE_string('cache', None,
                    '[bazel ONLY] Size of cache partition in mb '
                    '- currently not functioning')
flags.DEFINE_string('vm_size', None, '[bazel ONLY] VM heap size in mb')
flags.DEFINE_integer('memory', None, '[bazel ONLY] the memory for the emulator')
flags.DEFINE_spaceseplist('system_images', None,
                          '[bazel ONLY] the system images to boot the '
                          'emulator with')
flags.DEFINE_spaceseplist('apks', None, '[START ONLY] the apks to install')
flags.DEFINE_spaceseplist('system_apks', None,
                          '[START ONLY] system apks to install')
flags.DEFINE_boolean(
    'preverify_apks', False,
    '[START ONLY] if true the apks will be preverified upon install (normal '
    'production behaviour). It defaults to disabled because some of the '
    'verification failures are overkill in the bazel environment. For '
    'example it is very easy to have both a test apk and app apk contain a '
    'ref to the same class file (eg Maps from the guava jars) and this kills '
    'the verifier (out of the fear that the 2 apks have different class '
    'definitions and the optimizations in the app apk will bypass the test '
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# .setNear("America")
import datetime
from datetime import date
# import GetOldTweets3 as got
from manager.TweetCriteria import TweetCriteria
from manager.TweetManager import TweetManager
from tqdm import tqdm
import time
from absl import app, flags
import os

FLAGS = flags.FLAGS
flags.DEFINE_string("hashtag", None, "hashtag name")
flags.DEFINE_spaceseplist("start", "2020 1 1", "start date")
flags.mark_flag_as_required("hashtag")


def main(args):
    hashtag = FLAGS.hashtag
    y = int(FLAGS.start[0])
    m = int(FLAGS.start[1])
    d = int(FLAGS.start[2])
    start = date(y, m, d)
    if not os.path.isdir(f'./{hashtag}'):
        os.mkdir(f'./{hashtag}')
    while start < date.today():
        since = start
        until = since
import cv2
import numpy as np
import matplotlib.pyplot as plt
from absl import app, flags, logging
from absl.flags import FLAGS

"""Compute mAP."""

flags.DEFINE_boolean('no_animation', True, 'no animation is shown')
flags.DEFINE_boolean('no_plot', True, 'no plot is shown')
flags.DEFINE_boolean('quiet', False, 'minimalistic console output')
# e.g. python mAP.py -ignore "person book"
flags.DEFINE_spaceseplist('ignore', None, 'ignore a list of classes')
# e.g. python mAP.py -set_class_iou "person 0.7 book 0.6"
flags.DEFINE_spaceseplist('set_class_iou', None,
                          'set IoU for a specific class')

# See http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/devkit_doc.html#4.4
MINOVERLAP = 0.5

'''
    0,0 ------------> x (width)
     |
     |   (Left,Top)
     |      *_________
     |      |         |
     |      |         |
     y      |_________|
  (height)            *
# limitations under the License.
"""Generate conformer features to be used for training/predictions."""

import multiprocessing as mp
import pickle
from typing import List

from absl import app
from absl import flags
import numpy as np

# pylint: disable=g-bad-import-order
import conformer_utils
import datasets

_SPLITS = flags.DEFINE_spaceseplist(
    'splits', ['test'], 'Splits to compute conformer features for.')

_OUTPUT_FILE = flags.DEFINE_string(
    'output_file',
    None,
    required=True,
    help='Output file name to write the generated conformer features to.')

_NUM_PROCS = flags.DEFINE_integer(
    'num_parallel_procs', 64,
    'Number of parallel processes to use for conformer generation.')


def generate_conformer_features(smiles: List[str]) -> List[np.ndarray]:
    # Conformer generation is a CPU-bound task and hence can get a boost from
    # parallel processing.
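# The snippet above uses the FlagHolder style: flags.DEFINE_spaceseplist
# returns a holder whose parsed value is read via .value after app.run has
# parsed the command line. A minimal sketch of the pattern; the main()
# below is illustrative, not the original.
from absl import app
from absl import flags

_SPLITS = flags.DEFINE_spaceseplist(
    'splits', ['test'], 'Splits to compute conformer features for.')


def main(_):
    # After app.run() parses argv, the holder exposes the typed value.
    for split in _SPLITS.value:
        print(split)


if __name__ == '__main__':
    app.run(main)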
flags.DEFINE_boolean('move', False, 'Move operation', short_name='m')
flags.DEFINE_boolean('new', False, 'New operation', short_name='n')
flags.DEFINE_boolean('remove', False, 'Remove operation', short_name='r')
flags.DEFINE_boolean('rename', False, 'Rename operation.', short_name='rn')
flags.DEFINE_boolean('summary', False, 'Print a summary of the task lists.',
                     short_name='s')
flags.DEFINE_boolean('toggle', False, 'Toggle operation', short_name='t')
flags.DEFINE_boolean('quit', False, 'Quit operation', short_name='q')

# Flags related to options on above operations.
flags.DEFINE_integer('after', -1,
                     'The index of the task that this should be after')
flags.DEFINE_string('date', '', 'A date in MM/DD/YYYY format.')
flags.DEFINE_spaceseplist('index', '', 'Index of task.', short_name='i')
flags.DEFINE_boolean('force', False, 'Forcibly perform the operation.',
                     short_name='f')
flags.DEFINE_boolean('color', True, 'Display output with terminal colors.',
                     short_name='o')
flags.DEFINE_string('note', '', 'A note to attach to a task.')
flags.DEFINE_integer('parent', 0, 'Index of parent task.', short_name='p')
flags.DEFINE_integer('tasklist', 0, 'Id of task list to operate on.')
flags.DEFINE_string('title', '', 'The name of the task.')

USAGE = ('[-a]dd, [-c]lear, [-d]elete, [-e]dit, [-r]emove task, [-m]ove, ' +
def test_write_help_in_xmlformat(self):
    fv = flags.FlagValues()
    # Since these flags are defined by the top module, they are all key.
    flags.DEFINE_integer('index', 17, 'An integer flag', flag_values=fv)
    flags.DEFINE_integer('nb_iters', 17, 'An integer flag',
                         lower_bound=5, upper_bound=27, flag_values=fv)
    flags.DEFINE_string('file_path', '/path/to/my/dir', 'A test string flag.',
                        flag_values=fv)
    flags.DEFINE_boolean('use_gpu', False, 'Use gpu for performance.',
                         flag_values=fv)
    flags.DEFINE_enum('cc_version', 'stable', ['stable', 'experimental'],
                      'Compiler version to use.', flag_values=fv)
    flags.DEFINE_list('files', 'a.cc,a.h,archive/old.zip', 'Files to process.',
                      flag_values=fv)
    flags.DEFINE_list('allow_users', ['alice', 'bob'], 'Users with access.',
                      flag_values=fv)
    flags.DEFINE_spaceseplist('dirs', 'src libs bins', 'Directories to create.',
                              flag_values=fv)
    flags.DEFINE_multi_string('to_delete', ['a.cc', 'b.h'], 'Files to delete',
                              flag_values=fv)
    flags.DEFINE_multi_integer('cols', [5, 7, 23], 'Columns to select',
                               flag_values=fv)
    flags.DEFINE_multi_enum('flavours', ['APPLE', 'BANANA'],
                            ['APPLE', 'BANANA', 'CHERRY'],
                            'Compilation flavour.', flag_values=fv)
    # Define a few flags in a different module.
    module_bar.define_flags(flag_values=fv)
    # And declare only a few of them to be key. This way, we have
    # different kinds of flags, defined in different modules, and not
    # all of them are key flags.
    flags.declare_key_flag('tmod_bar_z', flag_values=fv)
    flags.declare_key_flag('tmod_bar_u', flag_values=fv)

    # Generate flag help in XML format in the StringIO sio.
    sio = io.StringIO() if six.PY3 else io.BytesIO()
    fv.write_help_in_xml_format(sio)

    # Check that we got the expected result.
    expected_output_template = EXPECTED_HELP_XML_START
    main_module_name = sys.argv[0]
    module_bar_name = module_bar.__name__

    if main_module_name < module_bar_name:
        expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE
        expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
    else:
        expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
        expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE

    expected_output_template += EXPECTED_HELP_XML_END

    # XML representation of the whitespace list separators.
    whitespace_separators = _list_separators_in_xmlformat(string.whitespace,
                                                          indent='  ')
    expected_output = expected_output_template % {
        'basename_of_argv0': os.path.basename(sys.argv[0]),
        'usage_doc': sys.modules['__main__'].__doc__,
        'main_module_name': main_module_name,
        'module_bar_name': module_bar_name,
        'whitespace_separators': whitespace_separators
    }

    actual_output = sio.getvalue()
    self.assertMultiLineEqual(expected_output, actual_output)

    # Also check that our result is valid XML. minidom.parseString
    # throws an xml.parsers.expat.ExpatError in case of an error.
    xml.dom.minidom.parseString(actual_output)
"""Trains an nltk language model.""" import random import pickle from typing import List, Tuple from nltk.lm.preprocessing import padded_everygram_pipeline from nltk.lm import Laplace from absl import app from absl import flags from tqdm import tqdm FLAGS = flags.FLAGS flags.DEFINE_string('string_to_normalize', None, 'the string to normalize') flags.DEFINE_string('language', None, 'the language to normalize') flags.DEFINE_spaceseplist('data_source', None, 'data source to preprocess') flags.DEFINE_string('pass_valid', "token", 'pass only valid tokens or sentences') flags.DEFINE_string('experiment', None, 'the normalization experiment to run') def main(argv): """Trains an nltk language model. Loads in files of normalized text, partitions them into a train partition (3/4 of data) and a test partition (last 1/4 of data). Uses Laplace smoothing for unseen ngrams. """ if len(argv) > 1: raise app.UsageError("Too many command-line arguments.")
"""Extract action API supervision for the SIMMC Fashion dataset. Author(s): Satwik Kottur """ from __future__ import absolute_import, division, print_function, unicode_literals from absl import flags from absl import app import ast import json import os FLAGS = flags.FLAGS flags.DEFINE_spaceseplist("json_path", "data/furniture_pilot_oct24.json", "JSON containing the dataset") flags.DEFINE_string("save_root", "data/", "Folder path to save extraced api annotations") flags.DEFINE_string("metadata_path", "data/fashion_metadata.json", "Path to fashion metadata") def extract_actions(input_json_file): """Extract action API for SIMMC fashion. Args: input_json_file: JSON data file to extraction actions """ print("Reading: {}".format(input_json_file)) with open(input_json_file, "r") as file_id: raw_data = json.load(file_id)
import os

from fslks.experiments import Predictions, Task

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

from tensorflow_datasets.core.utils import gcs_utils

from absl import flags
from absl import app
from absl import logging

from fslks import tasks
from fslks import experiments
from fslks import evaluation

FLAGS = flags.FLAGS

flags.DEFINE_spaceseplist("training_tasks", [],
                          "One or more tasks to be used for pretraining")
flags.DEFINE_spaceseplist(
    "validation_tasks", [],
    "One or more tasks to be used for validation during pretraining")
flags.DEFINE_spaceseplist(
    "testing_tasks", [],
    "One or more tasks to be used for evaluating pretrained models")

flags.DEFINE_integer('num_epochs', 10, 'Number of epochs to train')
flags.DEFINE_integer('warmup_epochs', 3,
                     'Number of warmup epochs before normal training')
flags.DEFINE_integer('batch_size', 8, 'Batch size to use for training')
flags.DEFINE_integer('prefetch_size', -1,
                     'Number of batches to prefetch (default: AUTOTUNE)')
flags.DEFINE_integer('eval_batch_size', 8,
                     'Batch size to use when evaluating validation/test sets')
from absl import flags
from absl import logging
import tensorflow as tf
tfk = tf.keras

import bdlb
from bdlb.core import plotting
from baselines.diabetic_retinopathy_diagnosis.mc_dropout.model import VGGDrop
from baselines.diabetic_retinopathy_diagnosis.ensemble_mc_dropout.model import predict

##########################
# Command line arguments #
##########################
FLAGS = flags.FLAGS
flags.DEFINE_spaceseplist(
    name="model_checkpoints",
    default=None,
    help="Paths to checkpoints of the models.",
)
flags.DEFINE_string(
    name="output_dir",
    default="/tmp",
    help="Path to store model, tensorboard and report outputs.",
)
flags.DEFINE_enum(
    name="level",
    default="medium",
    enum_values=["realworld", "medium"],
    help="Downstream task level, one of {'medium', 'realworld'}.",
)
flags.DEFINE_integer(
    name="batch_size",
from absl import app, flags
from functools import partial
import string

flags.DEFINE_spaceseplist("features", None, "Features taken into account")
flags.DEFINE_string("input", None, "Input path")
flags.DEFINE_string("output", None, "Output path")
flags.DEFINE_string("vocabulary", None, "vocabulary")
flags.DEFINE_string("bases", None, "vocabulary")
flags.DEFINE_string("supertags", None, "vocabulary")
flags.DEFINE_string("suffixes", None, "vocabulary")
flags.DEFINE_string("prefixes", None, "vocabulary")

vocab = set()
superbases = {}
supertags = {}
suffixes = set()
prefixes = set()


def _load_set(path, data_set):
    with open(path, "r") as ifile:
        for line in ifile:
            line = line if line[-1] != "\n" else line[:-1]
            data_set.add(line)


def _load_dict(path, data_dict):
    with open(path, "r") as ifile:
        for line in ifile:
            line = line if line[-1] != "\n" else line[:-1]