FLAGS = flags.FLAGS
flags.DEFINE_string(
    "report", None,
    "The report whose datasets will be used to compute the predictions.")

flags.DEFINE_integer("batch_size", 32, "The batch size to use.")
flags.DEFINE_string("model_module", None,
                    "A python module file defining the model.")
flags.DEFINE_string(
    "model_args", "",
    "The arguments to be passed to the create() function of the model. Will "
    "be literal_eval'ed, should take the form a='1',b='2',c=3.")
flags.DEFINE_string("output_dir", None,
                    "Where to store the dense output. Can be anything that "
                    "can be opened with tf.io, including Google Cloud buckets.")
flags.DEFINE_bool("tf_on_cpu", True,
                  "If set, will hide accelerators from TF.")
flags.DEFINE_integer(
    "num_workers", 1,
    "How many instances of the script are launched to parallelize compute "
    "across datasets.")
flags.DEFINE_integer("worker_index", 0, "The zero-indexed id of this worker.")

flags.mark_flags_as_required(["model_module"])
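
The model_args flag above is documented as a literal_eval'ed k=v list. A minimal sketch of how such a string might be parsed into keyword arguments (an assumption, not this script's code; model_module.create is the factory the flag description names):

import ast

def parse_model_args(model_args_str):
  # Turn "a='1',b='2',c=3" into {'a': '1', 'b': '2', 'c': 3}. Naive comma
  # split: values containing commas would need a real parser.
  kwargs = {}
  if model_args_str:
    for pair in model_args_str.split(","):
      key, value = pair.split("=", 1)
      kwargs[key.strip()] = ast.literal_eval(value)
  return kwargs

# model = model_module.create(**parse_model_args(FLAGS.model_args))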


def slice_per_worker(dictionary):
  """Equally partitions input over workers. Remainder is given to last worker.

  Args:
    dictionary: dict to split over.
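
The example is cut off here by the listing. A minimal sketch of the documented behavior (equal split, remainder to the last worker), assuming the function reads the num_workers and worker_index flags defined above; a reconstruction for illustration, not the original body:

def slice_per_worker_sketch(dictionary):
  # Hedged reconstruction: partition the sorted keys evenly; the last
  # worker also takes the remainder.
  keys = sorted(dictionary)
  per_worker = len(keys) // FLAGS.num_workers
  start = FLAGS.worker_index * per_worker
  is_last = FLAGS.worker_index == FLAGS.num_workers - 1
  end = len(keys) if is_last else start + per_worker
  return {key: dictionary[key] for key in keys[start:end]}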
Example 2
    flags.DEFINE_boolean(
        'mask_zeros_in_client_updates', False,
        'Indicates whether to average client deltas with zero masking.')

with utils_impl.record_hparam_flags() as task_flags:
    # Task specification
    flags.DEFINE_enum('task', None, _SUPPORTED_TASKS,
                      'Which task to perform federated training on.')

with utils_impl.record_hparam_flags() as cifar100_flags:
    # CIFAR-100 flags
    flags.DEFINE_integer(
        'cifar100_crop_size', 24, 'The height and width of '
        'images after preprocessing.')
    flags.DEFINE_bool(
        'cifar100_distort_train_images', True, 'If set to True, '
        'train images will be randomly cropped. Otherwise, all '
        'images will simply be resized.')

with utils_impl.record_hparam_flags() as emnist_cr_flags:
    # EMNIST CR flags
    flags.DEFINE_enum(
        'emnist_cr_model', 'cnn', ['cnn', '2nn'], 'Which model to '
        'use. This can be a convolutional model (cnn) or a two '
        'hidden-layer densely connected network (2nn).')

with utils_impl.record_hparam_flags() as shakespeare_flags:
    # Shakespeare flags
    flags.DEFINE_integer(
        'shakespeare_sequence_length', 80,
        'Length of character sequences to use for the RNN model.')
Example 3
import absl.logging as _logging  # pylint: disable=unused-import
import tensorflow as tf

import imagenet_input
import resnet_model
from tensorflow.contrib import summary
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
from tensorflow.python.estimator import estimator

FLAGS = flags.FLAGS

flags.DEFINE_bool(
    'use_tpu', True,
    help=('Use TPU to execute the model for training and evaluation. If'
          ' --use_tpu=false, will use whatever devices are available to'
          ' TensorFlow by default (e.g. CPU and GPU)'))

# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
    'gcp_project', default=None,
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')

flags.DEFINE_string(
    'tpu_zone', default=None,
    help='GCE zone where the Cloud TPU is located. If not specified, we '
    'will attempt to automatically detect the GCE zone from metadata.')

flags.DEFINE_string(
Example 4
import six
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import image_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import common_video
from tensor2tensor.layers import modalities
from tensor2tensor.utils import metrics
from tensor2tensor.utils import video_metrics
import tensorflow as tf
import tf_slim as slim

FLAGS = flags.FLAGS

flags.DEFINE_bool("disable_ffmpeg", False,
                  "Disable FFMPEG when generating debug videos.")


def resize_video_frames(images, size):
    return [
        tf.to_int64(
            tf.image.resize_images(image, [size, size],
                                   tf.image.ResizeMethod.BILINEAR))
        for image in images
    ]


def video_augmentation(features, hue=False, saturate=False, contrast=False):
    """Augments video with optional hue, saturation and contrast.

  Args:
Example 5
    "exec_code", None,
    "If present, no IPython shell is started but the code given in "
    "the flag is run instead (comparable to the -c option of "
    "IPython). The code will be able to use a predefined "
    "global 'grrapi' object.")

flags.DEFINE_string(
    "exec_file", None,
    "If present, no IPython shell is started but the code given in "
    "command file is supplied as input instead. The code "
    "will be able to use a predefined global 'grrapi' "
    "object.")

flags.DEFINE_bool(
    "version",
    default=False,
    allow_override=True,
    help="Print the API shell version number and exit immediately.")


def main(argv=None):
  del argv  # Unused.

  if flags.FLAGS.version:
    print("GRR API shell {}".format(config_server.VERSION["packageversion"]))
    return

  config.CONFIG.AddContext(contexts.COMMAND_LINE_CONTEXT)
  config.CONFIG.AddContext(contexts.CONSOLE_CONTEXT,
                           "Context applied when running the console binary.")
  server_startup.Init()
Example 6
flags.DEFINE_integer("decoder_cell_size", 64, "Size of decoder cell.")

flags.DEFINE_enum("optimizer_type", "Adam", ["Adam"],
                  "Type of optimizer to use.")

flags.DEFINE_float("learning_rate", 0.001, "Learning rate.")

flags.DEFINE_float("max_gradient_norm", 10.0, "Gradient clipping.")

flags.DEFINE_integer("max_decode_length", 200, "Max length to decode.")

flags.DEFINE_float("scheduled_sampling_prob", 0.0,
                   "Scheduled sampling probability used during training.")

flags.DEFINE_bool("use_attention", False, "True to use attention.")

flags.DEFINE_float("input_keep_prob", 1.0, "Dropout input keep probability.")

flags.DEFINE_float("output_keep_prob", 1.0, "Dropout output keep probability.")

flags.DEFINE_float("state_keep_prob", 1.0, "Dropout state keep probability.")

flags.DEFINE_bool("use_mixer", False, "True to use MIXER loss.")

flags.DEFINE_integer("rl_start_step", None, "Step at which to apply MIXER.")

flags.DEFINE_integer("rl_anneal_steps", None, "Number of steps to anneal.")

# Input configurations.
flags.DEFINE_string("src_file", None, "Source input file.")
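
The scheduled_sampling_prob flag above controls how often the decoder sees its own predictions during training. A hedged illustration of how such a probability is typically consumed at each decode step (predicted_token and ground_truth_token are hypothetical tensors, not from this snippet):

# With probability scheduled_sampling_prob, feed the model its own previous
# prediction instead of the ground-truth token.
use_model_output = tf.less(
    tf.random_uniform([]), FLAGS.scheduled_sampling_prob)
next_input = tf.cond(use_model_output,
                     lambda: predicted_token,     # hypothetical tensor
                     lambda: ground_truth_token)  # hypothetical tensor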
Example 7
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter

from absl import app, flags, logging
from absl.flags import FLAGS

from models.PSMnet import PSMNet
from models.smoothloss import SmoothL1Loss
from dataloader.AsterLoader import AsterLoader
from misc.dependences.File_util import File_util

flags.DEFINE_integer('epoch', 300, 'epoch number')
flags.DEFINE_integer('batch_size', 4, 'batch size')
flags.DEFINE_float('lr', 0.001, 'learning rate')
flags.DEFINE_bool('is_cuda', False, 'whether cuda is used or not')
flags.DEFINE_bool('pre_trained', False, 'whether model is pretrained or not')
flags.DEFINE_string('optimizer', 'Adam', 'select optimizer')
flags.DEFINE_string('criterion', 'SmoothL1Loss', 'select criterion')
flags.DEFINE_float('validation_rate', 0.1,
                   'validation split ratio used when splitting the dataset')
flags.DEFINE_string('tensor_board_log_dir', './tensorboard_log',
                    'tensorboardX logging folder')
flags.DEFINE_string('config_path', './configs/configs.yml', 'config file path')
flags.DEFINE_string('csv_path', './dataset/edit/result.csv',
                    'csv path for dataloader')
flags.DEFINE_string('save_path', './model.tar', 'model save path')
flags.DEFINE_integer('maxdisp', 192, 'max disparity')
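
A hedged sketch of how the string-valued optimizer and criterion flags might be dispatched inside main() (an assumption, since the snippet is cut off before the wiring; SmoothL1Loss is assumed to take no constructor arguments):

def build_optimizer(model):
    # Map the --optimizer flag to a concrete torch optimizer.
    if FLAGS.optimizer == 'Adam':
        return optim.Adam(model.parameters(), lr=FLAGS.lr)
    raise ValueError('Unsupported optimizer: {}'.format(FLAGS.optimizer))

def build_criterion():
    # Map the --criterion flag to a loss module.
    if FLAGS.criterion == 'SmoothL1Loss':
        return SmoothL1Loss()
    raise ValueError('Unsupported criterion: {}'.format(FLAGS.criterion))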


def main(_argv):
Example 8
import efficientnet_builder
import imagenet_input
import utils
from condconv import efficientnet_condconv_builder
from edgetpu import efficientnet_edgetpu_builder
from tpu import efficientnet_tpu_builder
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.estimator import estimator

FLAGS = flags.FLAGS

FAKE_DATA_DIR = 'gs://cloud-tpu-test-datasets/fake_imagenet'

flags.DEFINE_bool(
    'use_tpu',
    default=True,
    help=('Use TPU to execute the model for training and evaluation. If'
          ' --use_tpu=false, will use whatever devices are available to'
          ' TensorFlow by default (e.g. CPU and GPU)'))

# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
    'tpu',
    default=None,
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.'
)

flags.DEFINE_string(
    'gcp_project',
    default=None,
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
Example 9
def define_flags() -> List[str]:
  """Define common flags."""
  predefined_flags = set(FLAGS)

  flags.DEFINE_string('experiment_name', None, 'Name of this experiment.')

  # Flags relating to setting up the job.
  flags.DEFINE_string('tpu', None, 'Name of the TPU to use.')
  flags.DEFINE_enum(
      'mode',
      'train_and_eval',
      ['train', 'eval', 'train_and_eval'],
      'Whether to execute train and/or eval.')
  flags.DEFINE_bool('use_tpu', False, 'Whether to run on CPU or TPU.')

  # Flags relating to the training/eval loop.
  flags.DEFINE_integer(
      'checkpoint_step', -1, 'Step of the checkpoint to restore from.')
  flags.DEFINE_enum(
      'dataset_name',
      None,
      datasets.get_dataset_names(),
      'Name of the dataset to use.')
  flags.DEFINE_integer(
      'eval_frequency',
      None,
      'How many steps between evaluating on the (validation and) test set.')
  flags.DEFINE_string('output_dir', None, 'Base output directory.')
  flags.DEFINE_enum(
      'model_name',
      None,
      models.get_model_names(),
      'Name of the model to use.')
  flags.DEFINE_integer(
      'log_frequency',
      100,
      'How many steps between logging the metrics.')
  flags.DEFINE_integer('train_steps', None, 'How many steps to train for.')

  # Hyperparamter flags.
  flags.DEFINE_integer('batch_size', None, 'Training batch size.')
  flags.DEFINE_integer('eval_batch_size', None, 'Validation/test batch size.')
  flags.DEFINE_float('learning_rate', None, 'Learning rate.')
  flags.DEFINE_string(
      'learning_rate_schedule',
      'constant',
      'Learning rate schedule to use.')
  flags.DEFINE_string('optimizer', 'adam', 'Optimizer to use.')
  flags.DEFINE_float('optimizer_hparams_beta_1', 0.9, 'Adam beta_1.')
  flags.DEFINE_float('optimizer_hparams_beta_2', 0.999, 'Adam beta_2.')
  flags.DEFINE_float('optimizer_hparams_epsilon', 1e-7, 'Adam epsilon.')
  flags.DEFINE_float('weight_decay', 0.0, 'Weight decay.')
  flags.DEFINE_integer('seed', 42, 'Random seed.')
  flags.DEFINE_float(
      'validation_percent',
      0.0,
      'Percent of training data to hold out and use as a validation set.')

  # Loss function related flags.
  flags.DEFINE_enum('loss_name', None,
                    enum_values=['crossentropy', 'dm_loss', 'one_vs_all',
                                 'focal_loss'],
                    help='Loss function')
  flags.DEFINE_float('dm_alpha', 1.0, 'DM Alpha parameter.')
  flags.DEFINE_float('focal_gamma', 3.0, 'Gamma parameter for focal loss.')
  flags.DEFINE_bool('distance_logits', False,
                    'Whether to use a distance-based last layer.')

  flags.mark_flag_as_required('dataset_name')
  flags.mark_flag_as_required('experiment_name')
  flags.mark_flag_as_required('loss_name')
  flags.mark_flag_as_required('model_name')

  all_flags = set(FLAGS)
  program_flag_names = sorted(list(all_flags - predefined_flags))
  return program_flag_names
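
A hedged usage sketch: the returned names let a caller snapshot exactly the hyperparameters this script defined, for example to log them (the logging call is illustrative; absl logging is assumed to be imported):

program_flag_names = define_flags()
# Collect the values of only the flags define_flags() registered.
hparams = {name: FLAGS[name].value for name in program_flag_names}
logging.info('Program flags: %s', hparams)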
Example 10
flags.DEFINE_integer(
    'diskspd_stride_or_alignment', 64,
    'If the access pattern is sequential, this value is the stride of the '
    'accesses. If the access pattern is random, it is the number of bytes '
    'that random I/O aligns to. Default: 64. The unit (default: KB) is set '
    'by --diskspd_stride_or_alignment_unit.')

flags.DEFINE_enum(
    'diskspd_stride_or_alignment_unit', 'K', ['K', 'M', 'G', 'b'],
    'The unit of the stride or alignment. '
    'Available options: K|M|G|b. '
    'Default: K.')

flags.DEFINE_bool('diskspd_large_page', False,
                  'Whether to use large pages for IO buffers. '
                  'Default: False.')

flags.DEFINE_bool('diskspd_latency_stats', False,
                  'Whether to measure latency statistics. '
                  'Default: False.')

flags.DEFINE_bool(
    'diskspd_disable_affinity', False,
    'Whether to disable group affinity. Group affinity round-robins tasks '
    'across processor groups. '
    'Default: False.')

flags.DEFINE_bool('diskspd_write_through', True,
                  'Whether to enable write-through IO. '
                  'Default: True.')
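
A hedged sketch of how the stride flags above might be combined into the size token diskspd expects (the wiring is an assumption, not shown in this snippet):

# e.g. 64 and 'K' combine into '64K'.
stride_or_alignment = '{}{}'.format(
    FLAGS.diskspd_stride_or_alignment,
    FLAGS.diskspd_stride_or_alignment_unit)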
Example 11
## Other parameters

flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint, usually from a pre-trained BERT model. In the case of "
    "exporting, one can optionally provide path to a particular checkpoint to "
    "be exported here.")
flags.DEFINE_integer(
    "max_seq_length",
    128,  # contain CLS and SEP
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter than "
    "this will be padded.")

flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool("do_export", False, "Whether to export a trained model.")
flags.DEFINE_bool("eval_all_checkpoints", False,
                  "Run through all checkpoints.")
flags.DEFINE_integer(
    "eval_timeout", 600,
    "The maximum amount of time (in seconds) for eval worker to wait between "
    "checkpoints.")

flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 3e-5,
                   "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
Example 12
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts KITTI multiview extension data to the TFRecords format."""

import os
from absl import app
from absl import flags
import tensorflow as tf

FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '.', 'Dataset folder.')
flags.DEFINE_integer('height', 384, '')
flags.DEFINE_integer('width', 1280, '')
flags.DEFINE_bool('entire_sequence', False,
                  'Train on the full sequence, otherwise skip frames 9-12.')


def bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
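
For context, a hedged sketch of how these two helpers combine into a serialized tf.train.Example (the feature keys and encoded_frame are illustrative, not taken from this converter):

example = tf.train.Example(features=tf.train.Features(feature={
    'image_data': bytes_feature(encoded_frame),  # hypothetical bytes value
    'height': int64_feature(FLAGS.height),
    'width': int64_feature(FLAGS.width),
}))
serialized = example.SerializeToString()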


def convert_dataset(data_dir):
    """Convert the data to the TFRecord format."""

    for subdir in ['training', 'testing']:
Example 13
import numpy as np
import six

import tensorflow as tf
import tensorflow_gan as tfgan

from tensorflow_gan.python.contrib_utils import dimension_value

# Private functions to test.
from tensorflow_gan.python.estimator.tpu_gan_estimator import get_eval_estimator_spec
from tensorflow_gan.python.estimator.tpu_gan_estimator import get_predict_estimator_spec
from tensorflow_gan.python.estimator.tpu_gan_estimator import get_train_estimator_spec
from tensorflow_gan.python.estimator.tpu_gan_estimator import LossFns
from tensorflow_gan.python.estimator.tpu_gan_estimator import Optimizers

flags.DEFINE_bool('use_tpu', False, 'Whether to run test on TPU or not.')

TpuRunConfig = tf.compat.v1.estimator.tpu.RunConfig
CrossShardOptimizer = tf.compat.v1.tpu.CrossShardOptimizer
TPUEstimatorSpec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec


class TestOptimizerWrapper(tf.compat.v1.train.Optimizer):
    """An optimizer wrapper that is designed to share a real optimizer.

  The idea is that multiple instances of this class can share the real optimizer
  and this class will keep track of which steps executed on the real optimizer
  were executed by which instance of the wrapper class. This is useful for
  testing that the order of generator and discriminator steps is as desired.

  This optimizer also has an assertion that two consecutive substeps do not
Example 14
flags.DEFINE_float('weight_decay', 1e-4, 'L2 regularization weight.')
flags.DEFINE_integer(
    'max_attempts', 5,
    'Maximum number of times to try training and evaluating an individual'
    ' before aborting.')
flags.DEFINE_list(
    'intermediate_evaluations', ['0.5'],
    'Intermediate evaluations relative to --train_epochs. For example, to'
    ' evaluate the model at 1/4, 1/2, 3/4 of the total epochs, use [0.25, 0.5,'
    ' 0.75]. An evaluation is always done at the start and end of training.')
flags.DEFINE_integer(
    'num_repeats', 3,
    'Number of repeats evaluated for each model in the space.')

# TPU flags
flags.DEFINE_bool('use_tpu', False, 'Use TPUs for train and evaluation.')
#    'use_tpu', True, 'Use TPUs for train and evaluation.')
flags.DEFINE_integer('tpu_iterations_per_loop', 100,
                     'Iterations per loop of TPU execution.')
flags.DEFINE_integer('tpu_num_shards', 2,
                     'Number of TPU shards, a single TPU chip has 2 shards.')

# Random sampling flags
flags.DEFINE_integer('number_of_steps', 1,
                     'Number of batches used to evaluate the random sample.')
flags.DEFINE_integer(
    'max_samples', 200,
    'Number of random samples taken to evaluate the performance.')


def build_config():
Example 15
flags.DEFINE_float('one_minus_momentum', 0.1, 'Optimizer momentum.')
flags.DEFINE_float('l2', 1e-4, 'L2 coefficient.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.DEFINE_string(
    'output_dir', '/tmp/imagenet', 'The directory where the model weights and '
    'training/evaluation summaries are stored.')
flags.DEFINE_integer('train_epochs', 90, 'Number of training epochs.')
flags.DEFINE_integer(
    'checkpoint_interval', 25,
    'Number of epochs between saving checkpoints. Use -1 to '
    'never save checkpoints.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')

# Mixup-related flags.
flags.DEFINE_float('mixup_alpha', 0., 'Coefficient of mixup distribution.')
flags.DEFINE_bool('same_mix_weight_per_batch', False,
                  'Whether to use a single mix weight across the batch.')
flags.DEFINE_bool(
    'use_random_shuffling', False,
    'Whether to use random shuffling to pair the points of mixup '
    'within a batch.')
flags.DEFINE_bool(
    'use_truncated_beta', True, 'Whether to sample the mixup weights from '
    'Beta[0,1](alpha,alpha) or from the truncated distribution '
    'Beta[1/2,1](alpha,alpha).')
flags.DEFINE_float(
    'train_proportion', 1.0,
    'What proportion of the training set to use to train versus validate on.')
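
A hedged sketch of the sampling the mixup flags above describe (an assumption, not this repo's code; x and shuffled_x stand for a batch and its pairing, e.g. produced by the random shuffling the use_random_shuffling flag mentions):

lam = np.random.beta(FLAGS.mixup_alpha, FLAGS.mixup_alpha)
if FLAGS.use_truncated_beta:
  lam = max(lam, 1.0 - lam)  # fold Beta[0,1] draws into Beta[1/2,1]
mixed_x = lam * x + (1.0 - lam) * shuffled_x  # hypothetical tensors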

# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU (otherwise TPU).')
flags.DEFINE_bool('use_bfloat16', True, 'Whether to use mixed precision.')
Example 16
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags

import tensorflow as tf

from utils import raw_data_utils

FLAGS = flags.FLAGS

flags.DEFINE_bool("separate_doc_by_newline", False, "")

flags.DEFINE_string("output_data_dir", None, "")

flags.DEFINE_string("sub_set", "unsup_in", "")

flags.DEFINE_string("task_name", "IMDB", "")

flags.DEFINE_string("raw_data_dir", "IMDB", "")


def dump_raw_examples(examples, separate_doc_by_newline):
    """Dumps raw examples."""
    tf.logging.info("dumping raw examples")
    text_path = os.path.join(FLAGS.output_data_dir, "text.txt")
    label_path = os.path.join(FLAGS.output_data_dir, "label.txt")
Example 17
# Model-specific parameters
flags.DEFINE_string(
    'eval_master',
    default='',
    help='GRPC URL of the eval master. Set to an appropriate value when running'
    ' on CPU/GPU')
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],
                  'Training strategy: tpu for TPU, gpus for multi-GPU; if '
                  'None, use the TF default.')

flags.DEFINE_integer('num_cores',
                     default=8,
                     help='Number of TPU cores for training')

flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
    'use_xla', False,
    'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
    'and this flag has no effect.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')

flags.DEFINE_string(
    'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
    ' containing attributes to use as hyperparameters.')
flags.DEFINE_integer('batch_size', 64, 'training batch size')
flags.DEFINE_integer('eval_samples', 5000, 'The number of samples for '
                     'evaluation.')
flags.DEFINE_integer('iterations_per_loop', 100,
                     'Number of iterations per TPU training loop')
flags.DEFINE_string(
Example 18
from absl import flags

FLAGS = flags.FLAGS

flags.DEFINE_string('framework', 'ros',
                    'Execution framework to use: ros | ray.')
flags.DEFINE_bool('replay', False,
                  ('True if run in replay mode, otherwise run '
                   'Carla in server mode using `./CarlaUE4.sh -carla-server`'))
flags.DEFINE_string('log_file_name', None, 'Name of the log file')
flags.DEFINE_string('csv_log_file_name', None,
                    'csv file into which to log runtime stats')
flags.DEFINE_bool('fail_on_message_loss', True,
                  'True to enable operator failure when messages are lost')
flags.DEFINE_bool('ground_agent_operator', True,
                  'True to use the ground truth controller')

# Sensors to enable.
flags.DEFINE_bool('lidar', False, 'True to enable the lidar sensor')

# Modules to enable.
flags.DEFINE_bool('segmentation_drn', False,
                  'True to enable DRN segmentation operator')
flags.DEFINE_string('segmentation_drn_model_path',
                    'dependencies/models/drn_d_22_cityscapes.pth',
                    'Path to the model')
flags.DEFINE_bool('segmentation_dla', False,
                  'True to enable DLA segmentation operator')
flags.DEFINE_string('segmentation_dla_model_path',
                    'dependencies/dla/DLASeg.pth', 'Path to the model')
flags.DEFINE_bool('segmentation_gpu', True,
Example 19
from absl import logging
import tensorflow as tf
import numpy as np

import imagenet_input

try:
  import h5py as _  # pylint: disable=g-import-not-at-top
  HAS_H5PY = True
except ImportError:
  logging.warning('`h5py` is not installed. Please consider installing it '
                  'to save weights for long-running training.')
  HAS_H5PY = False


flags.DEFINE_bool('use_tpu', True, 'Use TPU model instead of CPU.')
flags.DEFINE_string('tpu', None, 'Name of the TPU to use.')
flags.DEFINE_string('data', None, 'Path to training and testing data.')

FLAGS = flags.FLAGS

# TODO(xiejw): Revert the PER_CORE_BATCH_SIZE to 128 once the model can run.
PER_CORE_BATCH_SIZE = 64
NUM_CLASSES = 1000
IMAGE_SIZE = 224
APPROX_IMAGENET_TRAINING_IMAGES = 1280000  # Approximate number of images.
APPROX_IMAGENET_TEST_IMAGES = 48000  # Approximate number of images.

WEIGHTS_TXT = '/tmp/resnet50_weights.h5'

Example 20
from tensorflow.contrib.slim.nets import inception

from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer

flags.DEFINE_float('learning_rate', 0.02, 'Learning rate.')
flags.DEFINE_float('depth_multiplier', 1.0, 'Depth Multiplier on Inception')
flags.DEFINE_integer(
    'train_steps', 800,
    'Total number of steps. Note that the actual number of '
    'steps is the next multiple of --iterations greater '
    'than this value.')
flags.DEFINE_integer('save_checkpoints_secs', None,
                     'Seconds between checkpoint saves')
flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than plain CPUs')
flags.DEFINE_string('use_data', 'fake', 'Data source: "fake" or "real".')
flags.DEFINE_string('data_dir', '', 'Path of the data (for use_data=real)')
flags.DEFINE_string('master', 'local',
                    'BNS name of the TensorFlow master to use.')
flags.DEFINE_string('model_dir', None, 'Estimator model_dir')
flags.DEFINE_integer('iterations', 40,
                     'Number of iterations per TPU training loop.')
flags.DEFINE_string('optimizer', 'momentum',
                    'optimizer (one of sgd, rms, momentum)')
flags.DEFINE_integer('num_shards', 8, 'Number of shards (TPU chips).')
flags.DEFINE_integer('batch_size', 64,
                     'Global batch_size, not the per-shard batch_size')
flags.DEFINE_integer('num_labels', 1024, 'number of classes to distinguish')
flags.DEFINE_integer('width', 304, 'width of input image')
flags.DEFINE_integer('height', 304, 'height of input image')
Example 21
from lingvo.jax import py_utils
from lingvo.jax import train
import tensorflow.compat.v2 as tf

# Required import to setup work units when running through XManager.

FLAGS = flags.FLAGS

flags.DEFINE_string('model', None, 'Lingvo Jax model name.')
flags.DEFINE_string('job_log_dir', None,
                    'Directory where all experiment assets will be stored.')
flags.DEFINE_enum('mode', 'train', ['train', 'eval', 'decode', 'decode_once'],
                  'Flag to control which job is called.')
flags.DEFINE_bool(
    'eval_on_test', False, 'If True, then the training loop '
    'includes a full evaluation on all the test set splits. '
    'This can be set to True if we do not want an additional job '
    'to run continuous eval.')
flags.DEFINE_bool(
    'multi_host_checkpointing', False,
    'Whether to use multi-host checkpointing or not. Only useful for '
    'multi-host SPMD models.')
flags.DEFINE_bool(
    'maybe_use_persistence_checkpointing', False,
    'If suitable, will try to rely on persistence-based checkpointing rather '
    'than Flax-based checkpointing for SPMD models.')
flags.DEFINE_string(
    'checkpoint_todelete_subdir', None,
    'If set, checkpoints to be deleted will be only renamed into a '
    'subdirectory with the provided string. Otherwise, they will be directly '
    'deleted from the file system. Useful if checkpoint deletion is time '
Example 22
LOGGER = logging.getLogger("dummy-gen")

FLAGS = flags.FLAGS

flags.DEFINE_string(
    'project_yml_path', None,
    'The path of the project.yml; normally stored at the top of the project.'
)

flags.DEFINE_string(
    'table_suffix', None,
    'Suffix appended to the model tables (does not overwrite; creates views '
    'with this suffix).'
)

flags.DEFINE_bool(
    'refresh', False,
    'Whether to refresh this version of the data. Not recommended; prefer '
    'changing "data_id".')

class Core:
  PROJ_CONF = parse_helper.parse_project_yml(FLAGS.project_yml_path)
  PROJ_ROOT_PATH = os.path.join(os.getcwd(), os.path.dirname(__file__))
  MODEL_PATH = os.path.join(PROJ_ROOT_PATH, PROJ_CONF['models'])
  SEED_PATH = os.path.join(PROJ_ROOT_PATH, PROJ_CONF['seed'])
  STATIC_PATH = os.path.join(PROJ_ROOT_PATH, PROJ_CONF['static'])

  def is_version_data_existed(self):
    """Check whether data for this version already exists in BQ.
    
    Method: iterate all models and 
    """
    # if exists, skip
    # return 
Example 23
from __future__ import division
from __future__ import print_function

from absl import flags, app

from data.bert_tokenization import FullTokenizer
from data.bert_regression_data_lib import StsProcessor

FLAGS = flags.FLAGS

flags.DEFINE_string("input_data_dir", None, "")
flags.DEFINE_string("set_type", "train", "")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")
flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")


def main(_):
    tokenizer = FullTokenizer(FLAGS.vocab_file,
                              do_lower_case=FLAGS.do_lower_case)
    processor = StsProcessor()
    with open("token_check.txt", "w") as output_file:
        for example in processor.get_examples(FLAGS.input_data_dir,
                                              FLAGS.set_type):
Example 24
    'resnet_checkpoint', '',
    'Location of the ResNet50 checkpoint to use for model '
    'initialization.')
flags.DEFINE_string(
    'training_file_pattern', None,
    'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('validation_file_pattern', None,
                    'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string('val_json_file', None,
                    'COCO validation JSON containing golden bounding boxes.')
flags.DEFINE_integer('num_examples_per_epoch', 118287,
                     'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', 15, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train',
                    'Mode to run: train or eval (default: train)')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
                  'training finishes.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')

# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
                     'Minimum seconds between evaluations.')

FLAGS = flags.FLAGS
_STOP = -1


def main(argv):
    del argv  # Unused.

    # TODO(b/132208296): remove this workaround that uses control flow v2.
    control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
Example 25
from absl import app
from absl import flags

from utils.data import make_tfrecord
from utils.tokenization import FullTokenizer

FLAGS = flags.FLAGS
flags.DEFINE_list('file_patterns', None, "Document's files patterns.")
flags.DEFINE_integer('max_seq_length', 512, 'Maximum sequence length.')
flags.DEFINE_float('mask_prob', 0.1,
                   'The probability of masking a token for masked LM.')
flags.DEFINE_integer(
    'max_mask_length', 20, 'Maximum number of masked LM predictions per sequence.'
)
flags.DEFINE_bool(
    'random_seq', False,
    'Original BERT uses random next-sequence prediction; for ALBERT, this '
    'is set to False.'
)
flags.DEFINE_bool(
    'whole_word_mask', False, 'Whether to use whole word masking.'
)
flags.DEFINE_string(
    'tfrecord_file_prefix', 'data-training/albert-training-data',
    'Prefix used for saved tfrecord files.'
)
flags.DEFINE_integer('tfrecord_file_num', 10, 'How many tfrecord files to store.')
flags.DEFINE_bool('do_lower_case', True, '')
flags.DEFINE_string('vocab_file', None, '')
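
A hedged sketch (an assumption, not shown in this snippet; tensorflow is assumed to be imported as tf) of how tfrecord_file_prefix and tfrecord_file_num might be combined into sharded writers:

writers = [
    tf.io.TFRecordWriter('{}-{:05d}-of-{:05d}'.format(
        FLAGS.tfrecord_file_prefix, i, FLAGS.tfrecord_file_num))
    for i in range(FLAGS.tfrecord_file_num)
]
# Examples can then be spread round-robin: writers[i % len(writers)].write(...)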


def main(_):
    tokenizer = FullTokenizer(
Example 26
    "data_dir",
    default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"),
                         "bayesian_neural_network/data"),
    help="Directory where data is stored (if using real data).")
flags.DEFINE_string("model_dir",
                    default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"),
                                         "bayesian_neural_network/"),
                    help="Directory to put the model's fit.")
flags.DEFINE_integer("viz_steps",
                     default=400,
                     help="Frequency at which to save visualizations.")
flags.DEFINE_integer("num_monte_carlo",
                     default=50,
                     help="Network draws to compute predictive probabilities.")
flags.DEFINE_bool("fake_data",
                  default=None,
                  help="If true, uses fake data. Defaults to real data.")

FLAGS = flags.FLAGS


def plot_weight_posteriors(names, qm_vals, qs_vals, fname):
    """Save a PNG plot with histograms of weight means and stddevs.

  Args:
    names: A Python `iterable` of `str` variable names.
    qm_vals: A Python `iterable`, the same length as `names`,
      whose elements are Numpy `array`s, of any shape, containing
      posterior means of weight variables.
    qs_vals: A Python `iterable`, the same length as `names`,
      whose elements are Numpy `array`s, of any shape, containing
Example 27
flags.DEFINE_string('hparams', '', 'Hyper parameters.')

flags.DEFINE_string('split_name', 'validation', 'Name of the split.')

flags.DEFINE_float('moving_average_decay', 0.9999,
                   'The decay to use for the moving average.')

flags.DEFINE_integer('eval_interval_secs', 120,
                     'The frequency, in seconds, with which evaluation is run.')

flags.DEFINE_integer(
    'num_examples', -1,
    'If positive, the maximum number of examples to use for evaluation.')

flags.DEFINE_bool('eval_once', False,
                  'If true then evaluate model only once.')

flags.DEFINE_string('trainable_scopes', None,
                    'If set, defines the list of variable scopes for '
                    'trainable variables.')


def main(_):
  if not FLAGS.train_dir and not FLAGS.checkpoint_path:
    print('Either the --train_dir or --checkpoint_path flag has to be '
          'provided.')
  if FLAGS.train_dir and FLAGS.checkpoint_path:
    print('Only one of --train_dir or --checkpoint_path should be provided.')
  params = model_lib.default_hparams()
  params.parse(FLAGS.hparams)
  tf.logging.info('User provided hparams: %s', FLAGS.hparams)
  tf.logging.info('All hyper parameters: %s', params)
Example 28
from official.modeling.hyperparams import params_dict
from official.modeling.training import distributed_executor as executor
from official.utils import hyperparams_flags
from official.vision.detection.configs import factory as config_factory
from official.vision.detection.dataloader import input_reader
from official.vision.detection.dataloader import mode_keys as ModeKeys
from official.vision.detection.executor.detection_executor import DetectionDistributedExecutor
from official.vision.detection.modeling import factory as model_factory
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils

hyperparams_flags.initialize_common_flags()
flags_core.define_log_steps()

flags.DEFINE_bool('enable_xla', default=False, help='Enable XLA for GPU')

flags.DEFINE_string('mode',
                    default='train',
                    help='Mode to run: `train`, `eval` or `train_and_eval`.')

flags.DEFINE_string('model',
                    default='retinanet',
                    help='Model to run: `retinanet` or `mask_rcnn`.')

flags.DEFINE_string('training_file_pattern', None,
                    'Location of the train data.')

flags.DEFINE_string('eval_file_pattern', None, 'Location of the eval data.')

flags.DEFINE_string(
Example 29
flags.DEFINE_integer('corruptions_interval', 135,
                     'Number of epochs between evaluating on the corrupted '
                     'test data. Use -1 to never evaluate.')
flags.DEFINE_integer('checkpoint_interval', 27,
                     'Number of epochs between saving checkpoints. Use -1 to '
                     'never save checkpoints.')
flags.DEFINE_string('alexnet_errors_path', None,
                    'Path to AlexNet corruption errors file.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')

flags.DEFINE_integer('num_eval_samples', 1,
                     'Number of model predictions to sample per example at '
                     'eval time.')

# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU (otherwise TPU).')
flags.DEFINE_bool('use_bfloat16', True, 'Whether to use mixed precision.')
flags.DEFINE_integer('num_cores', 32, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
                    'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS

# Number of images in ImageNet-1k train dataset.
APPROX_IMAGENET_TRAIN_IMAGES = 1281167
# Number of images in eval dataset.
IMAGENET_VALIDATION_IMAGES = 50000
NUM_CLASSES = 1000

_LR_SCHEDULE = [    # (multiplier, epoch to start) tuples
    (1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
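
A hedged sketch of one common way such (multiplier, start_epoch) tuples are consumed, with linear warmup until the first entry (an assumption; the consumer is not shown in this snippet):

def learning_rate_schedule(base_lr, epoch):
  # Warm up linearly to the first multiplier over the first entry's epochs,
  # then step down at each scheduled epoch.
  lr = base_lr * _LR_SCHEDULE[0][0] * min(1.0, epoch / _LR_SCHEDULE[0][1])
  for mult, start_epoch in _LR_SCHEDULE:
    if epoch >= start_epoch:
      lr = base_lr * mult
  return lr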
Example 30
                   'Maximum rendering time for plotting/playback')
flags.DEFINE_float('minRenderTime', 0.01,
                   'Minimum rendering time for plotting/playback')
flags.DEFINE_float('qvelMax', 1e-05,
                   'Joint velocity threshold to continue rendering')
flags.DEFINE_float('qvelMean', 1e-06,
                   'Joint velocity mean threshold to continue rendering')
flags.DEFINE_float('qaccMax', 1e-05,
                   'Joint acceleration threshold to continue rendering')
flags.DEFINE_float('qaccMean', 1e-06,
                   'Joint acceleration mean threshold to continue rendering')

flags.DEFINE_string('model_filename', 'ratMocap\\models\\ratMocap.xml',
                    'filename for model.')
flags.DEFINE_string('outName', None, 'filename and path for output.')
flags.DEFINE_bool('qOnly', True, 'Whether to output only a .mat of joint angles.')
flags.DEFINE_bool('record', False, 'Whether to write a video file.')
flags.DEFINE_bool('play', False, 'Whether to play a video.')
flags.DEFINE_bool('save', True, 'Whether to save the rendering.')
flags.DEFINE_bool('silent', False, 'Whether to suppress rendering progress output.')


def get_model_and_assets():
    """Returns a tuple containing the model XML string and a dict of assets."""
    return common.read_model(FLAGS.model_filename), common.ASSETS


class jeffRat(base.Task):
    """A task for the Rat skeleton."""
    def __init__(self, random=None):
        """Initializes an instance of `jeffRat`.