Example #1
  def test_flag_help_in_xml_enum_class(self):
    class Version(enum.Enum):
      STABLE = 0
      EXPERIMENTAL = 1

    flags.DEFINE_enum_class('cc_version', 'STABLE', Version,
                            'Compiler version to use.', flag_values=self.fv)
    expected_output = ('<flag>\n'
                       '  <file>tool</file>\n'
                       '  <name>cc_version</name>\n'
                       '  <meaning>&lt;stable|experimental&gt;: '
                       'Compiler version to use.</meaning>\n'
                       '  <default>stable</default>\n'
                       '  <current>Version.STABLE</current>\n'
                       '  <type>enum class</type>\n'
                       '  <enum_value>STABLE</enum_value>\n'
                       '  <enum_value>EXPERIMENTAL</enum_value>\n'
                       '</flag>\n')
    self._check_flag_help_in_xml('cc_version', 'tool', expected_output)
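Before the remaining examples, a minimal self-contained sketch (not taken from any snippet above) of what flags.DEFINE_enum_class does: the parsed flag value is the enum member itself, so code compares and dispatches on enum members rather than strings. On the command line the flag is set by member name, case-insensitively unless case_sensitive=True is passed.

# Minimal sketch, assuming only absl-py; the flag and enum names are
# illustrative, echoing Example #1.
import enum

from absl import app
from absl import flags

FLAGS = flags.FLAGS


class Version(enum.Enum):
  STABLE = 0
  EXPERIMENTAL = 1


# The default may be given as an enum member or as its name string.
flags.DEFINE_enum_class('cc_version', Version.STABLE, Version,
                        'Compiler version to use.')


def main(argv):
  del argv  # Unused.
  print(FLAGS.cc_version)       # Version.STABLE -- the member, not a string.
  print(FLAGS.cc_version.name)  # 'STABLE'


if __name__ == '__main__':
  app.run(main)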
Example #2
import config
import tensorflow as tf  # needed below for tf.random and tf.keras
import time
import matplotlib.pyplot as plt
from dataset import build_dataset
from config import TrainMode
import pandas as pd
import cv2

from absl import app, flags, logging
from tqdm import tqdm

FLAGS = flags.FLAGS

flags.DEFINE_enum_class(
    'train_mode',
    default=TrainMode.ALL,
    enum_class=TrainMode,
    help="Select training mode, one of {'ALL', 'GAN', 'VAE'}")

# GAN functions.
# Generator, Discriminator and invG are model-builder classes defined
# elsewhere in this project (not shown in this excerpt).
seed = tf.random.normal([4, config.NZ])

generator = Generator().build()
discriminator = Discriminator().build()
inv_generator = invG().build()

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
generator_opt = tf.keras.optimizers.Adam(config.LR)
discriminator_opt = tf.keras.optimizers.Adam(config.LR)

Example #3
from absl import app, flags
from enum import unique, Enum
from kazoo.client import KazooClient
from yolo3.utils import get_classes


@unique
class MODE(Enum):
    STATUS = 1
    CONFIG = 2
    ZOOKEEPER = 3


FLAGS = flags.FLAGS
flags.DEFINE_enum_class("mode",
                        default=MODE.ZOOKEEPER,
                        enum_class=MODE,
                        help='exec mode')
flags.DEFINE_multi_string("addresses",
                          default=[
                              "10.12.102.32:8500", "10.12.102.33:8500",
                              "10.12.102.52:8500", "10.12.102.53:8500"
                          ],
                          help='grpc servers address')


def get_config(*args):
    return bytes('#'.join(str(arg) for arg in args), encoding="utf8")


def main(_):
    if MODE.STATUS == FLAGS.mode:
Example #4
from absl import app
from absl import flags
# SubwordTextEncoder comes from tensor2tensor (assumed import path).
from tensor2tensor.data_generators import text_encoder

import code_to_subtokenized_sentences
import tokenizer_registry

FLAGS = flags.FLAGS

flags.DEFINE_string('vocabulary_filepath', None,
                    'Path to the subword vocabulary.')

flags.DEFINE_string('input_filepath', None,
                    'Path to the Python source code file.')

flags.DEFINE_string('output_filepath', None,
                    'Path to the output file of subtokenized source code.')

flags.DEFINE_enum_class('tokenizer',
                        default=tokenizer_registry.TokenizerEnum.PYTHON,
                        enum_class=tokenizer_registry.TokenizerEnum,
                        help='The tokenizer to use.')


def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    # The value of the `TokenizerEnum` is a `CuBertTokenizer` subclass.
    tokenizer = FLAGS.tokenizer.value()
    subword_tokenizer = text_encoder.SubwordTextEncoder(
        FLAGS.vocabulary_filepath)

    with open(FLAGS.input_filepath, 'r') as input_file:
        code = input_file.read()
        # print('#' * 80)
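Example #4 relies on a pattern worth calling out: the enum member's value is itself a class, so FLAGS.tokenizer.value() constructs the selected tokenizer. A hedged sketch of that pattern with stand-in class names (the real TokenizerEnum lives in tokenizer_registry):

# Sketch of the enum-value-as-class pattern; WhitespaceTokenizer and
# CharTokenizer are hypothetical stand-ins for CuBertTokenizer subclasses.
import enum

from absl import flags

FLAGS = flags.FLAGS


class WhitespaceTokenizer:
  def tokenize(self, text):
    return text.split()


class CharTokenizer:
  def tokenize(self, text):
    return list(text)


class TokenizerEnum(enum.Enum):
  # Plain classes are not descriptors, so they are legal enum values.
  WHITESPACE = WhitespaceTokenizer
  CHAR = CharTokenizer


flags.DEFINE_enum_class('tokenizer', TokenizerEnum.WHITESPACE, TokenizerEnum,
                        'The tokenizer to use.')

# After flag parsing, FLAGS.tokenizer.value is the selected class, so
# calling it instantiates the tokenizer, exactly as Example #4 does.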
Example #5
    ".tfrecord",
    "Should be '.tfrecod' or '.txtpb'",
)
flags.DEFINE_string(
    "vocab_file",
    None,
    "The vocabulary file that the BERT model was trained on.",
)
flags.DEFINE_bool(
    "convert_to_examples",
    True,
    "If true convert interactions to examples.",
)
flags.DEFINE_enum_class(
    "mode",
    intermediate_pretrain_utils.Mode.ALL,
    intermediate_pretrain_utils.Mode,
    "Mode to run in.",
)
flags.DEFINE_integer(
    "max_seq_length",
    128,
    "See tf_example_utils.ClassifierConversionConfig",
)
flags.DEFINE_boolean(
    "use_fake_table",
    False,
    "Replace table with a constant.",
)
flags.DEFINE_boolean(
    "add_opposite_table",
    False,
Example #6
"""Utilities for running beam pipelines."""

import enum

from absl import flags
from apache_beam import runners
from apache_beam.options import pipeline_options
from apache_beam.runners.direct import direct_runner


class RunnerType(enum.Enum):
    DIRECT = 1
    DATAFLOW = 2


flags.DEFINE_enum_class("runner_type", RunnerType.DIRECT, RunnerType,
                        "Runner type to use.")
# Google Cloud options.
# See https://beam.apache.org/get-started/wordcount-example/
flags.DEFINE_string("gc_project", None, "e.g. my-project-id")
# GC regions: https://cloud.google.com/compute/docs/regions-zones
flags.DEFINE_string("gc_region", None, "e.g. us-central1")
flags.DEFINE_string("gc_job_name", None, "e.g. myjob")
flags.DEFINE_string("gc_staging_location", None,
                    "e.g. gs://your-bucket/staging")
flags.DEFINE_string("gc_temp_location", None, "e.g. gs://your-bucket/temp")
flags.DEFINE_boolean("save_main_session", False,
                     "Useful when getting NameErrors from global imports.")
# Pass Tapas sources to GC.
# See https://beam.apache.org/documentation/sdks/python-pipeline-dependencies/
flags.DEFINE_list(
    "extra_packages",
Example #7
from typing import Sequence

from absl import app
from absl import flags

from squiggles import generate_data

_BASE_PATH = flags.DEFINE_string(
    'base_path',
    None,
    'Output filename base (including directory path).',
    required=True,
)
_SQUIGGLE_ALGORITHM = flags.DEFINE_enum_class(
    'squiggle_algorithm',
    generate_data.LatentSpace.UNDEFINED,
    generate_data.LatentSpace,
    'Which latent space to use to generate squiggles: "sine_net" or "taylor"',
)
_SAMPLES_PER_SHARD = flags.DEFINE_integer(
    'samples_per_shard',
    None,
    'The number of squiggle examples to include in each shard.',
    required=True,
)
_NUM_TRAIN_SHARDS = flags.DEFINE_integer(
    'num_train_shards',
    1,
    'how many shards in the "train" split',
)
_NUM_TEST_SHARDS = flags.DEFINE_integer(
    'num_test_shards',
Example #8
import tensorflow as tf
from absl import flags

tf.compat.v1.enable_eager_execution()
tf.compat.v1.enable_v2_behavior()
tf.compat.v1.enable_v2_tensorshape()
from yolo3.enums import BACKBONE, MODE, OPT
from train import train
from train_backbone import train as train_backbone
from yolo import YOLO, detect_video, detect_img, export_tflite_model, export_serving_model, calculate_map, export_tfjs_model
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

FLAGS = flags.FLAGS

flags.DEFINE_enum_class(
    'backbone',
    default=BACKBONE.MOBILENETV2,
    enum_class=BACKBONE,
    help=
    "Select network backbone, One of {'MOBILENETV2','DARKNET53','EFFICIENTNET'}"
)
flags.DEFINE_integer('batch_size',
                     default=8,
                     lower_bound=0,
                     help="Train batch size")
flags.DEFINE_string('config', default=None, help="Config path")
flags.DEFINE_multi_integer('epochs',
                           default=[10, 10],
                           lower_bound=0,
                           help="Frozen train epochs and Full train epochs")
flags.DEFINE_string('export', default='export_model/8', help="Export path")
flags.DEFINE_string('input', default=None, help="Input data for various mode")
flags.DEFINE_multi_integer('input_size',
Example #9
                                'Path to file defining a config dict.')

flags.DEFINE_integer(
    'batch_size', default=128, help=('Batch size for training.'))

flags.DEFINE_bool('cache', default=False, help=('If True, cache the dataset.'))

flags.DEFINE_integer(
    'num_epochs', default=250, help=('Number of training epochs.'))


flags.DEFINE_enum_class(
    'lr_scheduler',
    default=LRScheduler.COSINE,
    enum_class=LRScheduler,
    help=('Learning rate scheduler. Can be set to COSINE or STEP. '
          'COSINE: Cosine decay, tuned to achieve target eval acc quickly. '
          'STEP: Piecewise step decay, uses constant lr at every interval,'
          ' allowing loss to flatten out at each lr, and exponential'
          ' decay of lr throughout the intervals.'))

flags.DEFINE_bool(
    'half_precision',
    default=True,
    help=('If bfloat16/float16 should be used instead of float32.'))


flags.DEFINE_integer(
    'state_dict_summary_freq',
    default=200,
    help='Number of training steps between state dict summaries reported to '
Example #10
flags.DEFINE_list('stoichiometries', [], 'List of stoichiometries to query')
flags.DEFINE_string(
    'smarts', '',
    'SMARTS query to retrieve molecules with matching bond topology. '
    'Note that this is a single value, not a comma separated list')
flags.DEFINE_list(
    'topology_query_smiles', [],
    'List of smiles to query, where the valid bond lengths are '
    'given by --bond_lengths_csv and --bond_lengths. '
    'Will return all molecules where the given smiles is a '
    'valid description of that geometry given the bond lengths. '
    'If you are using the default bond lengths, you should just '
    'use --smiles as this method is much slower.')
flags.DEFINE_float('random_fraction', 0.0,
                   'Randomly return this fraction of DB.')
flags.DEFINE_enum_class('output_format', OutputFormat.PBTXT, OutputFormat,
                        'Format for the found SMU entries')
flags.DEFINE_enum_class(
    'which_topologies', smu_utils_lib.WhichTopologies.ALL,
    smu_utils_lib.WhichTopologies, 'This flag has double duty. '
    'For btids, smiles, and smarts queries, it specifies which topologies '
    'to match. For sdf and atomic2_input output formats, it specifies which bond '
    'topologies should be returned:\n '
    '"all" means all topologies,\n '
    '"best" means a single best topology,\n '
    '"starting" means the single topology used for the calculations,\n '
    '"itc" means all topologies detected with our original bond lengths,\n '
    '"mlcr" means all topologies using very permissive covalent radii\n '
    '(from Meng and Lewis), '
    '"csd" means all topologies using bond lengths from the '
    'Cambridge Structural Database')
flags.DEFINE_boolean(
Example #11
from absl import flags
from absl import logging
# learning_spec import path assumed from the meta_dataset package layout.
from meta_dataset import learning_spec
from meta_dataset.data import utils
import tensorflow.compat.v1 as tf

tf.enable_eager_execution()

flags.DEFINE_multi_string('gin_config', None,
                          'List of paths to the config files.')
flags.DEFINE_multi_string('gin_bindings', None,
                          'List of Gin parameter bindings.')
flags.DEFINE_string('output_dir', '/tmp/cached_episodes/',
                    'Root directory for saving episodes.')
flags.DEFINE_integer('num_episodes', 600, 'Number of episodes to sample.')
flags.DEFINE_string('dataset_name', 'omniglot', 'Dataset name to create '
                    'episodes from.')
flags.DEFINE_enum_class('split', learning_spec.Split.TEST, learning_spec.Split,
                        'See learning_spec.Split for '
                        'allowed values.')
flags.DEFINE_boolean(
    'ignore_dag_ontology', False, 'If True the dag ontology'
    ' for Imagenet dataset is not used.')
flags.DEFINE_boolean(
    'ignore_bilevel_ontology', False, 'If True the bilevel'
    ' sampling for Omniglot dataset is not used.')
tf.flags.DEFINE_string(
    'records_root_dir', '',
    'Root directory containing a subdirectory per dataset.')
FLAGS = flags.FLAGS


def main(unused_argv):
    logging.info(FLAGS.output_dir)
Example #12
class Model(enum.Enum):
    vae = enum.auto()
    avae = enum.auto()


class EncoderArch(enum.Enum):
    color_mnist_mlp_encoder = 'ColorMnistMLPEncoder'


class DecoderArch(enum.Enum):
    color_mnist_mlp_decoder = 'ColorMnistMLPDecoder'


_DATASET = flags.DEFINE_enum_class('dataset',
                                   data_iterators.Dataset.color_mnist,
                                   data_iterators.Dataset,
                                   'Dataset to train on')
_LATENT_DIM = flags.DEFINE_integer('latent_dim', 32,
                                   'Number of latent dimensions.')
_TRAIN_BATCH_SIZE = flags.DEFINE_integer('train_batch_size', 64,
                                         'Train batch size.')
_TEST_BATCH_SIZE = flags.DEFINE_integer('test_batch_size', 64,
                                        'Testing batch size.')
_TEST_EVERY = flags.DEFINE_integer('test_every', 1000,
                                   'Test every N iterations.')
_ITERATIONS = flags.DEFINE_integer('iterations', 102000,
                                   'Number of training iterations.')
_OBS_VAR = flags.DEFINE_float(
    'obs_var', 0.5, 'Observation variance of the data. (Default 0.5)')

_MODEL = flags.DEFINE_enum_class('model', Model.avae, Model,
Example #13
flags.DEFINE_integer('malmo_base_port',
                     default=9001,
                     help='malmo base port (to prevent conflicts)')
flags.DEFINE_integer('train_seed', default=0, help='random seed for train')
flags.DEFINE_string('test_model',
                    default='impala',
                    help='which model to test (bc/impala)')

# env params:
flags.DEFINE_integer(
    'max_step_mul',
    default=40,
    help='maximum step multiplier; > 0, 1 = no step multiplier')
flags.DEFINE_integer('pov_resolution', default=32, help='pov resolution')
flags.DEFINE_enum_class('pov_color_space',
                        default=ColorSpace.RGB,
                        enum_class=ColorSpace,
                        help='pov color space')
flags.DEFINE_integer('num_camera_actions',
                     default=3,
                     help='number of discrete camera actions (odd value!)')
flags.DEFINE_integer('camera_max_angle',
                     default=30,
                     help='max camera angle change in one direction')
flags.DEFINE_bool('fixed_step_mul',
                  default=False,
                  help='fixed step multiplier')
flags.DEFINE_integer('step_mul',
                     default=8,
                     help='step multiplier, if fixed_step_mul=True')

# model params:
Example #14
from tensorflow_serving.apis import get_model_status_pb2, model_service_pb2_grpc, model_management_pb2
import grpc
from absl import app, flags
from enum import unique, Enum


@unique
class MODE(Enum):
    STATUS = 1
    CONFIG = 2


FLAGS = flags.FLAGS
flags.DEFINE_enum_class("mode",
                        default=MODE.STATUS,
                        enum_class=MODE,
                        help='exec mode')
flags.DEFINE_string("address",
                    default="10.12.102.32:8500",
                    help='grpc server address')


def main(_):
    channel = grpc.insecure_channel(FLAGS.address)

    stub = model_service_pb2_grpc.ModelServiceStub(channel)
    if MODE.STATUS == FLAGS.mode:
        request = get_model_status_pb2.GetModelStatusRequest()
        request.model_spec.name = 'pascal'
        request.model_spec.signature_name = 'serving_default'
        result = stub.GetModelStatus(request)
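A side note on the comparison style: FLAGS.mode is a MODE member, so besides if/elif chains (as in this example and Example #3), a dict dispatch over members is a natural alternative, since enum members are hashable. A hypothetical sketch, reusing the MODE and FLAGS names above; the handlers are stand-ins:

# Hypothetical dict-based dispatch on an enum-class flag.
def handle_status():
    print('querying model status')


def handle_config():
    print('reloading model config')


HANDLERS = {
    MODE.STATUS: handle_status,
    MODE.CONFIG: handle_config,
}


def dispatch():
    HANDLERS[FLAGS.mode]()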
Example #15
    "The number of training examples in one forward/backward pass.",
)
flags.DEFINE_float(
    "base_learning_rate",
    13e-4,
    "The default rate at which weights are updated during training.",
)
flags.DEFINE_bool(
    "add_self_attention",
    True,
    "Add self attention instead of Cross attention.",
)

flags.DEFINE_enum_class(
    "fusion_method",
    FusionMethod.CONCAT,
    FusionMethod,
    "Fusion method for joint representation.",
)
flags.DEFINE_bool("use_train_and_val", True,
                  "Use both train and val for training.")

flags.DEFINE_integer("seed", 1204, "Random seed.")
flags.DEFINE_integer("hidden_dimension", 1024, "Dimension of hidden states.")
flags.DEFINE_integer("number_of_epochs", 70, "Number of epochs for training.")

flags.DEFINE_integer("start_epoch", 0,
                     "Epoch at which training should start/restart.")
flags.DEFINE_integer("warmup_length", 5,
                     "Number of epochs for the warmup stage.")
flags.DEFINE_float("warmup_factor", 0.5,
                   "Factor by which learning rate is multiplied.")
Example #16
import logging

from absl import flags
from injector import Binder, Module, inject, singleton

from rep0st.framework import app
from rep0st.framework.scheduler import Scheduler, SchedulerModule
from rep0st.service.feature_service import FeatureService, FeatureServiceModule
from rep0st.db.post import Type as PostType

log = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DEFINE_string(
    'rep0st_update_features_job_schedule', '* * * * * *',
    'Schedule in crontab format for running the feature update job.')
flags.DEFINE_enum_class(
    'rep0st_update_features_post_type', PostType.IMAGE, PostType,
    'The post type (image, video, ...) this job should index.')


class UpdateFeaturesJobModule(Module):
    def configure(self, binder: Binder):
        binder.install(FeatureServiceModule)
        binder.install(SchedulerModule)
        binder.bind(UpdateFeaturesJob)


@singleton
class UpdateFeaturesJob:
    feature_service: FeatureService

    @inject
flags.DEFINE_string("input_interactions_dir", None, "Directory with inputs.")
flags.DEFINE_string("input_tables_dir", None, "Directory with inputs.")
flags.DEFINE_string("output_dir", None, "Directory with outputs.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_integer("max_seq_length", None,
                     "Max length of a sequence in word pieces.")
flags.DEFINE_float("max_column_id", None, "Max column id to extract.")
flags.DEFINE_float("max_row_id", None, "Max row id to extract.")
flags.DEFINE_integer(
    "cell_trim_length", -1,
    "If > 0: Trim cells so that the length is <= this value.")
flags.DEFINE_boolean("use_document_title", None,
                     "Include document title text in the tf example.")
flags.DEFINE_enum_class("converter_impl", create_data.ConverterImplType.PYTHON,
                        create_data.ConverterImplType,
                        "Implementation to map interactions to tf examples.")
FLAGS = flags.FLAGS


def run(inputs, outputs, input_format):
    beam_runner.run(
        create_data.build_retrieval_pipeline(
            input_files=inputs,
            input_format=input_format,
            output_files=outputs,
            config=tf_example_utils.RetrievalConversionConfig(
                vocab_file=FLAGS.vocab_file,
                max_seq_length=FLAGS.max_seq_length,
                max_column_id=FLAGS.max_column_id,
                max_row_id=FLAGS.max_row_id,
Example #18
import collections
import enum
from typing import List, Tuple

from absl import flags

from wikigraphs.data import tokenizers
from wikigraphs.data import wikitext


class DatasetType(enum.Enum):
    text = 1
    graph = 2
    wikitext = 3


FLAGS = flags.FLAGS
flags.DEFINE_string(
    'data_dir', '', 'Path to the directory that contains the'
    ' unzipped wikitext-103 data.')
flags.DEFINE_string('vocab_file_path', '', 'Path to the output vocab file.')
flags.DEFINE_enum_class('data_type', DatasetType.wikitext, DatasetType,
                        'One of {`wikitext`, `graph`, `text`}.')
flags.DEFINE_integer(
    'threshold', 1, 'Frequency threshold for a word to be'
    ' included in the vocabulary.')
flags.DEFINE_string('version', 'max256',
                    'Which version of paired data to use.')


def get_vocab(dataset: wikitext.RawDataset) -> List[Tuple[str, int]]:
    """Build vocabulary, return (word, count) tuples sorted by count."""
    vocab = collections.defaultdict(int)

    for pair in dataset:
        for t in pair.text.split(' '):
            if t:
                vocab[t] += 1
Example #19
"""Scripts to convert predictions file to other formats."""

from absl import app
from absl import flags
from tapas.scripts import convert_predictions_utils

FLAGS = flags.FLAGS

flags.DEFINE_list('interaction_files', None,
                  'A list of files contain interactions protos.')
flags.DEFINE_list('prediction_files', None,
                  'A list of files that contain model prediction.')
flags.DEFINE_string('output_directory', None,
                    'Output directory where converted files will be stored.')
flags.DEFINE_enum_class('dataset_format', None,
                        convert_predictions_utils.DatasetFormat,
                        'Dataset format.')


def main(_):
  convert_predictions_utils.convert(FLAGS.interaction_files,
                                    FLAGS.prediction_files,
                                    FLAGS.output_directory,
                                    FLAGS.dataset_format)


if __name__ == '__main__':
  flags.mark_flag_as_required('interaction_files')
  flags.mark_flag_as_required('prediction_files')
  flags.mark_flag_as_required('output_directory')
  flags.mark_flag_as_required('dataset_format')
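Example #19 is a complete program. One note on its required-flag pattern: recent absl-py versions also accept required=True at definition time (as Examples #7 and #20 do), which makes the mark_flag_as_required calls unnecessary. A hedged equivalent for one of the flags, assuming a recent absl-py:

# Equivalent sketch using required=True instead of mark_flag_as_required.
# The default must be None when required=True.
flags.DEFINE_enum_class('dataset_format',
                        None,
                        convert_predictions_utils.DatasetFormat,
                        'Dataset format.',
                        required=True)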
Example #20
from absl import app
from absl import flags

from density_functional_approximation_dm21 import neural_numint

_OUT_DIR = flags.DEFINE_string('out_dir',
                               None,
                               'Output directory.',
                               required=True)
_BATCH_SIZE = flags.DEFINE_integer(
    'batch_size',
    1000,
    'Number of grid points exported functional will process in a single call.',
    lower_bound=0)
_FUNCTIONAL = flags.DEFINE_enum_class('functional',
                                      neural_numint.Functional.DM21,
                                      neural_numint.Functional,
                                      'Functional to export.')


def export(
    functional: neural_numint.Functional,
    export_path: str,
    batch_dim: int,
) -> None:
    """Export a functional and its derivatives to a single saved_model.

  Args:
    functional: functional to export.
    export_path: path to save the model to.
    batch_dim: number of grid points to process in a single call.
  """
Example #21
                'Flag `step_size_transitions` needs to be divisible by'
                ' the `batch_size` x `unroll_length`.')
        return FLAGS.step_size_transitions // (batch_size * unroll_length)
    raise ValueError(
        'Exactly one of the flags `batches_per_step`, '
        '`step_size_unroll` and `step_size_transitions` needs to be '
        'non-zero.')


flags.DEFINE_integer(
    'epochs_per_step', 1, 'How many times to pass over all the'
    ' batches during a training step.')

# Profiling.
flags.DEFINE_enum_class(
    'profile_inference_return', learner_config.InferenceReturn.END,
    learner_config.InferenceReturn,
    'Allows early returns in the inference function to profile performance.')

FLAGS = flags.FLAGS


def training_config_from_flags() -> learner_config.TrainingConfig:
    """Returns training config from the command line flags."""
    return learner_config.TrainingConfig(
        batch_mode=FLAGS.batch_mode,
        batch_size=batch_size_from_flags(),
        batches_per_step=batches_per_step_from_flags(),
        block_inference_on_training=FLAGS.block_inference_on_training,
        clip_norm=FLAGS.clip_norm,
        env_batch_size=FLAGS.env_batch_size,
        env_name=FLAGS.env_name,
Example #22
class OutputFormat(enum.Enum):
  pbtxt = 1
  sdf_opt = 2
  sdf_init = 3
  sdf_init_opt = 4


flags.DEFINE_string(
    'input_sqlite', None,
    'Path of sqlite file to read. Must be on the local filesystem.')
flags.DEFINE_string(
    'output_path', None,
    'Path to output file to write. If not specified, will write to stdout.')
flags.DEFINE_list('btids', [], 'List of bond topology ids to query')
flags.DEFINE_list('cids', [], 'List of conformer ids to query')
flags.DEFINE_list('smiles', [], 'List of smiles to query')
flags.DEFINE_enum_class('output_format', OutputFormat.pbtxt, OutputFormat,
                        'Format for the found SMU entries')

FLAGS = flags.FLAGS


class PBTextOutputter:
  """Simple internal class to write entries to text protocol buffer."""

  def __init__(self, output_path):
    """Creates PBTextOutputter.

    Args:
      output_path: file path to write to
    """
    if output_path:
      self.outfile = gfile.GFile(output_path, 'w')
Example #23
from absl import flags

from cola import constants
from cola import contrastive
from cola import supervised

FLAGS = flags.FLAGS

flags.DEFINE_string("tpu_address", None, "TPU Address.")

flags.DEFINE_string("experiment_id", None,
                    "Unique id to use for model checkpointing.")

flags.DEFINE_string("strategy", "tpu",
                    "TF distribute strategy either of `tpu` or `gpu`.")

flags.DEFINE_enum_class("training_mode", constants.TrainingMode.SSL,
                        constants.TrainingMode, "Mode of model training.")

flags.DEFINE_string("model_dir", None,
                    "Path to directory where to store models.")

flags.DEFINE_enum_class(
    "ssl_dataset", constants.Dataset.AS, constants.Dataset,
    "Name of the dataset to use for self-supervised pre-training.")

flags.DEFINE_enum_class("ds_dataset", constants.Dataset.MUSAN,
                        constants.Dataset,
                        "Name of the downstream task dataset.")

flags.DEFINE_string("ssl_checkpoint_id", None,
                    "Self-supervised model checkpoint id.")
Example #24
flags.DEFINE_string('graph_yaxis_title', 'AverageReturn',
                    'Title for the y-axis or event_name is used.')

flags.DEFINE_string('event_name', 'AverageReturn', 'Name of event to track.')
flags.DEFINE_integer('end_step', None,
                     'If set, processing of the event log ends on this step.')
flags.DEFINE_boolean('show_graph', False, 'If true, show graph in a window.')


class GraphAggTypes(enum.Enum):
  """Enum of options to aggregate data when generating a graph."""
  MEAN = 'mean'
  MEDIAN = 'median'


flags.DEFINE_enum_class('graph_agg', GraphAggTypes.MEAN, GraphAggTypes,
                        'Method to aggregate data for the graph.')
Number = Union[int, float]


class StatsBuilder(object):
  """Builds graphs and other summary information from eventlogs."""

  def __init__(self,
               eventlog_dirs: List[str],
               event_tag: str,
               output_path: str = '.',
               title: str = '',
               xaxis_title: str = 'steps',
               yaxis_title: Optional[str] = None,
               graph_agg: GraphAggTypes = GraphAggTypes.MEAN,
               output_prefix: str = 'results',
Example #25
                    'The number of concurrent threads for throughput tests.')
flags.DEFINE_boolean(
    'ch_network_throughput_time', False,
    'throughput metrics will be average request times (ms) '
    'instead of rate (Mb/s).')
flags.DEFINE_integer('ch_network_throughput_samples', 5,
                     'The number of test samples for throughput tests.')
flags.DEFINE_boolean(
    'ch_network_throughput_slowest_thread', False, 'If set, '
    'throughput metrics will be based on the speed of the '
    'slowest thread instead of average speed X number of '
    'threads.')
flags.DEFINE_integer(
    'ch_network_tcp_samples', 10, 'The number of test samples '
    'for TCP tests (rtt, ssl or ttfb).')
flags.DEFINE_enum_class('ch_ssl_encryption_type', TlsEncryptionType.ECC,
                        TlsEncryptionType, 'Encryption type to use for SSL.')
CLIENT_ZONE = flags.DEFINE_string(
    'ch_client_zone', None,
    'zone to launch the network or storage test client in. ')
ENDPOINT_ZONE = flags.DEFINE_string(
    'ch_endpoint_zone', None,
    'zone to launch the network server or storage test bucket in. ')

FLAGS = flags.FLAGS
HTTP_DIR = '/var/www/html/web-probe'
OUTPUT = 'network.csv'
TRUE = 'True'
FALSE = 'False'


def CheckPrerequisites(_):
Example #26
from absl import flags

from supcon import enums
from supcon import hparams

FLAGS = flags.FLAGS

# Train and eval batch sizes. For TPU execution, these are the total batch sizes
# for all TPUs together, not the smaller per-TPU ones.
flags.DEFINE_integer('batch_size', 2048, 'Train batch size')
flags.DEFINE_integer('eval_batch_size', 128, 'The batch size to use for eval.')

# Architecture
flags.DEFINE_float('resnet_width', 1., 'Width of the resnet to be '
                   'used in the model')
flags.DEFINE_enum_class(
    'resnet_architecture', enums.EncoderArchitecture.RESNET_V1,
    enums.EncoderArchitecture, 'Name of the resnet'
    ' architecture to be used in the model.')
flags.DEFINE_integer('resnet_depth', 50, 'Depth of the Resnet model')
flags.DEFINE_bool(
    'zero_initialize_classifier', False,
    'Whether or not to initialize parameters in the classification head '
    'to 0. Otherwise uses glorot uniform.')
flags.DEFINE_bool('use_projection_batch_norm', False,
                  'Whether to use batch norm in the projection head.')
flags.DEFINE_bool(
    'use_projection_batch_norm_beta', True,
    'Whether projection head batch norm layers should have a beta (bias) '
    'parameter.')
flags.DEFINE_bool(
    'normalize_embedding', True,
    'If the outputs of the encoder should be normalized before being input to '
Example #27
from typing import Any, List

import enum

from absl import flags
from injector import Binder, Injector, Module

FLAGS = flags.FLAGS


class Environment(enum.Enum):
    DEVELOPMENT = 'DEVELOPMENT'
    PRODUCTION = 'PRODUCTION'


flags.DEFINE_enum_class('environment', Environment.PRODUCTION, Environment,
                        'Environment this application is running in.')


class EnvironmentModule(Module):
    def configure(self, binder: Binder) -> None:
        binder.bind(Environment, to=FLAGS.environment)


def get_bindings(injector: Injector) -> List[Any]:
    if not injector:
        return []

    return list(injector.binder._bindings.keys()) + get_bindings(
        injector.parent)
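For completeness, a hedged sketch of how Example #27's module is typically wired: injector.Injector takes a list of modules, and .get resolves the Environment binding, which EnvironmentModule populates from FLAGS.environment (so flags must be parsed first, e.g. inside app.run):

# Hypothetical wiring; create the Injector only after absl has parsed
# flags, because EnvironmentModule reads FLAGS.environment at bind time.
from injector import Injector


def create_injector() -> Injector:
    return Injector([EnvironmentModule()])


# env = create_injector().get(Environment)  # -> an Environment member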