Example #1
 def test_flag_help_in_xml_multi_enum(self):
   flags.DEFINE_multi_enum('flavours', ['APPLE', 'BANANA'],
                           ['APPLE', 'BANANA', 'CHERRY'],
                           'Compilation flavour.', flag_values=self.fv)
   expected_output = (
       '<flag>\n'
       '  <file>tool</file>\n'
       '  <name>flavours</name>\n'
       '  <meaning>Compilation flavour.;\n'
       '    repeat this option to specify a list of values</meaning>\n'
       '  <default>[\'APPLE\', \'BANANA\']</default>\n'
       '  <current>[\'APPLE\', \'BANANA\']</current>\n'
       '  <type>multi string enum</type>\n'
       '  <enum_value>APPLE</enum_value>\n'
       '  <enum_value>BANANA</enum_value>\n'
       '  <enum_value>CHERRY</enum_value>\n'
       '</flag>\n')
   self._check_flag_help_in_xml('flavours', 'tool', expected_output)
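A minimal standalone sketch (assumed names, not the original test helper) showing how FlagValues.write_help_in_xml_format produces this kind of XML for a multi_enum flag:

import io
from absl import flags

fv = flags.FlagValues()
flags.DEFINE_multi_enum('flavours', ['APPLE', 'BANANA'],
                        ['APPLE', 'BANANA', 'CHERRY'],
                        'Compilation flavour.', flag_values=fv)
sio = io.StringIO()
fv.write_help_in_xml_format(sio)  # emits <flag>...</flag> blocks like the one above
print(sio.getvalue())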
Example #2
    def test_serialize_roundtrip(self):
        # Use the global 'FLAGS' as the source, to ensure all the framework defined
        # flags will go through the round trip process.
        flags.DEFINE_string('testflag', 'testval', 'help', flag_values=FLAGS)

        flags.DEFINE_multi_enum('test_multi_enum_flag', ['x', 'y'],
                                ['x', 'y', 'z'],
                                'Multi enum help.',
                                flag_values=FLAGS)

        class Fruit(enum.Enum):
            APPLE = 1
            ORANGE = 2
            TOMATO = 3

        flags.DEFINE_multi_enum_class('test_multi_enum_class_flag',
                                      ['APPLE', 'TOMATO'],
                                      Fruit,
                                      'Fruit help.',
                                      flag_values=FLAGS)

        new_flag_values = flags.FlagValues()
        new_flag_values.append_flag_values(FLAGS)

        FLAGS.testflag = 'roundtrip_me'
        FLAGS.test_multi_enum_flag = ['y', 'z']
        FLAGS.test_multi_enum_class_flag = [Fruit.ORANGE, Fruit.APPLE]
        argv = ['binary_name'] + FLAGS.flags_into_string().splitlines()

        self.assertNotEqual(new_flag_values['testflag'], FLAGS.testflag)
        self.assertNotEqual(new_flag_values['test_multi_enum_flag'],
                            FLAGS.test_multi_enum_flag)
        self.assertNotEqual(new_flag_values['test_multi_enum_class_flag'],
                            FLAGS.test_multi_enum_class_flag)
        new_flag_values(argv)
        self.assertEqual(new_flag_values.testflag, FLAGS.testflag)
        self.assertEqual(new_flag_values.test_multi_enum_flag,
                         FLAGS.test_multi_enum_flag)
        self.assertEqual(new_flag_values.test_multi_enum_class_flag,
                         FLAGS.test_multi_enum_class_flag)
        del FLAGS.testflag
        del FLAGS.test_multi_enum_flag
        del FLAGS.test_multi_enum_class_flag
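A minimal sketch of the same flags_into_string() round trip outside the test harness (the 'color' flag here is illustrative, not from the original test):

from absl import flags

fv = flags.FlagValues()
flags.DEFINE_multi_enum('color', ['RED'], ['RED', 'GREEN', 'BLUE'],
                        'Colors to use.', flag_values=fv)
fv(['prog'])                   # parse with defaults
fv.color = ['GREEN', 'BLUE']   # override programmatically

# Serialize, then re-parse into a fresh FlagValues that knows the same flags.
new_fv = flags.FlagValues()
new_fv.append_flag_values(fv)
new_fv(['prog'] + fv.flags_into_string().splitlines())
assert new_fv.color == ['GREEN', 'BLUE']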
Example #3
from typing import Sequence

from absl import app
from absl import flags

from fusion_tcv import named_array
from fusion_tcv import references
from fusion_tcv import tcv_common

_refs = flags.DEFINE_enum("refs", None, references.REFERENCES.keys(),
                          "Which references to print")
_count = flags.DEFINE_integer("count", 100, "How many timesteps to print.")
_freq = flags.DEFINE_integer("freq", 1, "Print only every so often.")
_fields = flags.DEFINE_multi_enum(
    "field", None, tcv_common.REF_RANGES.names(),
    "Which reference fields to print, default of all.")
flags.mark_flag_as_required("refs")


def print_ref(step: int, ref: named_array.NamedArray):
    print(f"Step: {step}")
    for k in (_fields.value or ref.names.names()):
        print(f"  {k}: [{', '.join(f'{v:.3f}' for v in ref[k])}]")


def main(argv: Sequence[str]) -> None:
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")
    if _freq.value <= 0:
        raise app.UsageError("`freq` must be >0.")
Example #4
# Directories.
flags.DEFINE_string('evaluation_out_dir', None,
                    'The output directory of evaluation results.')
flags.DEFINE_string('analysis_out_dir', None,
                    'The output directory of analysis results.')

# Configs.
flags.DEFINE_enum(
    'evaluation_config', None, evaluation_configs.EVALUATION_CONFIG_NAMES,
    'The name of the evaluation configuration. '
    'See evaluation_configs.EVALUATION_CONFIG_NAMES for the complete list '
    'of supported configs.')
flags.DEFINE_multi_enum(
    'sketch_estimator_configs', None,
    evaluation_configs.ESTIMATOR_CONFIG_NAMES,
    'The name of the estimator configuration documented in '
    'evaluation_configs.ESTIMATOR_CONFIG_NAMES. '
    'Can evaluate multiple estimator_config.')
flags.DEFINE_string('evaluation_run_name', None,
                    'The name of this evaluation run.')
flags.DEFINE_integer('num_runs',
                     None,
                     'The number of runs per scenario.',
                     lower_bound=1)

# Analysis parameters.
flags.DEFINE_float('error_margin',
                   0.05,
                   'a positive number setting the upper bound of the error. '
                   'By default, set to 0.05.',
                   lower_bound=0)
Example #5
                     lower_bound=1)

flags.DEFINE_float("lr", 1e-3, "learning rate", lower_bound=0.0)
flags.DEFINE_float("momentum", 0.0, "momentum", lower_bound=0.0)
flags.DEFINE_integer("batch_size", 256, "batch size ", lower_bound=1)
flags.DEFINE_integer("n_workers", 4, "number of data workers", lower_bound=0)
flags.DEFINE_integer("n_preferences",
                     5,
                     "number of preference vectors",
                     lower_bound=1)

flags.DEFINE_multi_enum(
    "dset",
    "all",
    [
        "mnist", "fashion", "fashion_and_mnist", "all", "celeba", "cifar10",
        "cifar100"
    ],
    "name of dataset to use",
)
flags.DEFINE_string("outdir", "runs", "Output dir to save results")
flags.DEFINE_enum("arch", "lenet", ["lenet", "resnet18", "resnet34"],
                  "network architecture to use")
flags.DEFINE_enum(
    "init",
    "xavier",
    ["xavier", "uniform", "normal", "kaiming"],
    "weight initialization method",
)
flags.DEFINE_enum(
    "solver",
Example #6
from absl import app, flags
from pathlib import Path
from data.datasets import DATASETS
from models.common import SPLIT_OPTIONS, AVAILABLE_MODELS
from models.measure_filter_helpers import measure_filter_model
from models.measure_helpers import METHODS
import warnings

warnings.filterwarnings("ignore")

FLAGS = flags.FLAGS

flags.DEFINE_multi_enum(
    "model_version",
    ["resnet18"],
    AVAILABLE_MODELS,
    f"Model version {AVAILABLE_MODELS}",
)
flags.DEFINE_multi_enum(
    "dataset",
    [],
    list(DATASETS.keys()),
    f"(optional) Dataset name, one of available datasets: {list(DATASETS.keys())}",
)
flags.DEFINE_multi_enum(
    "train_skip",
    [],
    list(SPLIT_OPTIONS.keys()),
    f"(optional) version of the train dataset size: {list(SPLIT_OPTIONS.keys())}",
)
flags.DEFINE_multi_enum(
Example #7
flags.DEFINE_integer('num_layers', 2, 'Number of message passing layers.')
flags.DEFINE_integer('message_layer_size', 32,
                     'Number of units in message layers.')
flags.DEFINE_integer('readout_layer_size', 32,
                     'Number of units in the readout layer.')

# Choose augmentations, if any.
flags.DEFINE_float(
    'aug_ratio', 0.2, 'Proportion of graph in terms of nodes '
    'or edges to augment.')
flags.DEFINE_float(
    'aug_prob', 0.2, 'Probability of applying an augmentation for a given '
    'graph.')
flags.DEFINE_multi_enum(
    'augmentations',
    default=[],
    enum_values=['drop_nodes', 'perturb_edges', 'mask_node_features'],
    help='Types of augmentations to perform on graphs. If an empty list is '
    'provided, then no augmentation will be applied to the data.')

# Flags for drop_nodes augmentation
flags.DEFINE_boolean(
    'perturb_node_features', False, 'When True, zeros out the '
    'features of dropped nodes. When False, does not '
    'affect the node features. Controls whether or not the '
    'drop_nodes function affects the `atoms` feature.')

# Flags for perturb_edges augmentation
flags.DEFINE_boolean(
    'drop_edges_only', False, 'If True, only drop edges '
    'when using the perturb_edges augmentation, rather than '
    're-adding the dropped edges between randomly selected '
Example #8
flags.DEFINE_string('train_dir', None, "Directory with training images.")
flags.DEFINE_string('val_dir', None, "Directory with validation images.")
flags.DEFINE_string('test_dir', None, "Directory with test images.")

flags.DEFINE_multi_integer(
    "train_crop", [128, 128, 3],
    'Crop to take from images for model training (H, W, C)')

# Noise Layers
flags.DEFINE_bool(
    'to_yuv', False, 'Whether to convert input images to YUV -- strongly \
  recommended for RGB images and JPEG noise')

flags.DEFINE_multi_enum(
    'noise_layers', 'identity',
    ['identity', 'gaussian', 'crop', 'cropout', 'jpeg_mask', 'dropout'],
    'A sequence of noise layers to apply.')
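
# Editorial note (not from the original source): the DEFINE_multi_* helpers
# accept a non-list default such as 'identity' above and treat it as a
# single-item list, so FLAGS.noise_layers defaults to ['identity'].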

flags.DEFINE_float(
    'crop_p', 0.5,
    "Crop proportion for crop layer. Keeps that proportion from encoded image and replaces rest from cover image."
)

flags.DEFINE_float(
    'cropout_p', 0.5,
    "Crop proportion for cropout layer. Keeps that proportion from encoded image and replaces rest from cover image."
)

flags.DEFINE_float(
    'dropout_p', 0.5,
    "Keep probability for dropout layer. Keeps that proportion from encoded image and replaces rest from cover image."
Example #9
glibc:
  description: Runs Glibc Microbenchmark.
  vm_groups:
    default:
      vm_spec: *default_dual_core
      vm_count: null
"""

glibc_default_benchset = [
    'bench-math', 'bench-pthread', 'bench-string', 'string-benchset',
    'wcsmbs-benchset', 'stdlib-benchset', 'stdio-common-benchset',
    'math-benchset', 'malloc-thread'
]
flags.DEFINE_multi_enum(
    'glibc_benchset', glibc_default_benchset, glibc_default_benchset,
    'By default, it will run the whole set of benchmarks. To run only a subset '
    'of benchmarks, one may set "glibc_benchset = bench-math bench-pthread" by '
    'using the flag on the command line multiple times.')
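
# Illustrative usage (not part of the original benchmark file): repeating a
# multi_enum flag on the command line accumulates values, e.g. passing
#   --glibc_benchset=bench-math --glibc_benchset=bench-pthread
# yields FLAGS.glibc_benchset == ['bench-math', 'bench-pthread'].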

GLIBC_BENCH = ['bench-math', 'bench-pthread', 'bench-string']
GLIBC_BENCH_MALLOC = ['malloc-thread']
# TODO(user): Parse other *-benchset benchmarks.
GLIBC_MATH_BENCHSET = ['math-benchset']

RESULTS_DIR = '%s/glibc/glibc-build/benchtests' % linux_packages.INSTALL_DIR


def GetConfig(user_config):
    return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)

Example #10
from perfkitbenchmarker import configs
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import ch_block_storage

BENCHMARK_NAME = 'ch_block_storage'
BENCHMARK_CONFIG = """
ch_block_storage:
  description: Runs cloudharmony block storage tests.
  vm_groups:
    default:
      vm_spec: *default_single_core
      disk_spec: *default_500_gb
"""

flags.DEFINE_multi_enum(
    'ch_block_tests', ['iops'],
    ['iops', 'throughput', 'latency', 'wsat', 'hir'],
    'A list of tests supported by CloudHarmony block storage benchmark.')

FLAGS = flags.FLAGS


def GetConfig(user_config):
    config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
    disk_spec = config['vm_groups']['default']['disk_spec']
    # Use raw device
    # TODO(yuyanting): Support filesystem?
    for cloud in disk_spec:
        disk_spec[cloud]['mount_point'] = None
    return config

Example #11
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import omb

BENCHMARK_NAME = 'omb'
BENCHMARK_CONFIG = """
omb:
  description: OSU MPI micro-benchmarks
  vm_groups:
    default:
      vm_count: 2
      vm_spec: *default_single_core
"""

_BENCHMARKS_ARG = flags.DEFINE_multi_enum(
    'omb_benchmarks', None, sorted(omb.BENCHMARKS),
    'OSU micro-benchmarks to run.  Default is to run all.')
_RUN_LONG_LATENCY = flags.DEFINE_bool(
    'omb_run_long_latency', False,
    'Whether to run the very long latency tests get_acc_latency and latency_mt.'
)
_MESSAGE_SIZES = flags.DEFINE_list(
    'omb_message_sizes', None, '--message-size values to pass in.  Value of '
    '"1:8,1024" will run sizes 1,2,4,8,1024.  Default is to run all sizes')
FLAGS = flags.FLAGS


def GetConfig(user_config: Dict[str, Any]) -> Dict[str, Any]:
    return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)

Example #12
from absl import app
from absl import flags

from koch import db
from koch import extract
from koch import pipeline
from koch.proto import document_pb2

ALL_POS = set([wordnet.NOUN, wordnet.VERB, wordnet.ADJ, wordnet.ADV])

FLAGS = flags.FLAGS
flags.DEFINE_string("parse_output", None,
                    "Output path to write parsed html to.")
flags.DEFINE_boolean("parse_debug", False, "Whether to use the debug writer.")

flags.DEFINE_multi_enum("parse_pos", [], ALL_POS, "Parts of speech to retain")


def add_blob(doc, text, pos):
    blob = doc.blobs.add()
    blob.text = text
    blob.position.extend(pos)


def build_blobs(html_element, doc, pos):
    if html_element.text:
        add_blob(doc, html_element.text, pos)
    for i, child in enumerate(html_element.children):
        build_blobs(child, doc, pos + [i])
    if html_element.tail:
        add_blob(doc, html_element.tail, pos[:-1])
Example #13
from absl import app, flags, logging

import fasttext
"""
Filter lines of text by language using fasttext
"""
FLAGS = flags.FLAGS
# https://fasttext.cc/docs/en/language-identification.html
VALID_LANGS = "af als am an ar arz as ast av az azb ba bar bcl be bg bh bn bo bpy br bs bxr ca cbk ce ceb ckb co cs cv cy da de diq dsb dty dv el eml en eo es et eu fa fi fr frr fy ga gd gl gn gom gu gv he hi hif hr hsb ht hu hy ia id ie ilo io is it ja jbo jv ka kk km kn ko krc ku kv kw ky la lb lez li lmo lo lrc lt lv mai mg mhr min mk ml mn mr mrj ms mt mwl my myv mzn nah nap nds ne new nl nn no oc or os pa pam pfl pl pms pnb ps pt qu rm ro ru rue sa sah sc scn sco sd sh si sk sl so sq sr su sv sw ta te tg th tk tl tr tt tyv ug uk ur uz vec vep vi vls vo wa war wuu xal xmf yi yo yue zh".split(
)

flags.DEFINE_multi_enum(
    "lang",
    enum_values=VALID_LANGS,
    default="en",
    help=
    "Language to include; can be specified multiple times for multiple "
    "languages.",
)
flags.DEFINE_float(
    "threshold",
    default=0.9,
    help=
    "Minimum fasttext confidence required to keep a line.",
)
flags.DEFINE_boolean("debug", False, "Produces debugging output.")
flags.DEFINE_boolean("reverse", False, "reverse the filter")

ext = ".bin"
if not os.path.exists("lid.176" + ext):
    logging.info("downloading model")
Example #14
    default='DAN',
    enum_values=['RNN', 'CNN', 'SPARSE', 'DAN', 'DENSE', 'RMLP', "VHN"],
    help='The type of clinical snapshot encoder to use')

flags.DEFINE_float(
    'augment_negatives',
    default=0.,
    lower_bound=0.,
    upper_bound=1.,
    help=
    'Augment negative examples by randomly truncating the given percent of positive examples to '
    'end early')
flags.DEFINE_multi_enum(
    'only_augmented_negatives',
    default=[],
    enum_values=["train", "devel", "test"],
    short_name='oan',
    help=
    'Ignore negative examples in the train/dev/test data, and evaluate/train on only '
    'augmented negative examples (legacy behavior)')

# RNN
flags.DEFINE_multi_integer(
    'snapshot_rnn_num_hidden',
    default=[200],
    lower_bound=1,
    help=
    'The size of hidden layer(s) used for combining clinical observations to produce the '
    'clinical snapshot encoding; multiple arguments result in multiple hidden layers'
)
flags.DEFINE_enum('snapshot_rnn_cell_type',
                  default='RAN',
Example #15
import os
import torch

from absl import app, flags
from training.resnet_tune import train_resnet
from training.efficientnet_tune import train_efficientnet
from training.densenet_tune import train_densenet
from data.datasets import DATASETS
from models.common import SPLIT_OPTIONS, AVAILABLE_MODELS

FLAGS = flags.FLAGS

flags.DEFINE_multi_enum(
    "model_version",
    ["resnet18"],
    AVAILABLE_MODELS,
    f"Model version {AVAILABLE_MODELS}",
)
flags.DEFINE_multi_enum(
    "dataset",
    ["edible-plants"],
    list(DATASETS.keys()),
    f"Dataset name, one of available datasets: {list(DATASETS.keys())}",
)

out_folder = os.path.join("models", "saved_models")
data_dir = os.path.join("data")


def main(_argv):
    for model_version in FLAGS.model_version:
Example #16
    def test_write_help_in_xmlformat(self):
        fv = flags.FlagValues()
        # Since these flags are defined by the top module, they are all key.
        flags.DEFINE_integer('index', 17, 'An integer flag', flag_values=fv)
        flags.DEFINE_integer('nb_iters',
                             17,
                             'An integer flag',
                             lower_bound=5,
                             upper_bound=27,
                             flag_values=fv)
        flags.DEFINE_string('file_path',
                            '/path/to/my/dir',
                            'A test string flag.',
                            flag_values=fv)
        flags.DEFINE_boolean('use_gpu',
                             False,
                             'Use gpu for performance.',
                             flag_values=fv)
        flags.DEFINE_enum('cc_version',
                          'stable', ['stable', 'experimental'],
                          'Compiler version to use.',
                          flag_values=fv)
        flags.DEFINE_list('files',
                          'a.cc,a.h,archive/old.zip',
                          'Files to process.',
                          flag_values=fv)
        flags.DEFINE_list('allow_users', ['alice', 'bob'],
                          'Users with access.',
                          flag_values=fv)
        flags.DEFINE_spaceseplist('dirs',
                                  'src libs bins',
                                  'Directories to create.',
                                  flag_values=fv)
        flags.DEFINE_multi_string('to_delete', ['a.cc', 'b.h'],
                                  'Files to delete',
                                  flag_values=fv)
        flags.DEFINE_multi_integer('cols', [5, 7, 23],
                                   'Columns to select',
                                   flag_values=fv)
        flags.DEFINE_multi_enum('flavours', ['APPLE', 'BANANA'],
                                ['APPLE', 'BANANA', 'CHERRY'],
                                'Compilation flavour.',
                                flag_values=fv)
        # Define a few flags in a different module.
        module_bar.define_flags(flag_values=fv)
        # And declare only a few of them to be key.  This way, we have
        # different kinds of flags, defined in different modules, and not
        # all of them are key flags.
        flags.declare_key_flag('tmod_bar_z', flag_values=fv)
        flags.declare_key_flag('tmod_bar_u', flag_values=fv)

        # Generate flag help in XML format in the StringIO sio.
        sio = io.StringIO() if six.PY3 else io.BytesIO()
        fv.write_help_in_xml_format(sio)

        # Check that we got the expected result.
        expected_output_template = EXPECTED_HELP_XML_START
        main_module_name = sys.argv[0]
        module_bar_name = module_bar.__name__

        if main_module_name < module_bar_name:
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
        else:
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE

        expected_output_template += EXPECTED_HELP_XML_END

        # XML representation of the whitespace list separators.
        whitespace_separators = _list_separators_in_xmlformat(
            string.whitespace, indent='    ')
        expected_output = (expected_output_template % {
            'basename_of_argv0': os.path.basename(sys.argv[0]),
            'usage_doc': sys.modules['__main__'].__doc__,
            'main_module_name': main_module_name,
            'module_bar_name': module_bar_name,
            'whitespace_separators': whitespace_separators
        })

        actual_output = sio.getvalue()
        self.assertMultiLineEqual(expected_output, actual_output)

        # Also check that our result is valid XML.  minidom.parseString
        # throws an xml.parsers.expat.ExpatError in case of an error.
        xml.dom.minidom.parseString(actual_output)
Example #17
flags.DEFINE_multi_string(
    "raw_paths", [],
    "The paths to the test data in its original .binproto or .lftxt format.")
flags.DEFINE_string(
    "visualisation_folder", None,
    "If set, a comparison of the target/hypothesis labeling is saved in .html "
    "format")
flags.DEFINE_boolean(
    "strict_eval", False,
    "Only used for scoring. If True, a label must not begin with an 'I-' tag.")
flags.DEFINE_boolean(
    "train_with_additional_labels", False,
    "Needs to be set if the flags other than address/phone were used for "
    "training, too.")
flags.DEFINE_multi_enum(
    "save_output_formats", [], ["lftxt", "binproto", "tfrecord"],
    "If set, the hypotheses are saved in the corresponding formats.")
flags.DEFINE_string("output_directory", None,
                    "Controls where to save the hypotheses.")
flags.DEFINE_integer(
    "moving_window_overlap", 20,
    "The size of the overlap for a moving window. "
    "Setting it to zero restores the default behaviour of hard splitting.")
flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximal sequence length. Longer sequences are split.")
flags.DEFINE_integer("batch_size", 64, "The number of samples per batch.")
flags.DEFINE_string(
    "tpu_address", None,
    "The internal address of the TPU node, including 'grpc://'. If not set, no "
    "tpu is used.")
Example #18
flags.DEFINE_string('nccl_maxbytes', '256M', 'Maximum size to start with')
flags.DEFINE_integer('nccl_stepfactor', 2,
                     'Multiplication factor between sizes')
flags.DEFINE_integer('nccl_ngpus', 1, 'Number of gpus per thread.')
flags.DEFINE_boolean('nccl_check', False, 'Check correctness of results.')
flags.DEFINE_integer('nccl_nthreads', 1, 'Number of threads per process')
flags.DEFINE_integer('nccl_num_runs',
                     10,
                     'The number of consecutive runs.',
                     lower_bound=1)
flags.DEFINE_integer('nccl_seconds_between_runs', 10,
                     'Sleep between consecutive runs.')
flags.DEFINE_integer('nccl_iters', 20, 'Number of iterations')
flags.DEFINE_multi_enum('nccl_operations',
                        ['all_reduce', 'all_gather', 'alltoall'], [
                            'all_reduce', 'all_gather', 'broadcast',
                            'reduce_scatter', 'reduce', 'alltoall'
                        ], 'The NCCL collective operation.')
_NCCL_TESTS = flags.DEFINE_boolean('nccl_install_tests', True,
                                   'Install NCCL tests benchmarks.')

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'nccl'
BENCHMARK_CONFIG = """
nccl:
  description: Runs NCCL Benchmark. Specify the number of VMs with --num_vms.
  vm_groups:
    default:
      vm_count: null
      vm_spec:
Example #19
import os
import torch

from absl import app, flags
from models.evaluation_helpers import test_model
from training.resnet_tune import train_resnet
from training.efficientnet_tune import train_efficientnet
from training.densenet_tune import train_densenet
from data.datasets import DATASETS
from models.common import SPLIT_OPTIONS, AVAILABLE_MODELS

FLAGS = flags.FLAGS

flags.DEFINE_multi_enum(
    "model_version",
    ["resnet18"],
    AVAILABLE_MODELS,
    f"Model version {AVAILABLE_MODELS}",
)

model_folder = os.path.join("models", "saved_models")
out_folder = os.path.join("models", "evals")
data_dir = os.path.join("data")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def main(_argv):
    for model_version in FLAGS.model_version:
        for dataset in DATASETS.keys():
            for label, skip in SPLIT_OPTIONS.items():
                print(
                    f"Training {model_version} model with {label} of data ({dataset})"
Example #20
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import aws_placement_group
from perfkitbenchmarker.providers.aws import aws_vpc_endpoint
from perfkitbenchmarker.providers.aws import util

flags.DEFINE_string('aws_vpc', None,
                    'The static AWS VPC id to use. Default creates a new one')
flags.DEFINE_string(
    'aws_subnet', None,
    'The static AWS subnet id to use.  Default creates a new one')
flags.DEFINE_bool('aws_efa', False, 'Whether to use an Elastic Fabric Adapter.')
flags.DEFINE_string('aws_efa_version', '1.9.4',
                    'Version of AWS EFA to use (must also pass in --aws_efa).')
flags.DEFINE_integer('aws_efa_count', 1, 'The number of EFAs per instance.')
flags.DEFINE_multi_enum('aws_endpoint', [], ['s3'],
                        'List of AWS endpoints to create')

FLAGS = flags.FLAGS


REGION = 'region'
ZONE = 'zone'


class AwsFirewall(network.BaseFirewall):
  """An object representing the AWS Firewall."""

  CLOUD = aws.CLOUD

  def __init__(self):
    self.firewall_set = set()
Example #21
                  False,
                  "Whether to restore previous params from checkpoint.",
                  short_name="r")
flags.DEFINE_bool("erase",
                  False,
                  "Whether to erase previous checkpoints and summaries.",
                  short_name="e")
flags.DEFINE_bool("debug",
                  False,
                  "Whether to evaluate model every eval_frequency",
                  short_name="d")

flags.DEFINE_float("learning_rate", 0.001, "Learning rate of the optimizer.")

flags.DEFINE_multi_enum(
    "not_to_train", [],
    ["drift", "diffusion", "mapping", "initial_latents", "likelihood"],
    "Parts of the model not to train.")

FLAGS = flags.FLAGS

PRNGKey = jnp.ndarray
Array = jnp.ndarray
OptState = Any


class ItoGeneralTrainer:
    def __init__(self):
        # Folders
        parent_folder = f"results/{FLAGS.dataset}/{FLAGS.mapping}/drift_{FLAGS.drift}/diffusion_{FLAGS.diffusion}/"
        parent_folder += f"latent_{FLAGS.latent_dims}D"
        self.summary_folder = parent_folder + "/summary/"
Example #22
import difflib
import os
import sys
from absl import app
from absl import flags
from google.protobuf import text_format
from src.main.protobuf import analysis_pb2
from tools.aquery_differ.resolvers.dep_set_resolver import DepSetResolver

flags.DEFINE_string("before", None, "Aquery output before the change")
flags.DEFINE_string("after", None, "Aquery output after the change")
flags.DEFINE_enum(
    "input_type", "proto", ["proto", "textproto"],
    "The format of the aquery proto input. One of 'proto' and 'textproto.")
flags.DEFINE_multi_enum("attrs", ["cmdline"], ["inputs", "cmdline"],
                        "Attributes of the actions to be compared.")
flags.mark_flag_as_required("before")
flags.mark_flag_as_required("after")

WHITE = "\033[37m%s\033[0m"
CYAN = "\033[36m%s\033[0m"
RED = "\033[31m%s\033[0m"
GREEN = "\033[32m%s\033[0m"


def _colorize(line):
  """Add color to the input string."""
  if not sys.stdout.isatty():
    return line

  if line.startswith("+++") or line.startswith("---"):
Example #23
        GCP:
          disk_size: 2000
          disk_type: pd-ssd
          mount_point: /scratch_ts
        AWS:
          disk_size: 2000
          disk_type: gp2
          mount_point: /scratch_ts
        Azure:
          disk_size: 2000
          disk_type: StandardSSD_LRS
          mount_point: /scratch_ts
"""

_TESTS = flags.DEFINE_multi_enum(
    'tailbench_tests', ['img-dnn', 'specjbb', 'masstree'],
    ['img-dnn', 'specjbb', 'masstree'],
    'Which tailbench tests to run, all by default')

INSTALL_DIR = '/scratch_ts'


def GetConfig(user_config):
    return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)


def Prepare(benchmark_spec):
    vm = benchmark_spec.vms[0]
    tailbench.Install(vm)
    tailbench.PrepareTailBench(vm)