Example #1
    "dataset",
    default='house_prices',
    help="Dataset, choice 'mnist', 'fmnist', 'cifar10','svhn'.")
flags.DEFINE_integer("n_epochs",
                     default=100,
                     help="Number of training epochs to run.")
flags.DEFINE_integer("n_samples", default=1, help="Number of samples to draw.")
flags.DEFINE_float("learning_rate",
                   default=0.001,
                   help="Initial learning rate.")
flags.DEFINE_float("L2", default=0.0, help="L2 penalisation on weights.")
flags.DEFINE_integer("N",
                     default=2,
                     help="Number of hidden layers in discriminant network")
flags.DEFINE_multi_integer(
    "non_targeted_layers",
    default=[],
    help="Layers for which we do not add GNIs. Layer 0 refers to data layer.")
flags.DEFINE_integer("H",
                     default=512,
                     help="Size of hidden layers in discriminant network")
flags.DEFINE_bool("dropout",
                  default=True,
                  help="Dropout for hidden layers AND input")
flags.DEFINE_float("var", default=1.0, help="GNI variance")
flags.DEFINE_string("activation",
                    default="linear",
                    help="Activation function for all hidden layers.")
flags.DEFINE_string("noise_type",
                    default=None,
                    help="Noise type for model, input, gradient, None")
flags.DEFINE_string("noise_mode",
Example #2
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
import time
import os
import pickle
import logging
from absl import app, flags
from time import gmtime, strftime
import json

FLAGS = flags.FLAGS
flags.DEFINE_integer('N', 50, 'number of images')
flags.DEFINE_integer('chunkid', 10, 'index of partition chunk')
flags.DEFINE_multi_integer('attack_size', [20, 20], 'size of sticker')
flags.DEFINE_integer('stride', 20, 'stride of sticker')
flags.DEFINE_string('model', 'bagnet33', 'model being evaluated')
flags.DEFINE_string('clip_fn', 'tanh_linear', 'clipping function')
flags.DEFINE_float('a', 0.05, 'clipping parameter A')
flags.DEFINE_float('b', -1, 'clipping parameter B')
flags.DEFINE_float('eps', 5., 'range of perturbation')
flags.DEFINE_integer('nb_iter', 40, 'number of iterations for PGD')
flags.DEFINE_float('stepsize', 0.5, 'stepsize of PGD')
flags.DEFINE_boolean('rand_init', True, 'random initial point in attack')
flags.DEFINE_integer('metabatch_size', 10, 'metabatch size')
flags.DEFINE_string('data_path', '/mnt/data/imagenet',
                    'directory where data are stored')
flags.DEFINE_string('output_root', '/mnt/data/results/advertorch_results',
                    'directory for storing results')
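
A note on the multi-flags above (not part of the original example): an absl DEFINE_multi_* flag may be repeated on the command line, and the occurrences accumulate into a Python list. A minimal runnable sketch, with an illustrative flag name:

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_multi_integer('attack_size', [20, 20], 'size of sticker')


def main(argv):
    del argv  # Unused.
    # `python demo.py --attack_size=30 --attack_size=40` prints [30, 40];
    # with no occurrences on argv, the default [20, 20] is kept.
    print(FLAGS.attack_size)


if __name__ == '__main__':
    app.run(main)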
Example #3
import numpy as np
from absl import app, flags
import os
import logging

FLAGS = flags.FLAGS
flags.DEFINE_integer('N', 50, 'number of samples')
flags.DEFINE_multi_integer('seed_list', [42, 88, 1234, 666, 777, 999],
                           'list of random seeds')
flags.DEFINE_string('log_root', '/mnt/data/results/',
                    'directory for storing results')


def main(argv):
    NAME = "{}".format(FLAGS.seed_list)
    LOG_PATH = os.path.join(FLAGS.log_root, NAME + '.log')
    print("log to {}".format(LOG_PATH))

    # basicConfig returns None, so there is no logger object to bind here.
    logging.basicConfig(filename=LOG_PATH, level=logging.INFO)
    total = []
    for seed in FLAGS.seed_list:
        np.random.seed(seed)
        sub = np.random.choice(np.arange(50000), size=FLAGS.N, replace=False)
        logging.info("seed: {} \n indices: {}".format(seed, sub))
        total.append(sub[:])
    total = np.array(total).flatten()
    unique, count = np.unique(total, return_counts=True)
    has_rep = np.any([i != 1 for i in count])
    logging.info("has repetition: {}".format(has_rep))
    print(has_rep)
    return has_rep
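
The excerpt ends before the script's entry point; with absl, a script like this is conventionally closed by handing main to app.run, e.g.:

if __name__ == '__main__':
    app.run(main)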
Example #4
    def test_write_help_in_xmlformat(self):
        fv = flags.FlagValues()
        # Since these flags are defined by the top module, they are all key.
        flags.DEFINE_integer('index', 17, 'An integer flag', flag_values=fv)
        flags.DEFINE_integer('nb_iters',
                             17,
                             'An integer flag',
                             lower_bound=5,
                             upper_bound=27,
                             flag_values=fv)
        flags.DEFINE_string('file_path',
                            '/path/to/my/dir',
                            'A test string flag.',
                            flag_values=fv)
        flags.DEFINE_boolean('use_gpu',
                             False,
                             'Use gpu for performance.',
                             flag_values=fv)
        flags.DEFINE_enum('cc_version',
                          'stable', ['stable', 'experimental'],
                          'Compiler version to use.',
                          flag_values=fv)
        flags.DEFINE_list('files',
                          'a.cc,a.h,archive/old.zip',
                          'Files to process.',
                          flag_values=fv)
        flags.DEFINE_list('allow_users', ['alice', 'bob'],
                          'Users with access.',
                          flag_values=fv)
        flags.DEFINE_spaceseplist('dirs',
                                  'src libs bins',
                                  'Directories to create.',
                                  flag_values=fv)
        flags.DEFINE_multi_string('to_delete', ['a.cc', 'b.h'],
                                  'Files to delete',
                                  flag_values=fv)
        flags.DEFINE_multi_integer('cols', [5, 7, 23],
                                   'Columns to select',
                                   flag_values=fv)
        flags.DEFINE_multi_enum('flavours', ['APPLE', 'BANANA'],
                                ['APPLE', 'BANANA', 'CHERRY'],
                                'Compilation flavour.',
                                flag_values=fv)
        # Define a few flags in a different module.
        module_bar.define_flags(flag_values=fv)
        # And declare only a few of them to be key.  This way, we have
        # different kinds of flags, defined in different modules, and not
        # all of them are key flags.
        flags.declare_key_flag('tmod_bar_z', flag_values=fv)
        flags.declare_key_flag('tmod_bar_u', flag_values=fv)

        # Generate flag help in XML format in the StringIO sio.
        sio = io.StringIO() if six.PY3 else io.BytesIO()
        fv.write_help_in_xml_format(sio)

        # Check that we got the expected result.
        expected_output_template = EXPECTED_HELP_XML_START
        main_module_name = sys.argv[0]
        module_bar_name = module_bar.__name__

        if main_module_name < module_bar_name:
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
        else:
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE

        expected_output_template += EXPECTED_HELP_XML_END

        # XML representation of the whitespace list separators.
        whitespace_separators = _list_separators_in_xmlformat(
            string.whitespace, indent='    ')
        expected_output = (expected_output_template % {
            'basename_of_argv0': os.path.basename(sys.argv[0]),
            'usage_doc': sys.modules['__main__'].__doc__,
            'main_module_name': main_module_name,
            'module_bar_name': module_bar_name,
            'whitespace_separators': whitespace_separators
        })

        actual_output = sio.getvalue()
        self.assertMultiLineEqual(expected_output, actual_output)

        # Also check that our result is valid XML.  minidom.parseString
        # throws an xml.parsers.expat.ExpatError in case of an error.
        xml.dom.minidom.parseString(actual_output)
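
The flag_values=fv argument used throughout this test registers every flag in a private flags.FlagValues registry rather than the global FLAGS, so the test can define, parse, and inspect flags without colliding with the process-wide namespace. A minimal sketch of the same isolation pattern:

fv = flags.FlagValues()
flags.DEFINE_multi_integer('cols', [5, 7, 23], 'Columns to select', flag_values=fv)
fv(['prog', '--cols=1', '--cols=2'])  # Parse an argv against this registry only.
assert fv.cols == [1, 2]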
Example #5
flags.DEFINE_multi_string(
    "train_file", ["./group_agnostic_fairness/data/toy_data/train.csv"],
    "List of (string) path(s) to training file(s).")
flags.DEFINE_multi_string("test_file",
                          ["./group_agnostic_fairness/data/toy_data/test.csv"],
                          "List of (string) path(s) to evaluation file(s).")

# If the model has an adversary, the features for the adversary are constructed
# in the corresponding custom estimator implementation by filtering the
# feature_columns passed to the learner.
flags.DEFINE_bool(
    "include_sensitive_columns", False,
    "Set the flag to include protected features in the feature_columns of the learner."
)

# Flags for setting common model parameters for all approaches
flags.DEFINE_multi_integer("primary_hidden_units", [64, 32],
                           "Hidden layer sizes of main learner.")
flags.DEFINE_integer("embedding_dimension", 32,
                     "Embedding size; if 0, use one hot.")
flags.DEFINE_integer("batch_size", 32, "Batch size.")
flags.DEFINE_float("primary_learning_rate", 0.001,
                   "learning rate for main learner.")
flags.DEFINE_string("optimizer", "Adagrad", "Name of the optimizer to use.")
flags.DEFINE_string("activation", "relu", "Name of the activation to use.")

# Flags for approaches that have an adversary.
# Currently only for the 'robust_learning' and 'adversarial_subgroup_reweighting' models.
flags.DEFINE_multi_integer("adversary_hidden_units", [32],
                           "Hidden layer sizes of adversary.")
flags.DEFINE_float("adversary_learning_rate", 0.001,
                   "learning rate for adversary.")
Example #6
flags.DEFINE_string("train_input_pattern", None,
                    "Input file path pattern used for training.")
flags.DEFINE_string("eval_input_pattern", None,
                    "Input file path pattern used for eval.")
flags.DEFINE_string("test_input_pattern", None,
                    "Input file path pattern used for test.")

# Model config.
flags.DEFINE_string(
    "vocab_file_path", None, "Path to vocab file used for tokenizing the "
    "Antique dataset.")
flags.DEFINE_integer(
    "vocab_size", 30522, "Size of the vocab file used for "
    "tokenizing the Antique dataset.")
flags.DEFINE_integer("embedding_dimension", 20, "Size of embedding.")
flags.DEFINE_multi_integer("hidden_layer_dims", [20, 10],
                           "Number of units in each hidden layer.")

# Training config.
flags.DEFINE_string("loss", tfr.keras.losses.RankingLossKey.APPROX_NDCG_LOSS,
                    "See tfr.keras.losses.RankingLossKey.")
flags.DEFINE_float("learning_rate", 0.005, "Learning rate for optimizer.")
flags.DEFINE_integer("train_batch_size", 16,
                     "Number of input records used per batch for training.")
flags.DEFINE_integer("eval_batch_size", 64,
                     "Number of input records used per batch for eval.")
flags.DEFINE_integer("num_epochs", 100,
                     "Number of passes over the training data.")
flags.DEFINE_string(
    "model_dir", None, "The directory where the model weights and "
    "training/evaluation summaries are stored.")
flags.DEFINE_integer("num_train_steps", 100000,
Example #7
flags.DEFINE_boolean('fine_tune_batch_norm', True,
                     'Fine tune the batch norm parameters or not.')

flags.DEFINE_float('min_scale_factor', 0.5,
                   'Minimum scale factor for data augmentation.')

flags.DEFINE_float('max_scale_factor', 2.,
                   'Maximum scale factor for data augmentation.')

flags.DEFINE_float('scale_factor_step_size', 0.25,
                   'Scale factor step size for data augmentation.')

# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
                           'Atrous rates for atrous spatial pyramid pooling.')

flags.DEFINE_integer('output_stride', 16,
                     'The ratio of input to output spatial resolution.')

# Hard example mining related flags.
flags.DEFINE_integer(
    'hard_example_mining_step', 0,
    'The training step in which exact hard example mining kicks off. Note we '
    'gradually reduce the mining percent to the specified '
    'top_k_percent_pixels. For example, if hard_example_mining_step=100K and '
    'top_k_percent_pixels=0.25, then mining percent will gradually reduce from '
    '100% to 25% until 100K steps after which we only mine top 25% pixels.')

flags.DEFINE_float(
    'top_k_percent_pixels', 1.0,
Example #8
r"""Randomize all weights in a tflite file."""

from absl import app
from absl import flags

from tensorflow.lite.tools import flatbuffer_utils

FLAGS = flags.FLAGS

flags.DEFINE_string('input_tflite_file', None,
                    'Full path name to the input TFLite file.')
flags.DEFINE_string('output_tflite_file', None,
                    'Full path name to the output randomized TFLite file.')
flags.DEFINE_multi_integer(
    'buffers_to_skip', [], 'Buffer indices in the TFLite model to be skipped, '
    'i.e., to be left unmodified.')
flags.DEFINE_integer('random_seed', 0, 'Input to the random number generator.')

flags.mark_flag_as_required('input_tflite_file')
flags.mark_flag_as_required('output_tflite_file')


def main(_):
    model = flatbuffer_utils.read_model(FLAGS.input_tflite_file)
    flatbuffer_utils.randomize_weights(model, FLAGS.random_seed,
                                       FLAGS.buffers_to_skip)
    flatbuffer_utils.write_model(model, FLAGS.output_tflite_file)


if __name__ == '__main__':
    app.run(main)
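
Since both path flags are marked required, the script fails fast at startup if they are missing. A typical invocation (paths illustrative), showing how the multi-integer flag is repeated once per value:

python randomize_weights.py \
    --input_tflite_file=/tmp/model.tflite \
    --output_tflite_file=/tmp/model_randomized.tflite \
    --buffers_to_skip=0 --buffers_to_skip=3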
Example #9
flags.DEFINE_enum('method', 'tree', ['tree', 'boosting', 'knn', 'svm', 'nn'],
                  'Choose a method')

flags.DEFINE_integer('tree_max_depth', 10, 'Max depth for the tree.')

flags.DEFINE_enum('tree_criterion', 'gini', ['gini', 'entropy', 'random'],
                  'Tree branch split methods')

flags.DEFINE_integer('boosting_num_trees', 5,
                     'Number of trees for the boosting method.')

flags.DEFINE_integer('knn_k', 2, 'K in KNN')

flags.DEFINE_string('svm_kernel', 'linear', 'Kernel type')

flags.DEFINE_multi_integer('nn_depths', [], 'Depths of fully-connected layers')

flags.DEFINE_integer('nn_epochs', 5, 'Epochs to train for NN')

flags.DEFINE_float('train_ratio', 1,
                   'A percentage of training data is sampled for training')


def get_toy_dataset():
    """
  Toy dataset: a binary classification problem on the 2D plane.
  Toy dataset generation:
   1). choose 2 centers and assign them opposite labels;
   2). sample 1000 points from an isotropic Gaussian distribution;
   3). find a concentric circle that splits half of the samples from the other half;
   4). flip the labels for the samples outside of the circle.
Example #10
from benchmarks.driver.server.config import presets
from benchmarks.driver.tfserver import TFDistServer
from benchmarks.driver.workload import WTL, Executor
from benchmarks.driver.utils import atomic_directory, unique
from benchmarks.exps import parse_output_float, maybe_forced_preset


logger = logging.getLogger(__name__)
FLAGS = flags.FLAGS
TBatchSize = Union[str, int]

flags.DEFINE_boolean('basic_only', False, 'Only run the basic 20-iteration JCT measurement')
flags.DEFINE_float('threshold', 0.1, 'Allowed ratio of actual time to target time')
flags.DEFINE_integer('max_chance', 10, 'How many times to try')
flags.DEFINE_boolean('resume', False, 'Check and skip existing configurations')
flags.DEFINE_multi_integer('extra_mins', [1, 5, 10], 'Extra lengths')
flags.DEFINE_boolean('do_tfdist', True, 'Also measure TFDist JCT')
flags.DEFINE_boolean('is_mps', False, 'MPS is on for TF jobs')


def select_workloads(argv):
    # type: (Iterable[str]) -> Iterable[(str, TBatchSize)]
    """Select workloads based on commandline"""
    if not argv:
        names = WTL.known_workloads.keys()
    else:
        names = unique((
            name
            for piece in argv
            for name in piece.split(',')
        ), stable=True)
Example #11
flags.DEFINE_integer('d_ffn',
                     default=1024,
                     help='Dimension of pointwise feed forward networks.',
                     lower_bound=1)
flags.DEFINE_integer('num_layers',
                     default=12,
                     help='Number of stochastic blocks/encoder layers.',
                     lower_bound=0)
flags.DEFINE_integer('mem_len',
                     default=32,
                     help='Number of previous values to use as memory.')
flags.DEFINE_float('dropout_rate', default=0.1, help='Rate to drop units.')
flags.DEFINE_multi_integer(
    'cutoffs',
    default=[],
    help='Cutoffs to use for adaptive softmax layer. Do NOT '
         'enter the final cutoff (the vocab size). This will '
         'be inferred from your sp_model_file. Cutoffs may be '
         'entered by repeated use of --cutoffs=[NUMBER].')
flags.DEFINE_integer(
    'proj_factor',
    default=4,
    help=
    'Reduction factor of d_model in adaptive softmax for successive clusters')
flags.DEFINE_boolean(
    'straight_through',
    default=False,
    help='Set True to enable straight_through gradient in RelaxedOneHot layer.'
)
flags.DEFINE_multi_integer(
    'proj_dims',
Example #12
from absl import flags
from absl import logging

from Bio import AlignIO
from Bio import SeqIO

import numpy as np
from sklearn import cluster
import tensorflow as tf

from dedal import vocabulary

flags.DEFINE_string(
    'outdir', None,
    'Directory in which to save all preprocessed output files.')
flags.DEFINE_multi_integer(
    'pid_ths', [100],
    'Sequences with PIDs above these values will be clustered.')
flags.DEFINE_bool('encode_seqs', True,
                  'Whether to store protein sequences already encoded.')
flags.DEFINE_bool(
    'encode_ss', False,
    'Whether to store secondary structure sequences already encoded.')
flags.DEFINE_integer('num_shards', 30, 'Number of shards for TFRecords.')
flags.mark_flag_as_required('outdir')

FLAGS = flags.FLAGS

# URLs for database files (valid as of 04/04/2021)
URL_PFAM_SEED = 'http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam34.0/Pfam-A.seed.gz'
URL_PFAM_CLANS = 'http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam34.0/Pfam-A.clans.tsv.gz'
Example #13
FLAGS = flags.FLAGS

flags.DEFINE_enum_class(
    'backbone',
    default=BACKBONE.MOBILENETV2,
    enum_class=BACKBONE,
    help=
    "Select network backbone, One of {'MOBILENETV2','DARKNET53','EFFICIENTNET'}"
)
flags.DEFINE_integer('batch_size',
                     default=8,
                     lower_bound=0,
                     help="Train batch size")
flags.DEFINE_multi_integer('epochs',
                           default=[10, 10],
                           lower_bound=0,
                           help="Frozen train epochs and Full train epochs")
flags.DEFINE_string('export', default='tfjs/keras', help="Export path")
flags.DEFINE_string('input', default=None, help="Input data for the selected mode")
flags.DEFINE_multi_integer('input_size',
                           default=(380, 380),
                           lower_bound=0,
                           help="Input size")
flags.DEFINE_string('model', default=None, help="Model path")
flags.DEFINE_enum_class(
    'mode',
    default=MODE.TFJS,
    enum_class=MODE,
    help=
    "Select exec mode, One of {'TRAIN','IMAGE','VIDEO','TFLITE','SERVING','MAP','PRUNE'}"
)
Example #14
from cse547.loss import MultiLabelCrossEntropy
from cse547.models import LinearClassifier, MultiLayerPerceptron
from cse547.s3 import serialize_object
from cse547.train import ModelSaver, TrainingEvaluator, TrainingSummarizer, train

# Data flags
flags.DEFINE_string('training_data', 'data/patch_features_tiny/train2014.p',
                    'Data to train model.')
flags.DEFINE_string('validation_data', 'data/patch_features_tiny/val2014.p',
                    'Data to validate model.')

# Model flags
flags.DEFINE_enum('model', 'linear', ['linear', 'multilayer_perceptron'],
                  'The model type to use.')
flags.DEFINE_multi_integer(
    'model_multilayer_perceptron_hidden_units', [256],
    'The number of hidden units for the multi-layer perceptron.')

# Training flags
flags.DEFINE_integer('train_batch_size', 8, 'Batch sizes during training.')
flags.DEFINE_integer(
    'train_epochs', 32,
    'The number of times to iterate over the data in training.')
flags.DEFINE_float('train_l2_regularization', 0.5,
                   'L2 regularization in the loss function.')
flags.DEFINE_integer('train_summary_steps', 500,
                     'How often to summarize the model.')
flags.DEFINE_integer('train_evaluation_steps', 1000,
                     'How often to evaluate the model.')

# Training optimizer flags.
Example #15
flags.DEFINE_float('lr', 1e-3, 'Learning rate of the network.')
flags.DEFINE_integer('epoch', 50, 'Epoch of training')
flags.DEFINE_integer('training_batch_size', 8, 'training batch size')
flags.DEFINE_integer('test_batch_size', 16, 'test batch size')
flags.DEFINE_boolean(
    'finetuning', True,
    'If True, only the backend of the DNN will be trained; otherwise the feature extractor will also be trained'
)
flags.DEFINE_boolean(
    'load_full_weights', False,
    'Load full COCO pretrained weights including those of the last detection layers'
)

flags.DEFINE_multi_integer(
    'model_size', (608, 608),
    'Resolution of DNN input; must be a multiple of 32')
flags.DEFINE_integer('max_out_size', 1,
                     'maximum number of detected objects per class')
flags.DEFINE_float('iou_threshold', 0.5, 'threshold of non-max suppression')
flags.DEFINE_float('confid_threshold', 0.5, 'threshold of confidence')

flags.DEFINE_float('brightness_delta', 0.3,
                   'brightness_delta of data augmentation')
flags.DEFINE_multi_float('contrast_range', (0.5, 1.5),
                         'contrast_range of data augmentation')
flags.DEFINE_float('hue_delta', 0.2,
                   'hue_delta of data augmentation, only between (0, 0.5)')
flags.DEFINE_float('probability', 0.8, 'percentage of augmented images')

# Anchors of k-means threshold=0.98
Example #16
def define_flags():
    #####################
    # General app flags #
    #####################
    flags.DEFINE_string('username', 'username',
                        'Username for scihub.copernicus.eu')
    flags.DEFINE_string('password', 'password',
                        'Password for scihub.copernicus.eu')
    flags.DEFINE_string(
        'data_directory', '/data/',
        'Data directory used for storing downloaded and processed data')
    flags.DEFINE_bool('download', True, 'Download the files')
    flags.DEFINE_bool('process_tiles', True, 'Process the downloaded files')
    flags.DEFINE_bool('compress', True,
                      'Compress pre- and post-processed geotiff files')
    flags.DEFINE_string('logging_verbosity', 'info',
                        'Logging verbosity (debug|info|warning|error|fatal).')

    ###############
    # Query flags #
    ###############
    flags.DEFINE_string(
        'order_id', 'Empty',
        'Order id for postprocessing of already downloaded file')
    flags.DEFINE_string('geojson', 'denmark_without_bornholm',
                        'Name of the geojson file defining the area to query')
    flags.DEFINE_string('startdate', '20190803', 'The sensing start date')
    flags.DEFINE_string('enddate', '20190808', 'The sensing end date')
    flags.DEFINE_string(
        'satellite', 'all',
        'The Sentinel satellite(s) to get data from (s1/s2/s3/s5p/all)')
    # Sentinel-1
    flags.DEFINE_multi_integer(
        's1_relative_orbit', [44],
        'Relative orbit number (0 => all relative orbits)')
    # Sentinel-2
    flags.DEFINE_multi_integer(
        's2_relative_orbit', [8, 108],
        'Relative orbit number (0 => all relative orbits)')
    flags.DEFINE_integer('s2_max_cloudcoverage', 100,
                         'The maximum allowed cloud coverage')
    # Sentinel-3
    # Sentinel-5p

    ###################
    # Processor flags #
    ###################
    # General
    flags.DEFINE_bool('overwrite', True, 'Overwrite existing products')
    flags.DEFINE_bool('compress_gtiff', True, 'Compress GTiff files')
    # Sentinel-1
    flags.DEFINE_integer(
        's1_num_proc', 2,
        'Number of parallel processes for Sentinel-1 processing (approx. 20 GB RAM per process)'
    )
    flags.DEFINE_bool('s1_del_intermediate', False,
                      'Delete the intermediate Sentinel-1 processing data')
    flags.DEFINE_string(
        's1_output_crs', 'EPSG:32632',
        'Coordinate reference system for the output combined geotiff')
    # Sentinel-2
    flags.DEFINE_integer(
        's2_num_proc', 6,
        'Number of parallel processes for Sentinel-2 processing (approx. 8 GB RAM per process)'
    )
    flags.DEFINE_bool(
        'delete_jp2_files', True,
        'Delete jp2 files after they have been converted to GTiff')
    flags.DEFINE_bool('s2_ndvi', True,
                      'Calculate NDVI index of Sentinel-2 data')
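
Wrapping the DEFINE_* calls in define_flags() defers flag registration until the function is called, so the module can be imported without touching the global FLAGS; the caller just has to run it before absl parses the command line. A sketch of the expected call site (assumed, not shown in the excerpt):

if __name__ == '__main__':
    define_flags()  # Register flags first, then let app.run parse argv.
    app.run(main)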
Example #17
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import flags
from moonlight.models.base import glyph_patches
from moonlight.models.base import hyperparameters
from moonlight.protobuf import musicscore_pb2
import tensorflow as tf

FLAGS = flags.FLAGS

flags.DEFINE_multi_integer(
    'layer_dims', [20, 20],
    'Dimensions of each hidden layer. --layer_dims=0 indicates logistic'
    ' regression (predictions directly connected to inputs through a sigmoid'
    ' layer).')
flags.DEFINE_string(
    'activation_fn', 'sigmoid',
    'The name of the function (under tf.nn) to apply after each layer.')
flags.DEFINE_float('learning_rate', 0.1, 'FTRL learning rate')
flags.DEFINE_float('l1_regularization_strength', 0.01, 'L1 penalty')
flags.DEFINE_float('l2_regularization_strength', 0, 'L2 penalty')
flags.DEFINE_float('dropout', 0, 'Dropout to apply to all hidden nodes.')


def _custom_metrics(features, labels, predictions):
    """Metrics to be computed on every evaluation run, viewable in TensorBoard."""
    del features  # Unused.
    return {
Example #18
FLAGS = flags.FLAGS

flags.DEFINE_enum_class(
    'backbone',
    default=BACKBONE.MOBILENETV2,
    enum_class=BACKBONE,
    help=
    "Select network backbone, One of {'MOBILENETV2','DARKNET53','EFFICIENTNET'}"
)
flags.DEFINE_integer('batch_size',
                     default=8,
                     lower_bound=0,
                     help="Train batch size")
flags.DEFINE_string('config', default=None, help="Config path")
flags.DEFINE_multi_integer('epochs',
                           default=[10, 10],
                           lower_bound=0,
                           help="Frozen train epochs and Full train epochs")
flags.DEFINE_string('export', default='export_model/8', help="Export path")
flags.DEFINE_string('input', default=None, help="Input data for the selected mode")
flags.DEFINE_multi_integer('input_size',
                           default=(380, 380),
                           lower_bound=0,
                           help="Input size")
flags.DEFINE_string('log_directory', default=None, help="Log directory")
flags.DEFINE_string('model', default=None, help="Model path")
flags.DEFINE_enum_class(
    'mode',
    default=MODE.TRAIN,
    enum_class=MODE,
    help=
    "Select exec mode, One of {'TRAIN','TRAIN_BACKBONE','IMAGE','VIDEO','TFLITE','SERVING','MAP','PRUNE'}"
Example #19
         'See autoaugment.py for detailed description.')

flags.DEFINE_integer(
    'randaug_magnitude', default=None,
    help='If RandAug is used, what should the magnitude be. '
         'See autoaugment.py for detailed description.')

# Inference configuration.
flags.DEFINE_bool(
    'add_warmup_requests', False,
    'Whether to add warmup requests into the export saved model dir, '
    'especially for TPU inference.')
flags.DEFINE_string('model_name', 'resnet',
                    'Serving model name used for the model server.')
flags.DEFINE_multi_integer(
    'inference_batch_sizes', [8],
    'Known inference batch sizes used to warm up for each core.')
flags.DEFINE_bool(
    'export_moving_average', False,
    'Whether to export model using moving average variables.')


# The input tensor is in the range of [0, 255], we need to scale them to the
# range of [0, 1]
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]


def get_lr_schedule(train_steps, num_train_images, train_batch_size):
  """learning rate schedule."""
  steps_per_epoch = np.floor(num_train_images / train_batch_size)
Example #20
import tensorflow as tf

tfkl = tf.keras.layers
FLAGS = flags.FLAGS

#################################### I/O #######################################
flags.DEFINE_string('outdir',
                    'ipdf_output/',
                    'The directory in which to save results and images.')
flags.DEFINE_bool('save_models', True, 'Whether to save the vision and IPDF'
                  ' models at the end of training.')
################################ Model Specs ###################################
flags.DEFINE_multi_integer('head_network_specs',
                           [256]*2,
                           'The sizes of the dense layers in the head network.')
#################################### Data ######################################
flags.DEFINE_multi_string('symsol_shapes', ['tet'],
                          'Can be any subset of the 8 shapes of SYMSOL I & II: '
                          'tet, cube, icosa, cyl, cone, tetX, cylO, sphereX, or'
                          ' \'symsol1\' for the first five.')
flags.DEFINE_integer('downsample_continuous_gt', 0,
                     'Whether, and how much, to downsample the cone and '
                     'cylinder ground truth rotations, which can make '
                     'evaluation slow.')
################################# Training #####################################
flags.DEFINE_integer('number_training_iterations',
                     10_000,
                     'The number of iterations to train.')
flags.DEFINE_float('learning_rate', 1e-4, 'The learning rate.')
Example #21
flags.DEFINE_string("output_dir", None,
                    "Path for writing preprocessed NQ examples.")

flags.DEFINE_integer("stride", 2048,
                     "Token length stride for splitting documents.")

flags.DEFINE_integer("seq_len", 4096, "Total window size in word pieces.")

flags.DEFINE_integer("global_seq_len", 230, "Total number of global tokens.")

flags.DEFINE_integer("question_len", 32,
                     "Maximum question length in word pieces.")

flags.DEFINE_multi_integer(
    "global_token_types", [0, 1, 2],
    "Global token types for sentence, CLS, and question "
    "word piece tokens respectively.")

flags.DEFINE_enum("tokenizer_type", "BERT", ["BERT", "ALBERT"],
                  "Specifies which tokenizers to use.")

flags.DEFINE_string(
    "vocab_file", None,
    "Path to a wordpiece vocabulary to be used with the tokenizer. "
    "This is ignored when using the ALBERT tokenizer if 'spm_model_path' is "
    "specified.")

flags.DEFINE_boolean(
    "do_lower_case", True,
    "Whether to lower case text. This is ignored when using "
    "the ALBERT tokenizer if 'spm_model_path' is specified.")
Example #22
                  'constant. Training on cifar should always use the schedule '
                  ', this flag is mostly for testing purpose.')
flags.DEFINE_float('weight_decay', 0.001, 'Weight decay coefficient.')
flags.DEFINE_integer('run_seed', 0,
                     'Seed to use to generate pseudo random number during '
                     'training (for dropout for instance). Has no influence on '
                     'the dataset shuffling.')
flags.DEFINE_bool('use_rmsprop', False, 'If True, uses RMSprop instead of SGD')
flags.DEFINE_enum('lr_schedule', 'cosine', ['cosine', 'exponential'],
                  'Learning rate schedule to use.')

# Additional flags that don't affect the model.
flags.DEFINE_integer('save_progress_seconds', 3600, 'Save progress every...s')
flags.DEFINE_multi_integer(
    'additional_checkpoints_at_epochs', [],
    'Additional epochs when we should save the model for later analysis. '
    'No matter the value of this flag, the most recent version of the model '
    'will be saved regularly to resume training if needed.')
flags.DEFINE_bool('also_eval_on_training_set', False,
                  'If set to true, the model will also be evaluated on the '
                  '(non-augmented) training set at the end of each epoch.')
flags.DEFINE_bool('compute_top_5_error_rate', False,
                  'If true, will also compute top 5 error rate.')
flags.DEFINE_float('label_smoothing', 0.0, 'Label smoothing for cross entropy.')
flags.DEFINE_float('ema_decay', 0.0, 'If not zero, use EMA on all weights.')
flags.DEFINE_bool('no_weight_decay_on_bn', False,
                  'If set to True, will not apply weight decay on the batch '
                  'norm parameters.')
flags.DEFINE_integer('evaluate_every', 1,
                     'Evaluate on the test set every n epochs.')
Example #23
flags.DEFINE_string('exp_name', 'exp_nuisance_test', 'Experiment name.')
flags.DEFINE_string('output', None, 'Output path.')
flags.DEFINE_string('log_path', None, 'Filepath to save the execution state.')

flags.DEFINE_enum('model', None, ['dlvm', 'lrmf'],
                  'Data model class, can be `dlvm` or `lrmf`.')
flags.DEFINE_integer('n_observations', None, 'Number of observations.')
flags.DEFINE_integer('p_ambient', None, 'Dimension of the ambient space.')
flags.DEFINE_float('y_snr', None, 'SNR in outcome generation (y0, y1).')
flags.DEFINE_float('x_snr', None, 'SNR in covariate generation (X).')
flags.DEFINE_float('prop_miss', None, 'Proportion of MCAR missing values.')
flags.DEFINE_bool('regularize', None, 'Regularize ATE.')
flags.DEFINE_integer('n_seeds', 5, 'Number of seed replications.')
flags.DEFINE_float('d_over_p', None, 'Ratio of d over p.')
flags.DEFINE_multi_integer(
    'd_latent', None,
    'Dimension of latent space (specify either `d_over_p` or `d_latent`).')
flags.DEFINE_float('mu_z', None, 'Expectation of distribution on Z.')
flags.DEFINE_float('sig_z', None, 'Variance of distribution on Z.')
flags.DEFINE_float('sig_xgivenz', None,
                   'Value of fixed variance for X|Z=z, must be positive')

flags.DEFINE_integer('n_imputations', None, 'Number of imputations.')

flags.DEFINE_integer('miwae_d_offset', None,
                     'Proxy for the dimension of the latent space: d + offset.')
flags.DEFINE_float('miwae_mu_prior', None,
                   'Expectation of prior distribution on Z for MIWAE.')
flags.DEFINE_float('miwae_sig_prior', None,
                   'Variance of prior distribution on Z for MIWAE.')
flags.DEFINE_integer('miwae_n_samples_zmul', None,
Example #24
    "The number of steps at "
    "which the global step information is logged.",
)

# Inference configuration.
flags.DEFINE_bool(
    "add_warmup_requests",
    False,
    "Whether to add warmup requests into the export saved model dir,"
    "especially for TPU inference.",
)
flags.DEFINE_string("model_name", "resnet",
                    "Serving model name used for the model server.")
flags.DEFINE_multi_integer(
    "inference_batch_sizes",
    [8],
    "Known inference batch sizes used to warm up for each core.",
)

# The input tensor is in the range of [0, 255], we need to scale them to the
# range of [0, 1]
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]


def get_lr_schedule(train_steps, num_train_images, train_batch_size):
    """learning rate schedule."""
    steps_per_epoch = np.floor(num_train_images / train_batch_size)
    train_epochs = train_steps / steps_per_epoch
    return [  # (multiplier, epoch to start) tuples
        (1.0, np.floor(5 / 90 * train_epochs)),
Example #25
from acme import specs
from acme.agents.jax import mbop
from acme.datasets import tfds
from acme.examples.offline import helpers as gym_helpers
from acme.jax import running_statistics
from acme.utils import loggers
import jax
import optax
import tensorflow_datasets

# Training flags.
_NUM_NETWORKS = flags.DEFINE_integer('num_networks', 10,
                                     'Number of ensemble networks.')
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate.')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 64, 'Batch size.')
_HIDDEN_LAYER_SIZES = flags.DEFINE_multi_integer(
    'hidden_layer_sizes', [64, 64], 'Sizes of the hidden layers.')
_NUM_SGD_STEPS_PER_STEP = flags.DEFINE_integer(
    'num_sgd_steps_per_step', 1,
    'How many gradient updates to perform per learner step.')
_NUM_NORMALIZATION_BATCHES = flags.DEFINE_integer(
    'num_normalization_batches', 50,
    'Number of batches used for calculating the normalization statistics.')
_EVALUATE_EVERY = flags.DEFINE_integer('evaluate_every', 20,
                                       'Evaluation period.')
_EVALUATION_EPISODES = flags.DEFINE_integer('evaluation_episodes', 10,
                                            'Evaluation episodes.')
_SEED = flags.DEFINE_integer('seed', 0,
                             'Random seed for learner and evaluator.')

# Environment flags.
_ENV_NAME = flags.DEFINE_string('env_name', 'HalfCheetah-v2',
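
Unlike most examples here, this one keeps the return value of each DEFINE_* call. absl's DEFINE_* functions return flags.FlagHolder objects, so values are read as _BATCH_SIZE.value rather than FLAGS.batch_size, which makes each flag's definition and its uses easy to cross-reference. A minimal sketch of the holder pattern:

from absl import app, flags

_HIDDEN_LAYER_SIZES = flags.DEFINE_multi_integer(
    'hidden_layer_sizes', [64, 64], 'Sizes of the hidden layers.')


def main(argv):
    del argv  # Unused.
    print(_HIDDEN_LAYER_SIZES.value)  # [64, 64] unless overridden on argv.


if __name__ == '__main__':
    app.run(main)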
Example #26
flags.DEFINE_float(
    'anomaly_threshold', 3, 'A value greater than 1. Suppress anomalies '
    'where the mean-square-gradients for a step exceed the long-term average '
    'by at least this factor.')

# Dataset settings.
flags.DEFINE_string('dataset_name', 'pascal_voc_seg',
                    'Name of the segmentation dataset.')
flags.DEFINE_string('train_split', 'train_aug',
                    'Which split of the dataset to use for training')
flags.DEFINE_string('eval_split', 'val',
                    'Which split of the dataset to use for evaluation')
flags.DEFINE_string('dataset_dir', None, 'Where the dataset resides.')

# Preprocess settings.
flags.DEFINE_multi_integer('crop_size', [513, 513],
                           'Image crop size [height, width].')
flags.DEFINE_float('min_scale_factor', 0.5,
                   'Minimum scale factor for data augmentation.')
flags.DEFINE_float('max_scale_factor', 2,
                   'Maximum scale factor for data augmentation.')
flags.DEFINE_float('scale_factor_step_size', 0.25,
                   'Scale factor step size for data augmentation.')

# Model settings.
flags.DEFINE_multi_integer('atrous_rates', [6, 12, 18],
                           'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 16,
                     'The ratio of input to output spatial resolution.')
flags.DEFINE_boolean('fine_tune_batch_norm', True,
                     'Fine tune the batch norm parameters or not.')
flags.DEFINE_boolean('upsample_logits', True,
Example #27
flags.DEFINE_integer(
    'train_batch_size', 256,
    'Batch size to use for train/eval evaluation. For GPU '
    'this is batch size as expected. If "use_tpu" is set, '
    'final batch size will be = train_batch_size * num_tpu_cores')

flags.DEFINE_integer('conv_width', 256 if go.N == 19 else 32,
                     'The width of each conv layer in the shared trunk.')

flags.DEFINE_integer('fc_width', 256 if go.N == 19 else 64,
                     'The width of the fully connected layer in value head.')

flags.DEFINE_integer('trunk_layers', go.N,
                     'The number of resnet layers in the shared trunk.')

flags.DEFINE_multi_integer(
    'lr_boundaries', [400000, 600000],
    'The number of steps at which the learning rate will decay')

flags.DEFINE_multi_float('lr_rates', [0.01, 0.001, 0.0001],
                         'The different learning rates')

flags.DEFINE_float('l2_strength', 1e-4,
                   'The L2 regularization parameter applied to weights.')

flags.DEFINE_float(
    'value_cost_weight', 1.0,
    'Scalar for value_cost, AGZ paper suggests 1/100 for '
    'supervised learning')

flags.DEFINE_float('sgd_momentum', 0.9,
                   'Momentum parameter for learning rate.')
Example #28
                          'total batch size.')
flags.DEFINE_boolean('USE_ADASUM', default=False,
                     help='use adasum algorithm to do reduction')
flags.DEFINE_integer('LOG_INTERVAL', default=10,
                     help='how many batches to wait before logging training status')

# Default settings from https://arxiv.org/abs/1706.02677.
flags.DEFINE_string('MODEL_NAME', 'MobileNetV2',
                    help='The name of the architecture to train.')
flags.DEFINE_string('DATASET_NAME', 'CIFAR100',
                    help='The name of the dataset to train.')
flags.DEFINE_multi_float('DATA_MEAN', [0.5071, 0.4867, 0.4408],
                         help='mean value of dataset')
flags.DEFINE_multi_float('DATA_STD', [0.2675, 0.2565, 0.2761],
                         help='standard deviation value of dataset')
flags.DEFINE_multi_integer('DATA_SHAPE', [3, 32, 32],
                           help='data dimension of dataset')
flags.DEFINE_list('BLOCK_ARGS', ['wm1.0_rn8_s1',
                                 't1_c16_n1_s1',
                                 't6_c24_n2_s1',
                                 't6_c32_n3_s2',
                                 't6_c64_n4_s2',
                                 't6_c96_n3_s1',
                                 't6_c160_n3_s2',
                                 't6_c320_n1_s1'],
                  help='argument of blocks in EfficientNet style')
flags.DEFINE_integer('BATCH_SIZE', default=128,
                     help='input batch size for training')
flags.DEFINE_integer('VALID_BATCH_SIZE', default=32,
                     help='input batch size for validation')
flags.DEFINE_integer('TEST_BATCH_SIZE', default=1,
                     help='input batch size for test')
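
Note the mix above: BLOCK_ARGS uses flags.DEFINE_list, which takes a single comma-separated value and always yields strings, while DATA_SHAPE uses flags.DEFINE_multi_integer, which is repeated once per item and parses each occurrence as an int. A minimal sketch of the difference (flag names illustrative):

from absl import flags

fv = flags.FlagValues()
flags.DEFINE_list('names', ['a', 'b'], 'Comma-separated strings.', flag_values=fv)
flags.DEFINE_multi_integer('dims', [3, 32, 32], 'Repeatable integers.', flag_values=fv)
fv(['prog', '--names=x,y,z', '--dims=1', '--dims=28'])
assert fv.names == ['x', 'y', 'z']  # Strings, split on commas.
assert fv.dims == [1, 28]           # Ints, one per occurrence.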
Example #29
flags.DEFINE_string(
    'backbone_ckpt', '',
    'Location of the ResNet50 checkpoint to use for model '
    'initialization.')
flags.DEFINE_string('hparams', '',
                    'Comma separated k=v pairs of hyperparameters.')
flags.DEFINE_integer('num_cores',
                     default=8,
                     help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer('num_cores_per_replica',
                     default=8,
                     help='Number of TPU cores per '
                     'replica when using spatial partition.')
flags.DEFINE_multi_integer(
    'input_partition_dims', [1, 4, 2, 1],
    'A list that describes the partition dims for all the tensors.')
flags.DEFINE_integer('train_batch_size', 64, 'training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'evaluation batch size')
flags.DEFINE_integer('eval_samples', 5000, 'The number of samples for '
                     'evaluation.')
flags.DEFINE_integer('iterations_per_loop', 100,
                     'Number of iterations per TPU training loop')
flags.DEFINE_string(
    'training_file_pattern', None,
    'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('validation_file_pattern', None,
                    'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string('val_json_file', None,
                    'COCO validation JSON containing golden bounding boxes.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
Example #30
flags.DEFINE_integer('baseline_bz', 32,
                     'baseline batch size for linear learning rate schedule')

flags.DEFINE_integer('warmup_batch_idx', 100,
                     'warm up batch idx for linear learning rate schedule')

# a bit unintuitively used in do_training()...
flags.DEFINE_integer(
    'warmup_epochs', None,
    'overrides warmup_batch_idx, defining number of warmup '
    'epochs to train')

flags.DEFINE_boolean('anneal', False, 'use geometric annealing')
flags.DEFINE_float('anneal_rate', 0.1, 'geometric factor to anneal by')
flags.DEFINE_multi_integer('anneal_epochs', [60, 120, 180],
                           'epochs to anneal on. overrides anneal_iters.')
flags.DEFINE_multi_integer('anneal_iters', [80 * 300, 120 * 300, 160 * 300],
                           'iterations to anneal on.')


def lr_schedule(batch_size, batch_idx, optimizer):
    """ schedule the learning rate by linear LR schedule  w/warmup """
    if flags.FLAGS.linear_lr or flags.FLAGS.sqrt_lr:
        bs_ratio = float(batch_size) / flags.FLAGS.baseline_bz
        baseline_lr = flags.FLAGS.learning_rate
        final_lr = baseline_lr * bs_ratio
        if flags.FLAGS.sqrt_lr:
            final_lr = math.sqrt(final_lr)
        if flags.FLAGS.sqrt_lr or bs_ratio < 1 or batch_idx >= flags.FLAGS.warmup_batch_idx:
            lr = final_lr
        else: