Example #1
File: dual_net.py Project: TUBOSS/minigo
                     'The width of the value conv layer.')

flags.DEFINE_integer('fc_width', 256 if go.N == 19 else 64,
                     'The width of the fully connected layer in value head.')

flags.DEFINE_integer('trunk_layers', go.N,
                     'The number of resnet layers in the shared trunk.')

flags.DEFINE_multi_integer(
    'lr_boundaries', [400000, 600000],
    'The number of steps at which the learning rate will decay')

flags.DEFINE_multi_float('lr_rates', [0.01, 0.001, 0.0001],
                         'The different learning rates')

flags.DEFINE_float('l2_strength', 1e-4,
                   'The L2 regularization parameter applied to weights.')

flags.DEFINE_float(
    'value_cost_weight', 1.0,
    'Scalar for value_cost, AGZ paper suggests 1/100 for '
    'supervised learning')

flags.DEFINE_float('sgd_momentum', 0.9,
                   'Momentum parameter for SGD.')

flags.DEFINE_string(
    'work_dir', None, 'The Estimator working directory. Used to dump: '
    'checkpoints, tensorboard logs, etc.')

flags.DEFINE_bool('use_tpu', False, 'Whether to use TPU for training.')
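
The DEFINE_multi_integer / DEFINE_multi_float flags above accumulate repeated command-line occurrences into a list. A minimal sketch of that behavior, assuming only that absl-py is installed (the flag and values are illustrative, not from the project above):

# Illustrative sketch, not project code.
from absl import app
from absl import flags

flags.DEFINE_multi_float('lr_rates', [0.01, 0.001], 'The different learning rates')
FLAGS = flags.FLAGS

def main(argv):
    del argv  # Unused.
    # Passing `--lr_rates=0.1 --lr_rates=0.02` yields [0.1, 0.02]; with no
    # occurrences on the command line, the default list is used as-is.
    print(FLAGS.lr_rates)

if __name__ == '__main__':
    app.run(main)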
Example #2
    'Set:Get key pattern. G for Gaussian distribution, R for '
    'uniform Random, S for Sequential. Defaults to R:R.')
MEMTIER_LOAD_KEY_MAXIMUM = flags.DEFINE_integer(
    'memtier_load_key_maximum', None, 'Key ID maximum value to load. '
    'The range of keys will be from 1 (min) to this specified max key value. '
    'If not set, defaults to memtier_key_maximum. Setting this different from '
    'memtier_key_maximum allows triggering of eviction behavior.')
MEMTIER_KEY_MAXIMUM = flags.DEFINE_integer(
    'memtier_key_maximum', 10000000, 'Key ID maximum value. The range of keys '
    'will be from 1 (min) to this specified max key value.')
MEMTIER_LATENCY_CAPPED_THROUGHPUT = flags.DEFINE_bool(
    'latency_capped_throughput', False,
    'Measure latency capped throughput. Use in conjunction with '
    'memtier_latency_cap. Defaults to False. ')
MEMTIER_LATENCY_CAP = flags.DEFINE_float(
    'memtier_latency_cap', 1.0, 'Latency cap in ms. Use in conjunction with '
    'latency_capped_throughput. Defaults to 1ms.')
MEMTIER_RUN_MODE = flags.DEFINE_enum(
    'memtier_run_mode', MemtierMode.NORMAL_RUN, MemtierMode.ALL,
    'Mode that the benchmark is set to. NORMAL_RUN measures latency and '
    'throughput, MEASURE_CPU_LATENCY measures single threaded latency at '
    'memtier_cpu_target. When measuring CPU latency flags for '
    'clients, threads, and pipelines are ignored and '
    'memtier_cpu_target and memtier_cpu_duration must not '
    'be None.')
MEMTIER_CPU_TARGET = flags.DEFINE_float(
    'memtier_cpu_target', 0.5,
    'The target CPU utilization when running memtier and trying to get the '
    'latency at variable CPU metric. The target can range from 1%-100% and '
    'represents the percent CPU utilization (e.g. 0.5 -> 50% CPU utilization)')
MEMTIER_CPU_DURATION = flags.DEFINE_integer(
Example #3
from typing import Any, Dict, List
from absl import flags
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import cuda_toolkit
from perfkitbenchmarker.linux_packages import xgboost

_TREE_METHOD = flags.DEFINE_enum(
    'xgboost_tree_method', 'gpu_hist', ['gpu_hist', 'hist'],
    'XGBoost builtin tree methods.')
_SPARSITY = flags.DEFINE_float(
    'xgboost_sparsity', 0.0, 'XGBoost sparsity-aware split finding algorithm.')
_ROWS = flags.DEFINE_integer(
    'xgboost_rows', 1000000, 'The number of data rows.')
_COLUMNS = flags.DEFINE_integer(
    'xgboost_columns', 50, 'The number of data columns.')
_ITERATIONS = flags.DEFINE_integer(
    'xgboost_iterations', 500, 'The number of training iterations.')
_TEST_SIZE = flags.DEFINE_float(
    'xgboost_test_size', 0.25,
    'Train-test split for evaluating machine learning algorithms')
_PARAMS = flags.DEFINE_string(
    'xgboost_params', None,
    'Provide additional parameters as a Python dict string, '
    'e.g. --params \"{\'max_depth\':2}\"')
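
One plausible way to consume such a dict-valued string flag is ast.literal_eval; this is a sketch under that assumption, not the benchmark's actual parsing code:

# Hypothetical helper: safely evaluate a --xgboost_params style string
# into a Python dict, falling back to {} when the flag is unset.
import ast

def parse_xgboost_params(params_str):
    return ast.literal_eval(params_str) if params_str else {}

print(parse_xgboost_params("{'max_depth': 2}"))  # -> {'max_depth': 2}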

Example #4
File: run.py Project: rpeloff/moonshot
    "gradient_clip_norm": 5.,
    "epochs": 100,
    # that magic number
    "seed": 42
}


# one-shot evaluation (and validation) options
flags.DEFINE_integer("episodes", 400, "number of L-way K-shot learning episodes")
flags.DEFINE_integer("L", 10, "number of classes to sample in a task episode (L-way)")
flags.DEFINE_integer("K", 1, "number of task learning samples per class (K-shot)")
flags.DEFINE_integer("N", 15, "number of task evaluation samples")
flags.DEFINE_integer("k_neighbours", 1, "number of nearest neighbours to consider")
flags.DEFINE_string("metric", "cosine", "distance metric to use for nearest neighbours matching")
flags.DEFINE_integer("fine_tune_steps", None, "number of fine-tune gradient steps on one-shot data")
flags.DEFINE_float("fine_tune_lr", 1e-3, "learning rate for gradient descent fine-tune")
flags.DEFINE_enum("speaker_mode", "baseline", ["baseline", "difficult", "distractor"],
                  "type of speakers selected in a task episode")
flags.DEFINE_bool("direct_match", True, "directly match speech to images")
flags.DEFINE_bool("unseen_match_set", False, "match set contains classes unseen in K-shot learning")

# model train/test options
flags.DEFINE_string("vision_base_dir", None, "directory containing base vision network model")
flags.DEFINE_string("audio_base_dir", None, "directory containing base audio network model")
flags.DEFINE_bool("l2_norm", True, "L2-normalise embedding predictions (as done in training)")
flags.DEFINE_bool("load_best", False, "load previous best model for resumed training or testing")
flags.DEFINE_bool("mc_dropout", False, "make embedding predictions with MC Dropout")
# TODO optional train from scratch or fine-tune base model?
# flags.DEFINE_bool("use_embeddings", False, "train on extracted embeddings from base model"
#                   "(default loads base network ('<best_>model.h5') and fine-tunes siamese loss)")
Example #5
def define_deep_speech_flags():
    """Add flags for run_deep_speech."""
    # Add common flags
    flags_core.define_base(
        data_dir=False,  # we use train_data_dir and eval_data_dir instead
        export_dir=True,
        train_epochs=True,
        hooks=True,
        epochs_between_evals=True,
    )
    flags_core.define_performance(num_parallel_calls=False,
                                  inter_op=False,
                                  intra_op=False,
                                  synthetic_data=False,
                                  max_train_steps=False,
                                  dtype=False)
    flags_core.define_benchmark()
    flags.adopt_module_key_flags(flags_core)

    flags_core.set_defaults(model_dir=_DEFAULT_MODEL_DIR,
                            export_dir=_DEFAULT_SAVE_MODEL_DIR,
                            train_epochs=10,
                            batch_size=128,
                            hooks=[],
                            epochs_between_evals=4)

    # Deep speech flags
    flags.DEFINE_integer(name="seed",
                         default=1,
                         help=flags_core.help_wrap("The random seed."))

    flags.DEFINE_string(
        name="train_data_dir",
        default=_DEFAULT_TRAIN_DIR,
        help=flags_core.help_wrap("The csv file path of train dataset."))

    flags.DEFINE_string(
        name="eval_data_dir",
        default=_DEFAULT_EVAL_DIR,
        help=flags_core.help_wrap("The csv file path of evaluation dataset."))

    flags.DEFINE_bool(
        name="sortagrad",
        default=True,
        help=flags_core.help_wrap(
            "If true, sort examples by audio length and perform no "
            "batch_wise shuffling for the first epoch."))

    flags.DEFINE_integer(
        name="sample_rate",
        default=16000,
        help=flags_core.help_wrap("The sample rate for audio."))

    flags.DEFINE_integer(
        name="window_ms",
        default=20,
        help=flags_core.help_wrap("The frame length for spectrogram."))

    flags.DEFINE_integer(name="stride_ms",
                         default=10,
                         help=flags_core.help_wrap("The frame step."))

    flags.DEFINE_string(
        name="vocabulary_file",
        default=_DEFAULT_CHARACTERS_FILE,
        help=flags_core.help_wrap("The file path of vocabulary file."))

    # RNN related flags
    flags.DEFINE_integer(name="rnn_hidden_size",
                         default=800,
                         help=flags_core.help_wrap("The hidden size of RNNs."))

    flags.DEFINE_integer(
        name="rnn_hidden_layers",
        default=5,
        help=flags_core.help_wrap("The number of RNN layers."))

    flags.DEFINE_bool(name="use_bias",
                      default=True,
                      help=flags_core.help_wrap(
                          "Use bias in the last fully-connected layer"))

    flags.DEFINE_bool(
        name="is_bidirectional",
        default=True,
        help=flags_core.help_wrap("If rnn unit is bidirectional"))

    flags.DEFINE_enum(name="rnn_type",
                      default="gru",
                      enum_values=deep_speech.SUPPORTED_RNNS.keys(),
                      case_sensitive=False,
                      help=flags_core.help_wrap("Type of RNN cell."))

    # Training related flags
    flags.DEFINE_float(name="learning_rate",
                       default=5e-4,
                       help=flags_core.help_wrap("The initial learning rate."))

    # Evaluation metrics threshold
    flags.DEFINE_float(
        name="wer_threshold",
        default=None,
        help=flags_core.help_wrap(
            "If passed, training will stop when the evaluation metric WER is "
            "greater than or equal to wer_threshold. For libri speech dataset "
            "the desired wer_threshold is 0.23 which is the result achieved by "
            "MLPerf implementation."))

    flags.DEFINE_integer(name='num_gpus', default=-1, help='num_gpus')
Example #6
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")

flags.DEFINE_integer("eval_batch_size", 32, "Total batch size for eval.")

flags.DEFINE_integer("epochs", 5, "Total epochs.")

flags.DEFINE_integer("train_data_size", 10000, "The number of examples in the"
                     "training data")

flags.DEFINE_integer("eval_data_size", -1, "The number of examples in the"
                     "validation data")

flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")

flags.DEFINE_float("learning_rate", 5e-5,
                   "The initial learning rate for Adam.")

flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")

flags.DEFINE_integer("save_checkpoints_steps", 10000,
                     "How often to save the model checkpoint.")

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")

flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

flags.DEFINE_string(
Example #7
        return ops


def main(argv):
    del argv  # Unused.
    batch = FLAGS.batch
    dataset = data.get_dataset(FLAGS.dataset, dict(batch_size=batch))
    scales = int(round(math.log(dataset.width // FLAGS.latent_width, 2)))
    model = AAE(dataset,
                FLAGS.train_dir,
                latent=FLAGS.latent,
                depth=FLAGS.depth,
                scales=scales,
                adversary_lr=FLAGS.adversary_lr,
                disc_layer_sizes=FLAGS.disc_layer_sizes)
    model.train()


if __name__ == '__main__':
    flags.DEFINE_integer('depth', 64, 'Depth of the first convolution.')
    flags.DEFINE_integer(
        'latent', 16,
        'Latent space depth, the total latent size is the depth multiplied by '
        'latent_width ** 2.')
    flags.DEFINE_integer('latent_width', 4, 'Width of the latent space.')
    flags.DEFINE_float('adversary_lr', 1e-4,
                       'Learning rate for discriminator.')
    flags.DEFINE_string('disc_layer_sizes', '100,100',
                        'Comma-separated list of discriminator layer sizes.')
    app.run(main)
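
Defining flags inside the __main__ guard, as above, works because app.run parses sys.argv before invoking main; a flag read before parsing fails. A minimal sketch of that ordering, assuming only absl-py (not project code):

# Illustrative sketch: flags are only readable after app.run() has parsed
# the command line.
from absl import app
from absl import flags

flags.DEFINE_integer('depth', 64, 'Depth of the first convolution.')
FLAGS = flags.FLAGS

def main(argv):
    del argv  # Unused.
    print(FLAGS.depth)  # Safe: parsing happened inside app.run().

if __name__ == '__main__':
    # Reading FLAGS.depth here, before app.run(), would raise
    # flags.UnparsedFlagAccessError.
    app.run(main)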
Example #8
flags.DEFINE_integer("n_steps_per_batch", 8, "Number of steps per batch")
flags.DEFINE_integer("all_summary_freq", 50,
                     "Record all summaries every n batch")
flags.DEFINE_integer("scalar_summary_freq", 5,
                     "Record scalar summaries every n batch")
flags.DEFINE_string("checkpoint_path", "_files/models",
                    "Path for agent checkpoints")
flags.DEFINE_string("summary_path", "_files/summaries",
                    "Path for tensorboard summaries")
flags.DEFINE_string("model_name", "temp_testing",
                    "Name for checkpoints and tensorboard summaries")
flags.DEFINE_integer(
    "K_batches", -1,
    "Number of training batches to run in thousands, use -1 to run forever")
flags.DEFINE_string("map_name", "MoveToBeacon", "Name of a map to use.")
flags.DEFINE_float("discount", 0.95, "Reward-discount for the agent")
flags.DEFINE_boolean(
    "training", True,
    "if should train the model, if false then save only episode score summaries"
)
flags.DEFINE_enum(
    "if_output_exists", "fail", ["fail", "overwrite", "continue"],
    "What to do if summary and model output exists, only for training, is ignored if notraining"
)
flags.DEFINE_float("max_gradient_norm", 500.0,
                   "good value might depend on the environment")
flags.DEFINE_float("loss_value_weight", 1.0,
                   "good value might depend on the environment")
flags.DEFINE_float("entropy_weight_spatial", 1e-6,
                   "entropy of spatial action distribution loss weight")
flags.DEFINE_float("entropy_weight_action", 1e-6,
Example #9
FLAGS = flags.FLAGS

# TODO(gnegiar): Use return values from flags.
flags.DEFINE_enum('dataset', 'mnist', ['mnist', 'lra_pathfinder'],
                  'which dataset to use')
flags.DEFINE_enum('pathfinder_difficulty', 'easy', ['easy', 'hard'],
                  'The level of difficulty for the pathfinder dataset.')
flags.DEFINE_integer('pathfinder_resolution', 32,
                     'Resolution for the pathfinder task.')
flags.DEFINE_integer('batch_size', 128, 'Training batch size')
flags.DEFINE_integer('log_freq', 10,
                     'Log batch accuracy and loss every log_freq iterations.')
flags.DEFINE_integer('n_epochs', 5, 'Number of training epochs.')
flags.DEFINE_float(
    'alpha', .1,
    'Probability of teleporting back to initial node distribution.')
flags.DEFINE_float('rho', 1e-3, 'L1 regularization in sparse PageRank.')
flags.DEFINE_integer('patch_size',
                     5,
                     'Size of the patch used to compute node features.',
                     lower_bound=1)
flags.DEFINE_enum('optimizer', 'adam', ['adam', 'sgd'], 'optimizer to use')
flags.DEFINE_float('learning_rate', 1e-2, 'Learning rate for the optimizer.')
flags.DEFINE_string(
    'tensorboard_logdir', None,
    'Path to directory where tensorboard summaries are stored.')
flags.DEFINE_bool(
    'use_node_weights', True,
    'Whether to use the learned node weights in the downstream graph model.')
flags.DEFINE_integer('max_subgraph_size', 100,
Example #10
"""
A cartpole learning example with OpenAI gym
===========================================

This example illustrates how to use `OpenAI Gym <https://gym.openai.com/>`_ to 
train a cartpole task.
"""

import norse.task.cartpole as cartpole
from absl import app
from absl import flags

flags.DEFINE_enum("device", "cpu", ["cpu", "cuda"], "Device to use by pytorch.")
flags.DEFINE_integer("episodes", 100, "Number of training trials.")
flags.DEFINE_float("learning_rate", 1e-3, "Learning rate to use.")
flags.DEFINE_float("gamma", 0.99, "discount factor to use")
flags.DEFINE_integer(
    "log_interval", 10, "In which intervals to display learning progress."
)
flags.DEFINE_enum("model", "super", ["super"], "Model to use for training.")
flags.DEFINE_enum("policy", "snn", ["snn", "lsnn", "ann"], "Select policy to use.")
flags.DEFINE_boolean("render", False, "Render the environment")
flags.DEFINE_string("environment", "CartPole-v1", "Gym environment to use.")
flags.DEFINE_integer("random_seed", 1234, "Random seed to use")

def main():
    app.run(cartpole.main)

if __name__ == "__main__":
    main()
Example #11
    sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "..")))
    from utils.process_wav import wav2fb
    from utils.split_data import split_data

root_dir = os.path.abspath(os.path.join(os.getcwd(), "../.."))

FLAGS = flags.FLAGS

flags.DEFINE_string("data_dir",
                    os.path.join(root_dir, "data/vad_data"),
                    help="the vad data dir")
flags.DEFINE_string("save_dir",
                    os.path.join(root_dir, "data/bin"),
                    help="save fbank vector dir")
flags.DEFINE_string("category", "test", help="the category of data")
flags.DEFINE_float("validata_scale", 0.05, help="the scale of validate data")


def main(argv):
    # Split the dataset
    train_list, validate_list = split_data(FLAGS.data_dir, FLAGS.category,
                                           FLAGS.validata_scale)
    # Write the feature vectors to bin files
    # Process the training data: split into training and validation sets
    if FLAGS.validata_scale > 0:
        wav2fb(train_list, os.path.abspath(FLAGS.save_dir), "train")
        wav2fb(validate_list, os.path.abspath(FLAGS.save_dir), "validate")
    # Process enrollment and verification data (validata_scale=0)
    else:
        wav2fb(train_list, os.path.abspath(FLAGS.save_dir), FLAGS.category)
Example #12
FLAGS = flags.FLAGS

flags.DEFINE_string('file_pattern', None, 'Dataset location.')
flags.DEFINE_string('sk', None, 'Samples name.')
flags.DEFINE_alias('samples_key', 'sk')
flags.DEFINE_integer('ml', 16000, 'Minimum length.')
flags.DEFINE_alias('min_length', 'ml')
flags.DEFINE_string('label_key', None, 'Name of label to use.')
flags.DEFINE_list('label_list', None, 'List of possible label values.')

flags.DEFINE_integer('batch_size', None, 'The number of images in each batch.')
flags.DEFINE_integer('tbs', None, 'not used')

flags.DEFINE_integer('nc', None, 'num_clusters')
flags.DEFINE_float('alpha_init', None, 'Initial autopool alpha.')
flags.DEFINE_alias('ai', 'alpha_init')
flags.DEFINE_boolean('ubn', None, 'Whether to normalize')
flags.DEFINE_float('lr', None, 'not used')

flags.DEFINE_string('logdir', None,
                    'Directory where the model was written to.')

flags.DEFINE_string('eval_dir', None,
                    'Directory where the results are saved to.')
flags.DEFINE_integer('take_fixed_data', None,
                     'If not `None`, take a fixed number of data elements.')
flags.DEFINE_integer('timeout', 7200, 'Wait-for-checkpoint timeout.')
flags.DEFINE_boolean(
    'calculate_equal_error_rate', False,
    'Whether to calculate the Equal Error Rate. Only '
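
The DEFINE_alias calls above make the short and long names interchangeable: the alias and its original resolve to one underlying flag. A minimal sketch of that behavior, assuming only absl-py (not project code):

# Illustrative sketch: an alias and its original flag share one value.
from absl import app
from absl import flags

flags.DEFINE_integer('ml', 16000, 'Minimum length.')
flags.DEFINE_alias('min_length', 'ml')
FLAGS = flags.FLAGS

def main(argv):
    del argv  # Unused.
    # `--ml=8000` and `--min_length=8000` set the same flag, so the two
    # attributes always agree.
    print(FLAGS.ml, FLAGS.min_length)

if __name__ == '__main__':
    app.run(main)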
Example #13
from deep_contextual_bandits import neural_linear_sampling  # local file import
from deep_contextual_bandits import posterior_bnn_sampling  # local file import
from deep_contextual_bandits import uniform_sampling  # local file import

from tensorflow.contrib import training as contrib_training

gfile = tf.compat.v1.gfile

tf.compat.v1.enable_eager_execution()

FLAGS = flags.FLAGS
FLAGS.set_default('alsologtostderr', True)
flags.DEFINE_string('logdir', '/tmp/bandits/',
                    'Base directory to save output.')
flags.DEFINE_integer('trial_idx', 0, 'Rerun idx of problem instance.')
flags.DEFINE_float('delta', 0.5, 'delta parameter for wheel bandit instance.')
flags.DEFINE_string('modeldir', '/tmp/wheel_bandit/models/multitask',
                    'Directory with pretrained models.')
flags.DEFINE_string('savedir', '/tmp/wheel_bandit/results/',
                    'Directory with saved pkl files for full results.')
flags.DEFINE_string('ckptdir', '/tmp/wheel_bandit/ckpts/',
                    'Directory with saved pkl files for full ckpts.')
flags.DEFINE_string('datasetdir', '/tmp/wheel_bandit/data/',
                    'Directory with saved data instances.')
flags.DEFINE_integer('exp_idx', 0, 'Experiment idx of full run.')
flags.DEFINE_string('prefix', 'best_', 'Prefix of best model ckpts.')
flags.DEFINE_string('suffix', '_mse.ckpt', 'Suffix of best model ckpts.')
flags.DEFINE_list('algo_names', ['uniform', 'snp_posterior_gp_offline'],
                  'List of algorithms to benchmark.')

context_dim = 2
Example #14
flags.DEFINE_string(
    'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
    ' containing attributes to use as hyperparameters.')

flags.DEFINE_string('input_image', None, 'Input image path for inference.')
flags.DEFINE_string('output_image_dir', None, 'Output dir for inference.')

# For video.
flags.DEFINE_string('input_video', None, 'Input video path for inference.')
flags.DEFINE_string('output_video', None,
                    'Output video path. If None, play it online instead.')

# For visualization.
flags.DEFINE_integer('line_thickness', None, 'Line thickness for box.')
flags.DEFINE_integer('max_boxes_to_draw', None, 'Max number of boxes to draw.')
flags.DEFINE_float('min_score_thresh', None, 'Score threshold to show box.')

# For saved model.
flags.DEFINE_string('saved_model_dir', '/tmp/saved_model',
                    'Folder path for saved model.')
flags.DEFINE_string('tflite_path', None, 'Path for exporting tflite file.')

FLAGS = flags.FLAGS


class ModelInspector(object):
  """A simple helper class for inspecting a model."""

  def __init__(self,
               model_name: Text,
               logdir: Text,
Example #15
                          "List of (string) path(s) to evaluation file(s).")

# # If the model has an adversary, the features for adversary are constructed
# # in the corresponding custom estimator implementation by filtering feature_columns passed to the learner.
flags.DEFINE_bool(
    "include_sensitive_columns", False,
    "Set the flag to include protected features in the feature_columns of the learner."
)

# Flags for setting common model parameters for all approaches
flags.DEFINE_multi_integer("primary_hidden_units", [64, 32],
                           "Hidden layer sizes of main learner.")
flags.DEFINE_integer("embedding_dimension", 32,
                     "Embedding size; if 0, use one hot.")
flags.DEFINE_integer("batch_size", 32, "Batch size.")
flags.DEFINE_float("primary_learning_rate", 0.001,
                   "learning rate for main learner.")
flags.DEFINE_string("optimizer", "Adagrad", "Name of the optimizer to use.")
flags.DEFINE_string("activation", "relu", "Name of the activation to use.")

# # Flags for approaches that have an adversary
# # Currently only for ''adversarial_reweighting'' Model.
flags.DEFINE_multi_integer("adversary_hidden_units", [32],
                           "Hidden layer sizes of adversary.")
flags.DEFINE_float("adversary_learning_rate", 0.001,
                   "learning rate for adversary.")

# # Flags for adversarial_reweighting model
flags.DEFINE_string(
    "adversary_loss_type", "ce_loss",
    "Type of adversary loss function to be used. Takes values in "
    "['ce_loss', 'hinge_loss']. 'ce_loss' stands for cross-entropy loss."
)
Example #16
File: mcts.py Project: toysmars/minigo
"""

import collections
import math

from absl import flags
import numpy as np

import coords
import go

# 722 moves for 19x19, 162 for 9x9
flags.DEFINE_integer('max_game_length', int(go.N ** 2 * 2),
                     'Move number at which game is forcibly terminated')

flags.DEFINE_float('c_puct_base', 19652,
                   'Exploration constants balancing priors vs. value net output.')

flags.DEFINE_float('c_puct_init', 1.25,
                   'Exploration constants balancing priors vs. value net output.')

flags.DEFINE_float('dirichlet_noise_alpha', 0.03 * 361 / (go.N ** 2),
                   'Concentration of the noise being injected into priors.')
flags.register_validator('dirichlet_noise_alpha', lambda x: 0 <= x < 1)

flags.DEFINE_float('dirichlet_noise_weight', 0.25,
                   'How much to weight the priors vs. dirichlet noise when mixing')
flags.register_validator('dirichlet_noise_weight', lambda x: 0 <= x < 1)

FLAGS = flags.FLAGS
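
The register_validator calls above attach a predicate that absl re-checks whenever the flag is set, including at parse time. A minimal sketch of the failure mode, assuming only absl-py (not project code):

# Illustrative sketch: a validator rejects out-of-range values during parsing.
from absl import app
from absl import flags

flags.DEFINE_float('dirichlet_noise_weight', 0.25, 'Noise mixing weight.')
flags.register_validator('dirichlet_noise_weight', lambda x: 0 <= x < 1,
                         message='must be in [0, 1)')
FLAGS = flags.FLAGS

def main(argv):
    del argv  # Unused.
    print(FLAGS.dirichlet_noise_weight)

if __name__ == '__main__':
    # Running with --dirichlet_noise_weight=1.5 aborts parsing with an
    # IllegalFlagValueError that carries the 'must be in [0, 1)' message.
    app.run(main)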

Example #17
File: shape.py Project: dustymugs/cmr
import scipy.io as sio
from collections import OrderedDict

from ..data import cub as cub_data
from ..utils import visutil
from ..utils import bird_vis
from ..utils import image as image_utils
from ..nnutils import train_utils
from ..nnutils import loss_utils
from ..nnutils import mesh_net
from ..nnutils.nmr import NeuralRenderer
from ..nnutils import geom_utils

flags.DEFINE_string('dataset', 'cub', 'cub or pascal or p3d')
# Weights:
flags.DEFINE_float('kp_loss_wt', 30., 'keypoint loss weight')
flags.DEFINE_float('mask_loss_wt', 2., 'mask loss weight')
flags.DEFINE_float('cam_loss_wt', 2., 'weights to camera loss')
flags.DEFINE_float('deform_reg_wt', 10., 'reg to deformation')
flags.DEFINE_float('triangle_reg_wt', 30.,
                   'weights to triangle smoothness prior')
flags.DEFINE_float('vert2kp_loss_wt', .16, 'reg to vertex assignment')
flags.DEFINE_float('tex_loss_wt', .5, 'weights to tex loss')
flags.DEFINE_float('tex_dt_loss_wt', .5, 'weights to tex dt loss')
flags.DEFINE_boolean(
    'use_gtpose', True,
    'if true uses gt pose for projection, but camera still gets trained.')
flags.DEFINE_boolean('include_weighted', False,
                     'if True, include weighted loss values to loss output')

opts = flags.FLAGS
Example #18
from __future__ import print_function

from absl import app
from absl import flags
import tensorflow.compat.v2 as tf

from tensorflow.examples.saved_model.integration_tests import mnist_util
from tensorflow.examples.saved_model.integration_tests import util

FLAGS = flags.FLAGS

flags.DEFINE_string('export_dir', None, 'Directory of exported SavedModel.')
flags.DEFINE_integer('epochs', 5, 'Number of epochs to train.')
flags.DEFINE_bool('retrain', False,
                  'If set, the imported SavedModel is trained further.')
flags.DEFINE_float('dropout_rate', None,
                   'If set, dropout rate passed to the SavedModel.')
flags.DEFINE_float(
    'regularization_loss_multiplier', None,
    'If set, multiplier for the regularization losses in the SavedModel.')
flags.DEFINE_bool(
    'use_fashion_mnist', False,
    'Use Fashion MNIST (products) instead of the real MNIST (digits). '
    'With this, --retrain gains a lot.')
flags.DEFINE_bool('fast_test_mode', False,
                  'Shortcut training for running in unit tests.')


def make_classifier(feature_extractor, l2_strength=0.01, dropout_rate=0.5):
    """Returns a Keras Model to classify MNIST using feature_extractor."""
    regularizer = lambda: tf.keras.regularizers.l2(l2_strength)
    net = inp = tf.keras.Input(mnist_util.INPUT_SHAPE)
Example #19
from dice_rl.google.estimators.tabular_qlearning import TabularQLearning
from dice_rl.estimators import estimator as estimator_lib
import dice_rl.utils.common as common_utils
from dice_rl.data.dataset import Dataset, EnvStep, StepType
from dice_rl.data.tf_offpolicy_dataset import TFOffpolicyDataset
import google3.learning.deepmind.xmanager2.client.google as xm  # pylint: disable=unused-import

FLAGS = flags.FLAGS

flags.DEFINE_string('env_name', 'taxi', 'Environment name.')
flags.DEFINE_integer('seed', 0, 'Initial random seed.')
flags.DEFINE_integer('num_trajectory', 100,
                     'Number of trajectories to collect.')
flags.DEFINE_integer('max_trajectory_length', 500,
                     'Cutoff trajectory at this step.')
flags.DEFINE_float('alpha', 0.0, 'How close to target policy.')
flags.DEFINE_bool('tabular_obs', True, 'Whether to use tabular observations.')
#flags.DEFINE_string('load_dir', '/cns/is-d/home/sherryy/teqdice/data',
flags.DEFINE_string('load_dir', '/cns/vz-d/home/brain-ofirnachum',
                    'Directory to load dataset from.')
flags.DEFINE_string('save_dir', None, 'Directory to save estimation results.')
flags.DEFINE_float('gamma', 0.995, 'Discount factor.')
flags.DEFINE_integer('limit_episodes', None,
                     'Number of episodes to take from dataset.')


def main(argv):
    env_name = FLAGS.env_name
    seed = FLAGS.seed
    tabular_obs = FLAGS.tabular_obs
    num_trajectory = FLAGS.num_trajectory
Example #20
from absl import flags

# Dataset/training options
flags.DEFINE_integer('seed', 1, 'Random seed.')
flags.DEFINE_integer('batch_size', 32, 'Batch size per TPU Core / GPU')
flags.DEFINE_float('base_learning_rate', 3.2e-04, 'base learning rate at the start of training session')
flags.DEFINE_integer('lr_warmup_epochs', 1, 'No. of epochs for a warmup to the base_learning_rate. 0 for no warmup')
flags.DEFINE_float('lr_drop_ratio', 0.8, 'Amount to decay the learning rate')
flags.DEFINE_bool('custom_decay_lr', False, 'Whether to specify epochs to decay learning rate.')
flags.DEFINE_list('lr_decay_epochs', [10, 20, 40, 60], 'Epochs to decay the learning rate by. Only used if custom_decay_lr is True')
flags.DEFINE_string('dataset', 'oai_challenge', 'Dataset: oai_challenge, isic_2018 or oai_full')
flags.DEFINE_bool('use_2d', True, 'True to train on 2D slices, False to train on 3D data')
flags.DEFINE_integer('train_epochs', 50, 'Number of training epochs.')
flags.DEFINE_string('aug_strategy', None, 'Augmentation Strategies: None, random-crop, noise, crop_and_noise')

# Model options
flags.DEFINE_string('model_architecture', 'unet', 'unet, r2unet, segnet, unet++, 100-Layer-Tiramisu, deeplabv3, deeplabv3_plus')
flags.DEFINE_integer('buffer_size', 5000, 'shuffle buffer size')
flags.DEFINE_bool('multi_class', True, 'Whether to train on a multi-class (Default) or binary setting')
flags.DEFINE_integer('kernel_size', 3, 'kernel size to be used')
flags.DEFINE_bool('use_batchnorm', True, 'Whether to use batch normalisation')
flags.DEFINE_bool('use_bias', True, 'Whether to use bias')
flags.DEFINE_string('channel_order', 'channels_last', 'channels_last (Default) or channels_first')
flags.DEFINE_string('activation', 'relu', 'activation function to be used')
flags.DEFINE_bool('use_dropout', False, 'Whether to use dropout')
flags.DEFINE_bool('use_spatial', False, 'Whether to use spatial Dropout. Only used if use_dropout is True')
flags.DEFINE_float('dropout_rate', 0.0, 'Dropout rate. Only used if use_dropout is True')
flags.DEFINE_string('optimizer', 'adam', 'Which optimizer to use for model: adam, rmsprop, sgd')

# UNet parameters
flags.DEFINE_list('num_filters', [64, 128, 256, 512, 1024], 'number of filters in the model')
Example #21
FLAGS = flags.FLAGS

# Basic run settings
flags.DEFINE_enum("mode",
                  default='train',
                  enum_values=['train', 'test', 'inference_benchmark'],
                  help="Select task to be performed")

flags.DEFINE_integer("seed", 12345, "Random seed")

# Training schedule flags
flags.DEFINE_integer("batch_size", 32768, "Batch size used for training")
flags.DEFINE_integer("test_batch_size", 32768,
                     "Batch size used for testing/validation")
flags.DEFINE_float("lr", 28, "Base learning rate")
flags.DEFINE_integer("epochs", 1, "Number of epochs to train for")
flags.DEFINE_integer("max_steps", None,
                     "Stop training after doing this many optimization steps")

flags.DEFINE_integer(
    "warmup_factor", 0,
    "Learning rate warmup factor. Must be a non-negative integer")
flags.DEFINE_integer("warmup_steps", 6400,
                     "Number of warmup optimization steps")
flags.DEFINE_integer(
    "decay_steps", 80000,
    "Polynomial learning rate decay steps. If equal to 0 will not do any decaying"
)
flags.DEFINE_integer(
    "decay_start_step", 64000,
Example #22
from __future__ import print_function

import functools
import json
import os
from absl import app
from absl import flags
from rdkit import Chem
from rdkit.Contrib.SA_Score import sascorer

from dqn import deep_q_networks
from dqn import molecules as molecules_mdp
from dqn import run_dqn
from dqn.tensorflow_core import core

flags.DEFINE_float('target_sas', 2.5,
                   'The target synthetic accessibility value')
flags.DEFINE_string('loss_type', 'l2', 'The loss type')
FLAGS = flags.FLAGS


class TargetSASMolecule(molecules_mdp.Molecule):
    """Target SAS reward Molecule."""
    def __init__(self, discount_factor, target_sas, loss_type, **kwargs):
        """Initializes the class.

    Args:
      discount_factor: Float. The discount factor. We only care about the
        molecule at the end of modification. In order to prevent a myopic
        decision, we discount the reward at each step by a factor of
        discount_factor ** num_steps_left, this encourages exploration with
        emphasis on long term rewards.
Example #23
    'may elapse while no new checkpoints are observed')

flags.DEFINE_bool(
    'use_tpu', True,
    'Use TPUs rather than plain CPUs')

flags.DEFINE_boolean(
    'per_host_input_for_training', True,
    'If true, input_fn is invoked per host rather than per shard.')

flags.DEFINE_string(
    'use_data', 'real',
    'One of "fake","real"')

flags.DEFINE_float(
    'learning_rate', 0.165,
    'Learning rate.')

flags.DEFINE_float(
    'depth_multiplier', 1.0,
    'Depth Multiplier on Inception')

flags.DEFINE_string(
    'optimizer', 'RMS',
    'Optimizer (one of sgd, RMS, momentum)')

flags.DEFINE_integer(
    'num_classes', 1001,
    'Number of classes to distinguish')

flags.DEFINE_integer(
Example #24
import os
import time
from absl import app
from absl import flags
from absl import logging

import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import utils  # local file import
import uncertainty_metrics as um

flags.DEFINE_integer('seed', 42, 'Random seed.')
flags.DEFINE_integer('per_core_batch_size', 64, 'Batch size per TPU core/GPU.')
flags.DEFINE_float(
    'base_learning_rate', 0.1,
    'Base learning rate when total batch size is 128. It is '
    'scaled by the ratio of the total batch size to 128.')
flags.DEFINE_integer(
    'lr_warmup_epochs', 1,
    'Number of epochs for a linear warmup to the initial '
    'learning rate. Use 0 to do no warmup.')
flags.DEFINE_float('lr_decay_ratio', 0.2, 'Amount to decay learning rate.')
flags.DEFINE_list('lr_decay_epochs', ['60', '120', '160'],
                  'Epochs to decay learning rate by.')
flags.DEFINE_integer('kl_annealing_epochs', 200,
                     'Number of epochs over which to anneal the KL term to 1.')
flags.DEFINE_float('l2', 4e-4, 'L2 regularization coefficient.')
flags.DEFINE_float('prior_stddev', 0.1, 'Fixed stddev for weight prior.')
flags.DEFINE_float(
    'stddev_init', 1e-3,
    'Initialization of posterior standard deviation parameters.')
Example #25
flags.DEFINE_integer("char_limit", 16, "Limit length for character")
flags.DEFINE_integer("word_count_limit", -1, "Min count for word")
flags.DEFINE_integer("char_count_limit", -1, "Min count for char")

flags.DEFINE_integer("capacity", 15000, "Batch size of dataset shuffle")
flags.DEFINE_integer("num_threads", 4, "Number of threads in input pipeline")
flags.DEFINE_boolean("is_bucket", False, "build bucket batch iterator or not")
flags.DEFINE_list("bucket_range", [40, 401, 40], "the range of bucket")

flags.DEFINE_integer("batch_size", 32, "Batch size")
flags.DEFINE_integer("num_steps", 60000, "Number of steps")
flags.DEFINE_integer("checkpoint", 1000, "checkpoint to save and evaluate the model")
flags.DEFINE_integer("period", 100, "period to save batch loss")
flags.DEFINE_integer("val_num_batches", 150, "Number of batches to evaluate the model")
flags.DEFINE_integer("test_num_batches", 150, "Number of batches to evaluate the model")
flags.DEFINE_float("dropout", 0.1, "Dropout prob across the layers")
flags.DEFINE_float("dropout_char", 0.05, "Dropout prob across the layers")
flags.DEFINE_float("grad_clip", 5.0, "Global Norm gradient clipping rate")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate")
flags.DEFINE_integer("lr_warm_up_num", 1000, "Number of warm-up steps of learning rate")
flags.DEFINE_float("decay", 0.9999, "Exponential moving average decay")
flags.DEFINE_float("beta1", 0.8, "Beta 1")
flags.DEFINE_float("beta2", 0.999, "Beta 2")
# flags.DEFINE_float("l2_norm", 3e-7, "L2 norm scale")
flags.DEFINE_integer("early_stop", 10, "Checkpoints for early stop")
flags.DEFINE_integer("connector_dim", 128, "Dimension of connectors of each layer")
flags.DEFINE_integer("num_heads", 8, "Number of heads in multi-head attention")

# Extensions (Uncomment corresponding line in download.sh to download the required data)
glove_char_file = os.path.join(home, "data", "glove", "glove.840B.300d-char.txt")
flags.DEFINE_string("glove_char_file", glove_char_file, "Glove character embedding")
Example #26
from absl import flags
from absl import logging
from seed_rl import grpc
from seed_rl.common import common_flags
from seed_rl.utils import utils
import tensorflow as tf

flags.DEFINE_integer('save_checkpoint_secs', 1800,
                     'Checkpoint save period in seconds.')
flags.DEFINE_integer('total_environment_frames', int(1e9),
                     'Total environment frames to train for.')
flags.DEFINE_integer('batch_size', 2, 'Batch size for training.')
flags.DEFINE_float(
    'replay_ratio', 1.5,
    'Average number of times each observation is replayed and '
    'used for training. '
    'The default of 1.5 corresponds to an interpretation of the '
    'R2D2 paper using the end of section 2.3.')
flags.DEFINE_integer('inference_batch_size', 2, 'Batch size for inference.')
flags.DEFINE_integer('unroll_length', 100, 'Unroll length in agent steps.')
flags.DEFINE_integer('num_training_tpus', 1, 'Number of TPUs for training.')
flags.DEFINE_integer(
    'update_target_every_n_step', 2500,
    'Update the target network at this frequency (expressed '
    'in number of training steps)')
flags.DEFINE_integer(
    'replay_buffer_size', 100,
    'Size of the replay buffer (in number of unrolls stored).')
flags.DEFINE_integer(
    'replay_buffer_min_size', 10,
    'Learning only starts when there is at least this number '
Example #27
        ' for max parallelization.')

    flags.DEFINE_integer(
        'batch_size', 128,
        'Batch size for detection: higher faster, but more memory intensive.')

    flags.DEFINE_string(
        'model',
        'yolo',
        # flags.DEFINE_string('model', 'frcnn',
        'Model to use, either yolo or frcnn')

    flags.DEFINE_integer('detect_every', 20,
                         'The frame interval to perform detection.')
    flags.DEFINE_float(
        'save_detection_threshold', 0.5,
        'The threshold on detections for them to be saved to the detection save file.'
    )

    # tracking params if needed
    flags.DEFINE_float('track_detection_threshold', 0.5,
                       'The threshold on detections for them to be tracked.')
    flags.DEFINE_integer(
        'max_age', 40,
        'Maximum frames between detections before a track is deleted. Bigger means tracks handle '
        'occlusions better but also might overstay their welcome.')
    flags.DEFINE_integer(
        'min_hits', 2,
        'Minimum number of detections before a track is displayed.')

    try:
        app.run(main)
Example #28
def define_hparams_flags():

    flags.DEFINE_string(
        'log_path',
        default="./mrcnn.json",
        help=(
            'The path where dllogger json file will be saved. Please include the'
            ' name of the json file as well.'
        )
    )

    flags.DEFINE_string(
        'data_dir',
        default=None,
        help=(
            'The directory where the input data is stored. Please see the model'
            ' specific README.md for the expected data format.'
        )
    )

    flags.DEFINE_string('checkpoint', default='', help='Checkpoint filepath')

    flags.DEFINE_integer(
        'eval_batch_size',
        default=8,
        help='Batch size for evaluation.'
    )

    flags.DEFINE_bool(
        'eval_after_training',
        default=True,
        help='Run one eval after the training finishes.'
    )

    flags.DEFINE_integer('eval_samples', default=5000, help='Number of evaluation samples')

    flags.DEFINE_bool(
        'include_groundtruth_in_features',
        default=False,
        help=(
            'If `val_json_file` is not provided, one can also read groundtruth'
            ' from input by setting `include_groundtruth_in_features`=True'
        )
    )

    # Gradient clipping is a fairly coarse heuristic to stabilize training.
    # This model clips the gradient by its L2 norm globally (i.e., across
    # all variables), using a threshold obtained from multiplying this
    # parameter with sqrt(number_of_weights), to have a meaningful value
    # across both training phases and different sizes of imported modules.
    # Refer value: 0.02, for 25M weights, yields clip norm 10.
    # Zero or negative number means no clipping.
    flags.DEFINE_float("global_gradient_clip_ratio", default=-1.0, help="Global Gradient Clipping Ratio")

    flags.DEFINE_float("init_learning_rate", default=2.5e-3, help="Initial Learning Rate")

    flags.DEFINE_float("warmup_learning_rate", default=0., help="Warmup Learning Rate Decay Factor")

    flags.DEFINE_bool('finetune_bn', False, 'is batchnorm training mode')

    flags.DEFINE_float("l2_weight_decay", default=1e-4, help="l2 regularization weight")

    flags.DEFINE_string('mode', default='train_and_eval', help='Mode to run: train, eval, or train_and_eval')

    flags.DEFINE_string(
        'model_dir',
        default=None,
        help='The directory where the model and training/evaluation summaries are stored.'
    )

    flags.DEFINE_float("momentum", default=0.9, help="Optimizer Momentum")

    flags.DEFINE_integer('num_steps_per_eval', default=2500, help='Number of steps per evaluation epoch.')

    flags.DEFINE_integer('save_checkpoints_steps', default=2500, help='Save a checkpoint every N steps.')

    flags.DEFINE_integer('seed', default=None, help='Set a debug seed for reproducibility.')

    flags.DEFINE_integer('train_batch_size', default=2, help='Batch size for training.')

    flags.DEFINE_integer(
        'total_steps',
        default=938240,
        help=(
            'The number of steps to use for training. This flag'
            ' should be adjusted according to the --train_batch_size flag.'
        )
    )

    flags.DEFINE_list(
        'learning_rate_decay_levels',
        default=['0.1', '0.01'],
        help=(
            'The learning rate decay levels which modify the learning rate using the formula:'
            ' `lr = decay * init_lr`. Decay factor applied at learning_rate_steps.'
        )
    )
    flags.DEFINE_list(
        'learning_rate_steps',
        default=['480000', '640000'],
        help=(
            'The steps at which learning rate changes. This flag'
            ' should be adjusted according to the --train_batch_size flag.'
        )
    )
    flags.DEFINE_integer('warmup_steps', default=1000, help='The number of steps to use warmup learning rate for')

    flags.DEFINE_bool('use_amp', default=False, help='Enable automatic mixed precision')

    flags.DEFINE_bool(
        'use_batched_nms',
        default=False,
        help='Enable Batched NMS at inference.'
    )

    flags.DEFINE_bool(
        'use_custom_box_proposals_op',
        default=False,
        help='Use GenerateBoundingBoxProposals op.'
    )

    flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')

    flags.DEFINE_bool(
        'use_tf_distributed',
        default=False,
        help='Use tensorflow distributed API'
    )

    flags.DEFINE_bool('use_xla', default=False, help='Enable XLA JIT Compiler.')

    flags.DEFINE_string('training_file_pattern', default="", help='TFRecords file pattern for the training files')

    flags.DEFINE_string('validation_file_pattern', default="", help='TFRecords file pattern for the validation files')

    flags.DEFINE_string('val_json_file', default="", help='Filepath for the validation json file')

    ############################# TO BE REMOVED ###################################

    flags.DEFINE_integer(
        'report_frequency',
        default=None,
        help='The number of batches between accuracy reports at evaluation time'
    )

    ############################# TO BE REMOVED ###################################

    ############################### ISSUES TO FIX - FLAGS #############################

    # TODO: Remove when XLA at inference fixed
    flags.DEFINE_bool(
        'allow_xla_at_inference',
        default=False,
        help='Enable XLA JIT Compiler at Inference'
    )

    return flags.FLAGS
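
A hypothetical driver for the helper above; main and the printed flag are stand-ins, and define_hparams_flags() is assumed to be defined in the same module:

# Hypothetical usage sketch, not project code.
from absl import app

FLAGS = define_hparams_flags()  # Defined in the example above.

def main(argv):
    del argv  # Unused.
    print('mode:', FLAGS.mode)

if __name__ == '__main__':
    app.run(main)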
Example #29
}

loss_fns = {
    'bce': losses.BCEWithLogits,
    'hinge': losses.Hinge,
    'was': losses.Wasserstein,
    'softplus': losses.Softplus
}

FLAGS = flags.FLAGS
# model and training
flags.DEFINE_enum('dataset', 'cifar10', ['cifar10', 'stl10'], "dataset")
flags.DEFINE_enum('arch', 'cnn32', net_G_models.keys(), "architecture")
flags.DEFINE_integer('total_steps', 50000, "total number of training steps")
flags.DEFINE_integer('batch_size', 128, "batch size")
flags.DEFINE_float('lr_G', 2e-4, "Generator learning rate")
flags.DEFINE_float('lr_D', 2e-4, "Discriminator learning rate")
flags.DEFINE_multi_float('betas', [0.5, 0.9], "for Adam")
flags.DEFINE_integer('n_dis', 1, "update the Generator every this many steps")
flags.DEFINE_integer('z_dim', 100, "latent space dimension")
flags.DEFINE_enum('loss', 'bce', loss_fns.keys(), "loss function")
flags.DEFINE_integer('seed', 0, "random seed")
# logging
flags.DEFINE_integer('eval_step', 5000, "evaluate FID and Inception Score every this many steps")
flags.DEFINE_integer('sample_step', 500, "sample images every this many steps")
flags.DEFINE_integer('sample_size', 64, "sampling size of images")
flags.DEFINE_string('logdir', './logs/DCGAN_CIFAR10', 'logging folder')
flags.DEFINE_bool('record', True, "record inception score and FID score")
flags.DEFINE_string('fid_cache', './stats/cifar10_stats.npz', 'FID cache')
# generate
flags.DEFINE_bool('generate', False, 'generate images')
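
The pattern above, passing a dict's keys to DEFINE_enum and later indexing the dict with the parsed value, keeps the allowed choices and the dispatch table in sync. A minimal sketch, assuming only absl-py (the loss names are placeholders, not the project's losses module):

# Illustrative sketch: dispatch on an enum flag whose allowed values are
# a dict's keys.
from absl import app
from absl import flags

LOSS_FNS = {'bce': lambda: 'bce loss', 'hinge': lambda: 'hinge loss'}

flags.DEFINE_enum('loss', 'bce', list(LOSS_FNS.keys()), 'loss function')
FLAGS = flags.FLAGS

def main(argv):
    del argv  # Unused.
    loss_fn = LOSS_FNS[FLAGS.loss]  # The parsed value is guaranteed to be a key.
    print(loss_fn())

if __name__ == '__main__':
    app.run(main)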
Example #30
from tensor2tensor.layers import common_image_attention as cia
from tensor2tensor.layers import common_layers
from tensor2tensor.models import transformer
import tensorflow as tf

from tensorflow_probability import bijectors as tfb
from tensorflow_probability import distributions as tfd
from tensorflow.contrib.learn.python.learn.datasets import mnist
from tensorflow.python.training import moving_averages

# TODO(vafa): Set random seed.
IMAGE_SHAPE = [28, 28, 1]
INITIAL_SCALE_BIAS = np.log(np.e / 2. - 1., dtype=np.float32)

flags.DEFINE_float("learning_rate",
                   default=0.001,
                   help="Initial learning rate.")
flags.DEFINE_integer("max_steps",
                     default=10000,
                     help="Number of training steps to run.")
flags.DEFINE_integer("latent_size",
                     default=10,
                     help="Number of latent variables.")
flags.DEFINE_integer("num_codes",
                     default=64,
                     help="Number of discrete codes in codebook.")
flags.DEFINE_integer("code_size",
                     default=16,
                     help="Dimension of each entry in codebook.")
flags.DEFINE_integer("base_depth",
                     default=32,