Example no. 1
0
# Maps each tuner loss metric to the optimization direction the tuner should
# use: a loss value is minimized, while an AUC-style score is maximized.
# NOTE(review): the TUNER_LOSS_* / TUNER_GOAL_* constants are defined outside
# this chunk — presumably string identifiers consumed by the hyperparameter
# tuner (see the 'autotune'/'tuner_loss' flags below); confirm at definition.
TUNER_LOSS_TO_GOAL = {
    TUNER_LOSS_LOSS: TUNER_GOAL_MIN,
    TUNER_LOSS_AUC: TUNER_GOAL_MAX,
}

# ---------------------------------------------------------------------------
# Command-line flags for the training/inference job.
# ---------------------------------------------------------------------------

# Distributed-execution environment (task index and TF master address).
flags.DEFINE_integer('task', 0, 'Task id when running online')
flags.DEFINE_string('master', '', 'TensorFlow master to use')

# Input data: per the 'dataset' help string, exactly one of --input_dir or
# --dataset is expected.
flags.DEFINE_string('input_dir', None, 'Path to input data.')
flags.DEFINE_string(
    'affinity_target_map', '',
    'Name of the affinity map from count values to affinity values. '
    'Needed only if using input_dir and running inference or using '
    'microarray values.')
flags.DEFINE_enum(
    'dataset', None,
    sorted(config.INPUT_DATA_DIRS),
    'Name of dataset with known input_dir on which to train. Either input_dir '
    'or dataset is required.')
flags.DEFINE_integer('val_fold', 0, 'Fold to use for validation.')

# Output locations: checkpoints/results go under save_base/run_name.
flags.DEFINE_string('save_base', None,
                    'Base path to save any output or weights.')
flags.DEFINE_string('run_name', None, 'Name of folder created in save_base.')
flags.DEFINE_bool(
    'interactive_display', False,
    'Scale displayed pandas DataFrames to the active terminal window?')

# Hyperparameter tuning (Vizier) configuration.
flags.DEFINE_boolean(
    'autotune', False,
    'If true, use automated hyperparameter optimization via Vizier.')
flags.DEFINE_string('tuner_target', 'mean',
                    'Target count(s) for use for tuner optimization.')
flags.DEFINE_enum('tuner_loss', 'auc/true_top_1p',
Example no. 2
0
"""

from __future__ import print_function

#from absl import app
from absl import flags as absl_flags
import tensorflow as tf
import flags

# ---------------------------------------------------------------------------
# Command-line flags for the benchmark/training script.
# ---------------------------------------------------------------------------

# Network definition: a Python file ('network') looked up under 'network_dir'.
flags.DEFINE_string('network_dir', None, 'network file path.')
flags.DEFINE_string('network', 'network.py', 'network file name')
flags.DEFINE_string('data_dir', None, 'dataset location')

# Gradient accumulation factor: gradients are accumulated over this many
# sub-batches before applying an update (emulates a larger batch in less
# memory) — presumably; confirm against benchmark_cnn usage.
flags.DEFINE_integer('small_chunk', 1, 'accumulate gradients.')
flags.DEFINE_string('memory_saving_method', None,
                    'setup the memory saving method, 1. recomputing 2. TBD ')

# Learning-rate schedule and data-augmentation options.
flags.DEFINE_enum('lr_policy', 'multistep', ('multistep', 'exp'),
                  'learning_rate policy')
flags.DEFINE_boolean('aug_flip', True,
                     'whether randomly flip left or right dataset')

# BUG FIX: the two implicitly concatenated help-string literals were missing
# a separating space, rendering as '...increase 1% fornumbers of epochs'.
flags.DEFINE_integer(
    'stop_accu_epoch', 0, 'early stop when accuracy does not increase 1% for '
    'numbers of epochs')
flags.DEFINE_boolean('save_stop', True,
                     'whether to save checkpoint when killing process')
flags.DEFINE_list(
    'aug_list', [], 'Specify a list of augmentation function names to apply '
    'during training.')

import benchmark_cnn
import memory_saving as ms
from myelindl.core import benchmark_handler
import logging