예제 #1
0
def define_cli_args():
    """Register the training CLI flags with the TensorFlow flags parser.

    Every flag is declared with a None default and the shared DESC help
    text; concrete values are expected to come from the command line.
    """
    flag_definitions = (
        (tf_flags.DEFINE_integer, consts.BATCH_SIZE),
        (tf_flags.DEFINE_string, consts.OPTIMIZER),
        (tf_flags.DEFINE_float, consts.LEARNING_RATE),
        (tf_flags.DEFINE_integer, consts.TRAIN_STEPS),
        (tf_flags.DEFINE_integer, consts.EVAL_STEPS_INTERVAL),
        (tf_flags.DEFINE_list, consts.EXCLUDED_KEYS),
    )
    # Registration order matches the original declaration order.
    for define_flag, flag_name in flag_definitions:
        define_flag(flag_name, None, DESC)
예제 #2
0
# Command-line flags describing the model outputs and how to post-process
# prediction results.
flags.DEFINE_enum('label_dim', '2', ['2', '6'],
                  'Number of features in the output/label time step')
flags.DEFINE_string('output_data_dir', None,
                    'Directory with prediction outputs.')
flags.DEFINE_bool('neutral_losses', False,
                  'True if H2O and NH3 losses are modeled.')
flags.DEFINE_bool(
    'batch_prediction', True,
    'True if batch prediction instead of online was used to generate outputs.')
# Help text below previously rendered as "...way asthe model outputs...",
# "...values,where..." and "outptu": implicit string concatenation was missing
# the joining spaces and had a typo. Fixed here.
flags.DEFINE_string(
    'add_input_data_pattern', None,
    ('Input data filename pattern for additional features to be included in '
     'the final output. These inputs should be formatted in the same way as '
     'the model outputs - ie, JSON format with "key" and "output" values, '
     'where the key is an integer and output is a list of feature values.'))
flags.DEFINE_list('add_feature_names', None,
                  'A comma-separated list of additional feature names.')


def reformat_outputs(row, label_dim, neutral_losses):
    """Reformats output from the spectral model into a TSV shape.

  Args:
    row: A pandas series.
    label_dim: A dimensionality of output time (ion type) point.
    neutral_losses: True if NH3/H2O losses should be included, False otherwise.

  Raises:
    ValueError: label_dim is not 2 or 6.

  Returns:
    A pandas series with predicted intensities and ion types added to the input.
예제 #3
0
from sklearn.preprocessing import LabelEncoder

from library.utils import RNNDataset
from library.utils import create_path
from library.utils import BatchManager

# Quiet TensorFlow's C++ backend logging ('3' filters out INFO/WARNING/ERROR,
# leaving only fatal messages) — must be set before TF spins up.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Module-wide handle to the parsed command-line flag values.
FLAGS = flags.FLAGS

# Restore the fitted character encoder produced by the preprocessing step;
# its vocabulary size is used below when registering 'vocab_size'.
# NOTE(review): pickle.load is unsafe on untrusted files — this assumes the
# artifact is generated locally; confirm.
with open('./data/char_encoder.pkl', 'rb') as f:
    char_encoder = pickle.load(f)

if __name__ == '__main__':
    # Hyperparameter flags are registered only when this module is executed
    # as a script, so importing it elsewhere does not pollute the flag set.
    # Model architecture.
    flags.DEFINE_integer('embedding_size', 100,
                         'Number of units in embedding layer')
    flags.DEFINE_list('num_rnn_layer_units', [128, 128],
                      'Number of units in lstm cells')
    flags.DEFINE_float('keep_prob', 0.8, 'Probabily for lstm nodes to be kept')
    # Optimization.
    flags.DEFINE_float('learning_rate', 1e-4,
                       'Learning rate for Adam Optimizer')
    flags.DEFINE_integer('batch_size', 500, 'Batch size for training set')
    flags.DEFINE_integer('num_epochs', 200, 'Number of epochs')
    flags.DEFINE_boolean('shuffle', True, 'Whether shuffle the training set')
    # Evaluation / early stopping cadence.
    flags.DEFINE_integer(
        'eval_frequency', 10, 'Number of steps between validation set '
        'evaluations or model file updates')
    flags.DEFINE_integer(
        'early_stopping_eval_rounds', 5, 'Perform early stop if the loss does '
        'not drop in x evaluation rounds')
    # Vocabulary size comes from the encoder loaded at module import time.
    flags.DEFINE_integer('vocab_size', char_encoder.classes_.shape[0],
                         'Number of chars in vocabulary')
from tensorflow import flags
import numpy as np

#######################################################################
### Hyper parameter setting
#######################################################################
# All hyperparameters are registered from a single dispatch table so the
# section stays compact; registration order and values are unchanged.
_HPARAM_DEFS = (
    # Input data
    (flags.DEFINE_integer, 'INPUT_DEPTH', 93, 'The number of terms'),
    (flags.DEFINE_integer, 'INPUT_WIDTH', 256, 'max length of document'),
    # Class
    (flags.DEFINE_integer, 'NUM_OF_CLASS', 2, 'positive, negative'),
    # Parameter
    (flags.DEFINE_integer, 'HIDDEN_DIMENSION', 128, 'hidden dimension'),
    (flags.DEFINE_list, 'CONV_KERNEL_WIDTH', [19, 13], 'kernel width'),
    # Save
    (flags.DEFINE_string, 'WRITER', 'Text_CNN', 'saver name'),
    (flags.DEFINE_boolean, 'WRITER_generate', True, 'saver generate'),
    (flags.DEFINE_boolean, 'resume', False, 'resume param'),
    # Train
    (flags.DEFINE_integer, 'BATCH_SIZE', 128, 'batch size'),
    (flags.DEFINE_integer, 'TEST_BATCH', 128, 'test batch size'),
    (flags.DEFINE_integer, 'NUM_OF_EPOCH', 20, 'number of epoch'),
    (flags.DEFINE_float, 'lr_value', 0.01, 'initial learning rate'),
    (flags.DEFINE_float, 'lr_decay', 0.9, 'learning rate decay'),
    (flags.DEFINE_multi_integer, 'Check_Loss', [5] * 20, 'loss decay'),
)
for _define, _name, _default, _help in _HPARAM_DEFS:
    _define(_name, _default, _help)

# FLAGS
# Training/visualization flags registered via one table; order, names,
# defaults, and help strings are identical to the original declarations.
_FLAG_TABLE = (
    (flags.DEFINE_boolean, 'shuffle', True,
     'Whether to shuffle the training set for each epoch'),
    (flags.DEFINE_integer, 'eval_frequency', 20,
     'Number of steps between validation set '
     'evaluations or model file updates'),
    (flags.DEFINE_string, 'root_logdir', './tf_logs/',
     'Root directory for storing tensorboard logs'),
    (flags.DEFINE_string, 'root_model_dir', './tf_models/',
     'Root directory for storing tensorflow models'),
    (flags.DEFINE_integer, 'random_state', 666, 'Random state or seed'),
    (flags.DEFINE_float, 'beta1', 0.5, 'beta1 for AdamOptimizer'),
    (flags.DEFINE_string, 'data_nm', 'mnist', 'Select from mnist and celeba'),
    (flags.DEFINE_integer, 'noise_len', 100, 'Length of noise vector'),
    (flags.DEFINE_integer, 'sample_freq', 100,
     'Number of steps between sample pic generations'),
    (flags.DEFINE_list, 'input_img_size', [28, 28, 1], 'Size of input image'),
)
for _define, _name, _default, _help in _FLAG_TABLE:
    _define(_name, _default, _help)


def main(argv=None):
    """Build the DCGAN graph and run training using the configured flags.

    Args:
      argv: Unused; accepted for compatibility with app-style entry points.
    """
    # Derive per-run log/model directories under the configured roots and
    # make sure both paths exist before training starts.
    log_dir, model_dir = generate_log_model_dirs(FLAGS.root_logdir,
                                                 FLAGS.root_model_dir)
    create_path(log_dir)
    create_path(model_dir)

    # Start from a clean default graph so repeated invocations in the same
    # process do not accumulate ops.
    tf.reset_default_graph()
    with tf.Session() as sess:
        dcgan_nn = DCGAN(sess, log_dir, model_dir)
        dcgan_nn.build_graph(FLAGS)
        dcgan_nn.train(FLAGS)