Example #1
def main(_):
  console.start('RNN task')

  # Configurations
  th = NlsHub(as_global=True)
  th.memory_depth = 10
  th.num_blocks = 1
  th.multiplier = 8
  th.hidden_dim = th.memory_depth * th.multiplier
  th.num_steps = 32

  th.epoch = 100000
  th.batch_size = 32
  th.learning_rate = 1e-4
  th.validation_per_round = 20
  th.print_cycle = 0

  # th.train = False
  th.smart_train = True
  th.max_bad_apples = 4
  th.lr_decay = 0.6

  th.early_stop = True
  th.idle_tol = 20
  th.save_mode = SaveMode.ON_RECORD
  th.warm_up_thres = 1
  th.at_most_save_once_per_round = True

  th.overwrite = True                        # Default: False
  th.export_note = True
  th.summary = True
  th.monitor_preact = False
  th.save_model = True

  th.allow_growth = False
  th.gpu_memory_fraction = 0.4

  description = '0'
  th.mark = 'rnn-{}x({}x{})-{}steps-{}'.format(
    th.num_blocks, th.memory_depth, th.multiplier, th.num_steps, description)
  # Get model
  model = model_lib.rnn0(th)
  # Load data
  train_set, val_set, test_set = load_wiener_hammerstein(
    th.data_dir, depth=th.memory_depth, validation_size=2000)
  assert isinstance(train_set, DataSet)
  assert isinstance(val_set, DataSet)
  assert isinstance(test_set, DataSet)

  # Train or evaluate
  if th.train:
    model.nn.train(train_set, validation_set=val_set, trainer_hub=th)
  else:
    console.show_status('Evaluating ...')
    model.evaluate(train_set, start_at=th.memory_depth)
    model.evaluate(val_set, start_at=th.memory_depth)
    model.evaluate(test_set, start_at=th.memory_depth)

  # End
  console.end()
Example #2
def main(_):
    console.start('LSTM task')

    # Configurations
    th = core.th
    th.model = models.lstm_test
    # th.model = models.lstm
    th.num_blocks = 1
    th.memory_depth = 3
    th.hidden_dim = 100

    th.epoch = 50000
    th.learning_rate = 1e-4
    th.batch_size = 512
    th.num_steps = 100
    th.val_preheat = 500
    th.validation_per_round = 0
    th.validate_cycle = 0
    th.print_cycle = 2

    th.train = True
    th.smart_train = False
    th.max_bad_apples = 4
    th.lr_decay = 0.5

    th.save_model = True
    th.overwrite = True
    th.export_note = True
    th.summary = False
    th.monitor = False

    description = ''
    th.mark = '{}x{}{}'.format(th.num_blocks, th.memory_depth, description)

    core.activate()
Example #3
def main(_):
    # Configuration
    # FLAGS.train = False
    # FLAGS.smart_train = True

    FLAGS.overwrite = True
    FLAGS.summary = True
    FLAGS.save_model = False
    FLAGS.snapshot = False

    MEMORY_DEPTH = 1
    EPOCH = 2

    # Start
    console.start('rnn_task')

    # Initiate model
    model = rnn_models.vanilla_RNN('rnn00')

    # Load data
    train_set, val_set, test_set = load_wiener_hammerstein(
        r'../data/wiener_hammerstein/whb.tfd', depth=MEMORY_DEPTH)
    assert isinstance(train_set, DataSet)
    assert isinstance(val_set, DataSet)
    assert isinstance(test_set, DataSet)

    # Train or evaluate
    if FLAGS.train:
        pass
    else:
        console.show_status('Evaluating ...')

    # End
    console.end()
Example #4
def main(_):
    console.start('GPAT Classification task (MLP)')

    # Configurations
    th = core.th
    th.model = models.mlp
    th.num_blocks = 2
    th.hidden_dim = 500
    th.actype1 = 'relu'
    th.idle_tol = 30

    th.epoch = 500
    th.learning_rate = 1e-3
    th.batch_size = 64
    th.validation_per_round = 1
    th.print_cycle = 1
    th.shuffle = True

    # th.train = False
    th.smart_train = False
    th.max_bad_apples = 4
    th.lr_decay = 0.6

    th.save_model = True
    th.overwrite = True
    th.export_note = True
    th.summary = True
    th.monitor = False

    description = 'demo'
    th.mark = 'mlp_{}x{}{}'.format(th.hidden_dim, th.num_blocks, description)

    core.activate()
Example #5
def main(_):
    console.start('mlp task')

    # Configurations
    th = NlsHub(as_global=True)
    th.memory_depth = 6
    th.num_blocks = 2
    th.multiplier = 2
    th.hidden_dim = th.memory_depth * th.multiplier
    # th.actype1 = 'lrelu'   # Default: relu

    th.epoch = 10
    th.batch_size = 32
    th.learning_rate = 1e-4
    th.validation_per_round = 5
    th.print_cycle = 100

    th.train = True
    # th.smart_train = True
    # th.max_bad_apples = 4
    # th.lr_decay = 0.6

    th.early_stop = True
    th.idle_tol = 20
    th.save_mode = SaveMode.NAIVE
    # th.warm_up_thres = 1
    # th.at_most_save_once_per_round = True

    th.overwrite = True
    th.export_note = True
    th.summary = False
    # th.monitor = True
    th.save_model = True

    th.allow_growth = False
    th.gpu_memory_fraction = 0.40

    description = '0'
    th.mark = 'mlp-{}x({}x{})-{}'.format(th.num_blocks, th.memory_depth,
                                         th.multiplier, description)
    # Get model
    model = nlsf_model_lib.mlp_00(th)
    # Load data
    train_set, val_set, test_set = load_data(th.data_dir,
                                             depth=th.memory_depth)
    assert isinstance(train_set, DataSet)
    assert isinstance(val_set, DataSet)
    assert isinstance(test_set, DataSet)

    # Train or evaluate
    if th.train:
        model.nn.train(train_set, validation_set=val_set, trainer_hub=th)
    else:
        console.show_status('Evaluating ...')
        model.evaluate(train_set, start_at=th.memory_depth)
        model.evaluate(val_set, start_at=th.memory_depth)
        model.evaluate(test_set, start_at=th.memory_depth, plot=True)

    # End
    console.end()
Example #6
def main(_):
    console.suppress_logging()
    FLAGS.train = True
    FLAGS.overwrite = True

    # Start
    console.start('MNIST VANILLA VAE')

    # Get model
    model = models.vanilla('vanilla_00')

    if FLAGS.train:
        mnist = load_mnist('../../data/MNIST',
                           flatten=True,
                           validation_size=0,
                           one_hot=True)
        model.train(training_set=mnist[pedia.training],
                    epoch=1000,
                    batch_size=128,
                    print_cycle=50,
                    snapshot_cycle=200)
    else:
        samples = model.generate(sample_num=16)
        console.show_status('{} samples generated'.format(samples.shape[0]))
        imtool.gan_grid_plot(samples, show=True)

    # End
    console.end()
Example #7
def main(_):
    console.start('WHBM task (MLP model)')

    # Configurations
    th = core.th
    th.model = models.mlp
    th.num_blocks = 2
    th.memory_depth = 80
    multiplier = 4
    th.hidden_dim = th.memory_depth * multiplier
    th.actype1 = 'relu'

    th.epoch = 50000
    th.learning_rate = 1e-4
    th.batch_size = 64
    th.validation_per_round = 5
    th.print_cycle = 50
    th.shuffle = True

    # th.train = False
    th.smart_train = True
    th.max_bad_apples = 4
    th.lr_decay = 0.5

    th.save_model = True
    th.overwrite = True
    th.export_note = True
    th.summary = True
    th.monitor = False

    description = ''
    th.mark = '{}x[{}x{}]{}'.format(th.num_blocks, th.memory_depth, multiplier,
                                    description)

    core.activate()
Example #8
def main(_):
  console.suppress_logging()
  FLAGS.train = True
  FLAGS.overwrite = False

  # Start
  console.start("MNIST DCGAN DEMO")

  # Get model
  model = models.dcgan('dcgan_002')
  # model = models.dcgan_h3_rs_nbn()

  # Train or test
  if FLAGS.train:
    mnist = load_mnist('../../data/MNIST', flatten=False, validation_size=0,
                       one_hot=True)
    model.train(training_set=mnist[pedia.training], epoch=10, batch_size=128,
                print_cycle=20, snapshot_cycle=200, D_times=1, G_times=1)
  else:
    samples = model.generate(sample_num=16)
    console.show_status('{} samples generated'.format(samples.shape[0]))
    imtool.gan_grid_plot(samples, show=True)

  # End
  console.end()
Example #9
def main(_):
    console.start('CNN task')

    # Configurations
    th = core.th
    # th.model = models.conv_test
    th.model = models.conv_2d_test
    th.actype1 = 'relu'
    th.patience = 100

    th.epoch = 5000
    th.learning_rate = 1e-3
    th.batch_size = 64
    th.validation_per_round = 1
    th.print_cycle = 10
    th.shuffle = False

    # th.train = False
    th.smart_train = True
    th.max_bad_apples = 4
    th.lr_decay = 0.6

    th.save_model = True
    th.overwrite = True
    th.export_note = True
    th.summary = True
    th.monitor = False

    th.allow_growth = True
    # th.gpu_memory_fraction =
    description = 'conv_2d_add_noise'
    th.mark = 'cnn_{}x{}{}'.format(th.hidden_dim, th.num_blocks, description)

    core.activate()
Example #10
def main(_):
    console.suppress_logging()
    FLAGS.overwrite = True
    FLAGS.train = True

    # Start
    console.start()

    # Get or define model
    model = models.vanilla('vanilla_nov9_02_h2_c', bn=False)
    # model = models.dcgan('dcgan_c00')
    # model = models.vanilla_h3_rs_nbn('vanilla_nov9_01_h3_nbn_opdef')
    # return

    # Train or test
    if FLAGS.train:
        mnist = load_mnist('../../data/MNIST',
                           flatten=True,
                           validation_size=0,
                           one_hot=True)
        model.train(training_set=mnist[pedia.training],
                    epoch=1000,
                    batch_size=128,
                    print_cycle=20,
                    snapshot_cycle=150,
                    sample_num=25)
    else:
        samples = model.generate(sample_num=16)
        console.show_status('{} samples generated'.format(samples.shape[0]))
        imtool.gan_grid_plot(samples, show=True)

    # End
    console.end()
Example #11
def main(_):
    console.suppress_logging()

    # Setting
    FLAGS.train = False
    FLAGS.overwrite = True
    # FLAGS.shuffle = True
    show_false_pred = True

    # Start
    console.start('CIFAR-10 CONV DEMO')

    # Get model
    # model = models.deep_conv('dper_do0p5_reg0p2')
    model = models.deep_conv('001_pre_bn')

    # Train or test
    cifar10 = load_cifar10('../../data/CIFAR-10',
                           flatten=False,
                           validation_size=5000,
                           one_hot=True)
    if FLAGS.train:
        model.train(training_set=cifar10[pedia.training],
                    validation_set=cifar10[pedia.validation],
                    epoch=120,
                    batch_size=64,
                    print_cycle=100)
    else:
        model.evaluate_model(cifar10[pedia.test], with_false=show_false_pred)

    # End
    console.end()
Example #12
def main(_):
  console.suppress_logging()
  FLAGS.train = TRAIN
  FLAGS.overwrite = OVERWRITE
  console.start('EXP OB 01')
  # Define system
  system = define_system()
  # Generate data
  training_set, validation_set, test_set = generate_data(system)
  if len(SYS_LOCK_ORDERS) == 1:
    homogeneous_check(system, SYS_LOCK_ORDERS[0], training_set.signls[0],
                      training_set.responses[0])
  # Identification
  # .. wiener
  wiener = Wiener(degree=WN_DEGREE, memory_depth=WN_MEN_DEPTH)
  if WIENER_ON: wiener.identify(training_set, validation_set)
  # .. vn
  homo_strs = NN_HOMO_STRS
  vns = collections.OrderedDict()
  for homo_str in homo_strs:
    console.show_status('Volterra Net homo-strength = {:.2f}'.format(homo_str))
    vn = init_vn('vn_{:.2f}{}'.format(homo_str, POSTFIX), homo_str=homo_str)
    vns[homo_str] = vn
    if FLAGS.train:
      vn.identify(training_set, validation_set,
                  batch_size=50, print_cycle=100, epoch=EPOCH)
  # Verification
  verify(vns, wiener, system, test_set)
  # End
  console.end()
Example #13
def main(_):
    console.start('MLP task')

    # Configurations
    th = core.th
    th.model = models.mlp
    th.fc_dims = [800, 500]
    th.actype1 = 'relu'

    th.epoch = 50
    th.learning_rate = 1e-5
    th.batch_size = 64
    th.validation_per_round = 2
    th.print_cycle = 20
    th.shuffle = True

    # th.train = False
    th.smart_train = False
    th.max_bad_apples = 4
    th.lr_decay = 0.6

    th.save_model = True
    th.overwrite = True
    th.export_note = True
    th.summary = True
    th.monitor = False

    description = ''
    th.mark = 'mlp_{}{}'.format(ms(th.fc_dims), description)

    export_false = True
    core.activate(export_false=export_false)
Example #14
def main(_):
    console.suppress_logging()
    FLAGS.train = True
    FLAGS.overwrite = False

    # Start
    console.start('CIFAR-10 DCGAN')

    # Get model
    model = models.dcgan('dcgan_00')

    if FLAGS.train:
        cifar10 = load_cifar10('../../data/CIFAR-10',
                               validation_size=0,
                               one_hot=True)
        model.train(training_set=cifar10[pedia.training],
                    epoch=20000,
                    batch_size=128,
                    print_cycle=20,
                    snapshot_cycle=2000)
    else:
        samples = model.generate(sample_num=16)
        console.show_status('{} samples generated'.format(samples.shape[0]))
        imtool.gan_grid_plot(samples, show=True)

    # End
    console.end()
Example #15
def main(_):
    console.suppress_logging()

    FLAGS.train = False
    FLAGS.overwrite = True
    show_false = True
    flatten = False

    # Start
    console.start('MNIST DEMO')

    # model = models.vanilla('003_post')
    model = models.deep_conv('dc_000')

    mnist = load_mnist('../../data/MNIST',
                       flatten=flatten,
                       validation_size=5000,
                       one_hot=True)
    # Train or test
    if FLAGS.train:
        model.train(training_set=mnist[pedia.training],
                    validation_set=mnist[pedia.validation],
                    epoch=30,
                    batch_size=100,
                    print_cycle=50)
    else:
        model.evaluate_model(mnist[pedia.test], with_false=show_false)

    # End
    console.end()
Example #16
def main(_):
    FLAGS.overwrite = False
    FLAGS.train = True
    play = True

    console.suppress_logging()
    console.start('TD Gomoku - vanilla')

    with tf.Graph().as_default():
        model = models.mlp00('mlp00_00')

    with tf.Graph().as_default():
        opponent = models.mlp00('mlp00_00')

    game = Game()
    if FLAGS.train:
        model.train(game,
                    episodes=500000,
                    print_cycle=20,
                    snapshot_cycle=300,
                    match_cycle=2000,
                    rounds=5,
                    rate_thresh=1.0,
                    shadow=opponent,
                    save_cycle=200,
                    snapshot_function=game.snapshot)
    else:
        if play:
            TkBoard(player=model).show()
        else:
            model.compete(game, rounds=100, opponent=opponent)

    console.end()
Example #17
def main(_):
    console.start('Task CNN (MEMBRANE)')

    th = core.th
    th.job_dir = './records_unet_alpha'
    th.model = models.unet
    th.suffix = '01'

    th.batch_size = 2
    th.learning_rate = 1e-4

    th.epoch = 3
    th.early_stop = True
    th.patience = 5
    th.print_cycle = 1
    th.validation_per_round = 4
    th.val_batch_size = 10
    th.validate_train_set = True
    th.export_tensors_upon_validation = True
    # th.probe_cycle = 1
    th.warm_up = False

    th.save_model = True
    th.overwrite = True
    th.gather_note = True
    th.summary = False
    th.warm_up_thres = 0
    th.train = False
    th.mark = 'unet_{}'.format('x')
    core.activate()
Example #18
def main(_):
    console.start('{} on TIMIT task'.format(model_name.upper()))

    th = core.th
    # ---------------------------------------------------------------------------
    # 1. folder/file names and device
    # ---------------------------------------------------------------------------
    th.job_dir += '/{:02d}_{}'.format(id, model_name)
    th.prefix = '{}_'.format(date_string())
    summ_name = model_name
    th.suffix = ''
    th.visible_gpu_id = 0

    # ---------------------------------------------------------------------------
    # 2. model setup
    # ---------------------------------------------------------------------------
    th.model = model
    """
  Layers   state_size   #params
    
  L1
  2x44: 10189
  3x30: 10225
  4x23: 10444
  L2
  2x29: 10146
  3x20: 9945
  L3
  2x23: 9938
  3x16: 9785
  """
    th.num_layers = 1

    th.unit_size = 3
    th.num_units = 30
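    # Illustrative note: unit_size x num_units = 3x30 matches the
    # '3x30: 10225' row for L1 in the table above (~10k parameters).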

    # Setting truncate_grad to False works better
    th.truncate_grad = False
    # ---------------------------------------------------------------------------
    # 3. trainer setup
    # ---------------------------------------------------------------------------
    th.epoch = 1000
    th.batch_size = 1

    th.optimizer = 'adam'
    th.learning_rate = 0.0008

    th.validation_per_round = 4
    # ---------------------------------------------------------------------------
    # 4. summary and note setup
    # ---------------------------------------------------------------------------
    th.train = True
    th.overwrite = True
    # ---------------------------------------------------------------------------
    # 5. other stuff and activate
    # ---------------------------------------------------------------------------
    th.mark = '{}({}x{})'.format(model_name, th.unit_size, th.num_units)
    th.gather_summ_name = th.prefix + summ_name + th.suffix + '.sum'
    core.activate()
Example #19
File: t01_hw.py Project: rscv5/tframe
def main(_):
  console.start('{} on CIFAR-10 task'.format(model_name.upper()))

  th = core.th
  # ---------------------------------------------------------------------------
  # 0. date set setup
  # ---------------------------------------------------------------------------
  # ---------------------------------------------------------------------------
  # 1. folder/file names and device
  # ---------------------------------------------------------------------------
  th.job_dir += '/{:02d}_{}'.format(id, model_name)
  summ_name = model_name
  th.prefix = '{}_'.format(date_string())
  th.suffix = '_t00'
  th.visible_gpu_id = 0

  # ---------------------------------------------------------------------------
  # 2. model setup
  # ---------------------------------------------------------------------------
  th.model = model
  th.centralize_data = True

  th.num_layers = 50
  th.layer_width = 100
  th.spatial_activation = 'tanh'
  th.bias_initializer = -5.

  # ---------------------------------------------------------------------------
  # 3. trainer setup
  # ---------------------------------------------------------------------------
  th.epoch = 200
  th.batch_size = 128
  th.validation_per_round = 1

  th.optimizer = tf.train.AdamOptimizer
  th.learning_rate = 0.0004

  th.patience = 5
  th.early_stop = False
  th.validate_train_set = True
  th.val_decimals = 6

  # ---------------------------------------------------------------------------
  # 4. summary and note setup
  th.export_tensors_upon_validation = True
  # th.export_gates = True

  th.train = True
  th.save_model = True
  th.overwrite = True

  # ---------------------------------------------------------------------------
  # 5. other stuff and activate
  # ---------------------------------------------------------------------------
  th.mark = '{}({}x{}-{})'.format(
    model_name, th.layer_width, th.num_layers, th.spatial_activation)
  th.gather_summ_name = th.prefix + summ_name + th.suffix + '.sum'
  core.activate()
Example #20
def main(_):
    console.start('{} on TIMIT task'.format(model_name.upper()))

    th = core.th
    # ---------------------------------------------------------------------------
    # 1. folder/file names and device
    # ---------------------------------------------------------------------------
    th.job_dir += '/{:02d}_{}'.format(id, model_name)
    th.prefix = '{}_'.format(date_string())
    summ_name = model_name
    th.suffix = ''
    th.visible_gpu_id = 0

    # ---------------------------------------------------------------------------
    # 2. model setup
    # ---------------------------------------------------------------------------
    th.model = model
    """For SxN GDU, params # = 2x(14+SxN)x(SxN)+25xSxN+25 = (53+2xSxN)xSxN+25
     Denote SxN as x, # = 2*x^2 + 53*x + 25
                      x = (sqrt(2609+8*#)-53)/4
     5000: 38; 10000: 58.6
     2 layers: 37;  3 layers: 29;  
  """
    th.num_layers = 1
    layer2config = {
        1: '15x2+5x5+3x1',
        2: '15x2+5x1+2x1',
        3: '15x1+12x1+2x1',
    }
    th.gdu_string = '15x2+7x4'
    th.gdu_string = layer2config[th.num_layers]

    th.state_size = sum([
        np.prod([int(x) for x in g.split('x')])
        for g in th.gdu_string.split('+')
    ])
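    # Illustrative check (not in the original source): with num_layers == 1,
    # gdu_string '15x2+5x5+3x1' gives state_size = 15*2 + 5*5 + 3*1 = 58,
    # close to the docstring's x ~= 58.6 for a ~10000-parameter budget;
    # likewise layer2config[2] sums to 37 and layer2config[3] to 29.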
    # ---------------------------------------------------------------------------
    # 3. trainer setup
    # ---------------------------------------------------------------------------
    th.epoch = 1000
    th.batch_size = 1

    th.optimizer = 'adam'
    th.learning_rate = 0.003

    th.validation_per_round = 4

    # ---------------------------------------------------------------------------
    # 4. summary and note setup
    # ---------------------------------------------------------------------------
    th.train = True
    th.overwrite = True
    # ---------------------------------------------------------------------------
    # 5. other stuff and activate
    # ---------------------------------------------------------------------------
    th.mark = '{}({})'.format(model_name, th.gdu_string)
    th.gather_summ_name = th.prefix + summ_name + th.suffix + '.sum'
    core.activate()
Example #21
def main(_):
    console.start('BResNet task')

    description = '0'
    # Configurations
    th = NlsHub(as_global=True)
    th.memory_depth = 80
    th.num_blocks = 3
    th.multiplier = 1
    th.hidden_dim = th.memory_depth * th.multiplier

    th.mark = 'bres-{}x({}x{})-{}'.format(th.num_blocks, th.memory_depth,
                                          th.multiplier, description)
    th.epoch = 50000
    th.batch_size = 64
    th.learning_rate = 0.0001
    th.start_at = 0
    th.reg_strength = 0.000
    th.validation_per_round = 30

    th.train = True
    th.smart_train = True
    th.idle_tol = 30
    th.max_bad_apples = 5
    th.lr_decay = 0.6
    th.early_stop = True
    th.save_mode = SaveMode.ON_RECORD
    th.warm_up_rounds = 50
    th.overwrite = True
    th.export_note = True
    th.summary = False
    th.save_model = False
    # Only overwrite when training from scratch (start_at == 0)
    th.overwrite = th.overwrite and th.start_at == 0

    # Get model
    model = model_lib.bres_net_res0(th)
    # Load data
    train_set, val_set, test_set = load_wiener_hammerstein(
        th.data_dir, depth=th.memory_depth)
    assert isinstance(train_set, DataSet)
    assert isinstance(val_set, DataSet)
    assert isinstance(test_set, DataSet)

    # Train or evaluate
    if th.train:
        model.nn.train(train_set,
                       validation_set=val_set,
                       trainer_hub=th,
                       start_at=th.start_at)
    else:
        model.evaluate(train_set, start_at=th.memory_depth)
        model.evaluate(val_set, start_at=th.memory_depth)
        model.evaluate(test_set, start_at=th.memory_depth)

    # End
    console.end()
Example #22
def main(_):
    console.start('trainer.task')

    EPOCH = 1000
    # FLAGS.train = False
    FLAGS.overwrite = True
    # FLAGS.save_best = True
    FLAGS.smart_train = True

    # Hyper parameters
    LEARNING_RATE = 0.001
    LAYER_NUM = 4
    BATCH_SIZE = 32
    MEMORY_DEPTH = 80
    LAYER_DIM = MEMORY_DEPTH * 2
    ACTIVATION = 'relu'

    # Set default flags
    FLAGS.progress_bar = True

    FLAGS.save_model = True
    FLAGS.summary = False
    FLAGS.snapshot = False

    PRINT_CYCLE = 100

    WH_PATH = os.path.join(nls_root, 'data/wiener_hammerstein/whb.tfd')
    MARK = 'mlp00'

    # Get model
    model = model_lib.mlp_00(MARK,
                             MEMORY_DEPTH,
                             LAYER_DIM,
                             LAYER_NUM,
                             LEARNING_RATE,
                             activation=ACTIVATION)

    # Load data set
    train_set, val_set, test_set = load_wiener_hammerstein(WH_PATH,
                                                           depth=MEMORY_DEPTH)
    assert isinstance(train_set, DataSet)
    assert isinstance(val_set, DataSet)
    assert isinstance(test_set, DataSet)

    # Train or evaluate
    if FLAGS.train:
        model.identify(train_set,
                       val_set,
                       batch_size=BATCH_SIZE,
                       print_cycle=PRINT_CYCLE,
                       epoch=EPOCH)
    else:
        model.evaluate(train_set, start_at=MEMORY_DEPTH, plot=False)
        model.evaluate(val_set, start_at=MEMORY_DEPTH, plot=False)
        model.evaluate(test_set, start_at=MEMORY_DEPTH, plot=False)

    console.end()
Example #23
def main(_):
    console.start('{} on sCIFAR-10 task'.format(model_name.upper()))

    th = core.th
    # ---------------------------------------------------------------------------
    # 0. date set setup
    # ---------------------------------------------------------------------------
    # ---------------------------------------------------------------------------
    # 1. folder/file names and device
    # ---------------------------------------------------------------------------
    th.job_dir += '/{:02d}_{}'.format(id, model_name)
    summ_name = model_name
    th.prefix = '{}_'.format(date_string())
    th.suffix = ''

    th.visible_gpu_id = 0
    # ---------------------------------------------------------------------------
    # 2. model setup
    # ---------------------------------------------------------------------------
    th.model = model

    th.gdu_string = '5x60'
    th.use_reset_gate = True
    th.sog_version = 1  # sog_v1 is much faster

    th.dropout = 0.1
    th.output_dropout = 0.2
    # ---------------------------------------------------------------------------
    # 3. trainer setup
    # ---------------------------------------------------------------------------
    th.epoch = 10000
    th.batch_size = 128
    th.validation_per_round = 10

    th.optimizer = tf.train.AdamOptimizer
    th.learning_rate = 0.001

    th.clip_threshold = 1.0
    th.clip_method = 'value'
    # ---------------------------------------------------------------------------
    # 4. summary and note setup
    # ---------------------------------------------------------------------------
    th.train = True
    th.save_model = True
    th.overwrite = False

    # ---------------------------------------------------------------------------
    # 5. other stuff and activate
    # ---------------------------------------------------------------------------
    th.mark = GDU.mark()

    th.mark += '_rdp{}odp{}gc{}'.format(th.dropout, th.output_dropout,
                                        th.clip_threshold)

    th.gather_summ_name = th.prefix + summ_name + th.suffix + '.sum'
    core.activate()
Example #24
def main(_):
    console.start('basis task')

    # Configurations
    th = core.th
    id = 11
    th.job_dir = 'basis_task'
    th.model = models.multinput_ver_only
    th.actype1 = 'relu'

    th.epoch = 5000
    th.learning_rate = 1e-3
    th.batch_size = 32
    th.validation_per_round = 1
    th.val_batch_size = th.batch_size
    th.print_cycle = 20
    th.patience = 100
    th.shuffle = True

    # th.train = False
    th.smart_train = False
    th.max_bad_apples = 4
    th.lr_decay = 0.6
    th.rand_over_classes = False

    th.save_model = True
    th.overwrite = True
    th.export_note = True
    th.summary = True
    th.monitor = False

    th.allow_growth = False
    th.gpu_memory_fraction = 0.3

    th.raw_keep_prob = 0.9
    th.mfcc_keep_prob = 0.7
    th.concat_keep_prob = 0.9
    th.fold = 1
    # th.shuffle = False

    th.rand_pos = True
    th.test_all = True
    th.val_on_train_set = False

    th.visible_gpu_id = '1'

    # description = 'raw_data_mfcc_dropout_{}_random_{}_fold_{}'.format(
    #               th.mfcc_keep_prob, th.concat_keep_prob, th.fold)
    # description = 'raw_data_mfcc_dropout_{}_{}'.format(th.mfcc_keep_prob,
    #                                                    th.concat_keep_prob)
    description = 'raw_data_mfcc_model_{}'.format(id)
    # description = 'raw_data_mfcc_simlified_dropout_0.7_reg_0.2_sap_all'
    th.mark = 'cnn_{}'.format(description)

    export_false = True
    core.activate(export_false=export_false)
Example #25
def main(_):
    console.start('mlp task')

    description = 'm'
    # Configurations
    th = NlsHub(as_global=True)
    th.memory_depth = 40
    th.num_blocks = 2
    th.multiplier = 2
    th.hidden_dim = th.memory_depth * th.multiplier

    th.mark = 'mlp-{}x({}x{})-{}'.format(th.num_blocks, th.memory_depth,
                                         th.multiplier, description)
    th.epoch = 50000
    th.batch_size = 64
    th.learning_rate = 0.001
    th.validation_per_round = 20

    th.train = True
    th.smart_train = False
    th.idle_tol = 20
    th.max_bad_apples = 4
    th.lr_decay = 0.6
    th.early_stop = True
    th.save_mode = SaveMode.ON_RECORD
    th.warm_up_rounds = 50
    th.overwrite = True
    th.export_note = True
    th.summary = True
    th.monitor = True
    th.save_model = False

    th.allow_growth = False
    th.gpu_memory_fraction = 0.4

    # Get model
    model = model_lib.mlp_00(th)
    # Load data
    train_set, val_set, test_set = load_wiener_hammerstein(
        th.data_dir, depth=th.memory_depth)
    assert isinstance(train_set, DataSet)
    assert isinstance(val_set, DataSet)
    assert isinstance(test_set, DataSet)

    # Train or evaluate
    if th.train:
        model.nn.train(train_set, validation_set=val_set, trainer_hub=th)
    else:
        console.show_status('Evaluating ...')
        model.evaluate(train_set, start_at=th.memory_depth)
        model.evaluate(val_set, start_at=th.memory_depth)
        model.evaluate(test_set, start_at=th.memory_depth, plot=True)

    # End
    console.end()
Example #26
def main(_):
    console.start('trainer.task')

    # Set default flags
    FLAGS.train = True
    if FLAGS.use_default:
        FLAGS.overwrite = True
        FLAGS.smart_train = False
        FLAGS.save_best = False

    FLAGS.smart_train = True
    FLAGS.save_best = False

    WH_PATH = FLAGS.data_dir

    MARK = 'lottery02'
    MEMORY_DEPTH = 80
    PRINT_CYCLE = 50
    EPOCH = 1000
    LR = 0.000088

    LAYER_DIM = MEMORY_DEPTH * FLAGS.coe
    # ACTIVATION = FLAGS.activation
    ACTIVATION = 'relu'
    # BRANCHES = FLAGS.branches
    BRANCHES = 6
    LR_LIST = [FLAGS.lr1] * (BRANCHES + 1)
    FLAGS.smart_train = True

    # Get model
    model = model_lib.mlp02(MARK,
                            MEMORY_DEPTH,
                            BRANCHES,
                            LAYER_DIM,
                            LR,
                            ACTIVATION,
                            identity_init=True)

    # Load data set
    train_set, val_set, test_set = load_wiener_hammerstein(WH_PATH,
                                                           depth=MEMORY_DEPTH)
    assert isinstance(train_set, DataSet)
    assert isinstance(val_set, DataSet)
    assert isinstance(test_set, DataSet)

    # Train
    if FLAGS.train:
        model.identify(train_set,
                       val_set,
                       batch_size=64,
                       print_cycle=PRINT_CYCLE,
                       epoch=EPOCH,
                       lr_list=LR_LIST)

    console.end()
Example #27
def main(_):
    console.start('{} on TO task'.format(model_name.upper()))

    th = core.th
    # ---------------------------------------------------------------------------
    # 0. date set setup
    # ---------------------------------------------------------------------------
    th.sequence_length = 100
    th.bits = 3

    # ---------------------------------------------------------------------------
    # 1. folder/file names and device
    # ---------------------------------------------------------------------------
    th.job_dir += '/{:02d}_{}'.format(id, model_name)
    summ_name = model_name
    th.visible_gpu_id = 0
    th.prefix = '{}_'.format(date_string())
    th.suffix = ''

    # ---------------------------------------------------------------------------
    # 2. model setup
    # ---------------------------------------------------------------------------
    th.model = model

    th.gam_config = '6x10'
    th.head_size = 10

    th.hyper_kernel = 'gru'
    th.state_size = 60
    th.num_layers = 1

    # ---------------------------------------------------------------------------
    # 3. trainer setup
    # ---------------------------------------------------------------------------
    th.max_iterations = 50000
    th.optimizer = tf.train.AdamOptimizer
    th.learning_rate = 0.001

    # ---------------------------------------------------------------------------
    # 4. summary and note setup
    # ---------------------------------------------------------------------------
    th.export_tensors_upon_validation = True

    # ---------------------------------------------------------------------------
    # 5. other stuff and activate
    # ---------------------------------------------------------------------------
    tail = '_{}bits_L{}'.format(th.bits, th.sequence_length)
    th.mark = GamRHN.mark() + tail
    th.gather_summ_name = th.prefix + summ_name + tail + th.suffix + '.sum'
    core.activate()
Example #28
def main(_):
    console.start('Multinput task')

    # Configurations
    th = core.th
    th.job_dir = 'res_task'
    th.model = models.res_00
    th.actype1 = 'relu'

    th.epoch = 5000
    th.learning_rate = 1e-3
    th.batch_size = 32
    th.validation_per_round = 1
    th.val_batch_size = th.batch_size
    th.print_cycle = 20
    th.patience = 100
    th.shuffle = True

    # th.train = False
    th.smart_train = False
    th.max_bad_apples = 4
    th.lr_decay = 0.6
    th.rand_over_classes = False

    th.save_model = True
    th.overwrite = True
    th.export_note = True
    th.summary = True
    th.monitor = False

    th.allow_growth = False
    th.gpu_memory_fraction = 0.4

    th.raw_keep_prob = 0.9
    th.mfcc_keep_prob = 0.7
    th.concat_keep_prob = 0.9
    th.fold = 0
    # th.shuffle = False

    th.rand_pos = True

    th.visible_gpu_id = '0'

    # description = 'cnn_raw_data_mfcc_random_rand'
    description = 'raw_data_mfcc_dropout_{}_{}'.format(th.mfcc_keep_prob,
                                                       th.concat_keep_prob)
    th.mark = 'cnn_{}'.format(description)

    export_false = True
    core.activate(export_false=export_false)
Example #29
def main(_):
  console.start('{} on TIMIT task'.format(model_name.upper()))

  th = core.th
  # ---------------------------------------------------------------------------
  # 1. folder/file names and device
  # ---------------------------------------------------------------------------
  th.job_dir += '/{:02d}_{}'.format(id, model_name)
  th.prefix = '{}_'.format(date_string())
  summ_name = model_name
  th.suffix = ''
  th.visible_gpu_id = 0

  # ---------------------------------------------------------------------------
  # 2. model setup
  # ---------------------------------------------------------------------------
  th.model = model

  """
  Layers   state_size   #params
    1          33        10156
    2          21        10378
    3          17        10752
  """
  th.num_layers = 1
  th.state_size = 33

  layer2size = {1: 33, 2: 21, 3: 17, 4: 14}
  th.state_size = layer2size[th.num_layers]
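  # Illustrative note: layer2size mirrors the docstring table above (plus a
  # 4-layer entry); each size keeps the model near 10k parameters.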
  # ---------------------------------------------------------------------------
  # 3. trainer setup
  # ---------------------------------------------------------------------------
  th.epoch = 1000
  th.batch_size = 1

  th.optimizer = 'adam'
  th.learning_rate = 0.003

  th.validation_per_round = 4
  # ---------------------------------------------------------------------------
  # 4. summary and note setup
  # ---------------------------------------------------------------------------
  th.train = True
  th.overwrite = True
  # ---------------------------------------------------------------------------
  # 5. other stuff and activate
  # ---------------------------------------------------------------------------
  th.mark = '{}({})'.format(model_name, th.state_size)
  th.gather_summ_name = th.prefix + summ_name + th.suffix + '.sum'
  core.activate()
Example #30
def main(_):
    console.start('{} on TO task'.format(model_name.upper()))

    th = core.th
    # ---------------------------------------------------------------------------
    # 0. date set setup
    # ---------------------------------------------------------------------------
    th.sequence_length = 100
    th.bits = 3

    # ---------------------------------------------------------------------------
    # 1. folder/file names and device
    # ---------------------------------------------------------------------------
    th.job_dir += '/{:02d}_{}'.format(id, model_name)
    summ_name = model_name
    th.visible_gpu_id = 0
    th.prefix = '{}_'.format(date_string())
    th.suffix = ''

    # ---------------------------------------------------------------------------
    # 2. model setup
    # ---------------------------------------------------------------------------
    th.model = model

    th.fast_layers = 2
    table = {2: 41, 3: 45, 4: 47, 5: 49, 6: 50}
    th.fast_size = table[th.fast_layers]
    th.slow_size = th.fast_size
    th.hyper_kernel = 'lstm'

    th.forget_bias_initializer = 2.0
    # ---------------------------------------------------------------------------
    # 3. trainer setup
    # ---------------------------------------------------------------------------
    th.max_iterations = 50000
    th.optimizer = tf.train.AdamOptimizer
    th.learning_rate = 0.001

    # ---------------------------------------------------------------------------
    # 4. summary and note setup
    # ---------------------------------------------------------------------------
    th.export_tensors_upon_validation = True

    # ---------------------------------------------------------------------------
    # 5. other stuff and activate
    # ---------------------------------------------------------------------------
    tail = '_{}bits_L{}'.format(th.bits, th.sequence_length)
    th.mark = FastSlow.mark() + tail
    th.gather_summ_name = th.prefix + summ_name + tail + th.suffix + '.sum'
    core.activate()