Example #1
def create_config(data):
    config = trf.Config(data)
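    # NCE-style settings below: batch_size data sentences per step and
    # noise_factor noise samples per data sample; 'linear' presumably ties
    # the per-length normalization log Z_l linearly to sentence length
    # (interpretations assumed from standard NCE/TRF usage, not confirmed here).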
    config.norm_config = 'linear'
    config.batch_size = 100
    config.noise_factor = 1
    config.noise_sampler = None  # '2gram'
    config.data_factor = 1
    config.data_sampler = config.noise_sampler

    # config.lr_feat = lr.LearningRateTime(1e-4)
    config.lr_net = lr.LearningRateTime(1e-3)  # lr.LearningRateTime(1, 0.5, tc=1e3)
    config.lr_logz = lr.LearningRateTime(0.01)
    config.opt_feat_method = 'adam'
    config.opt_net_method = 'adam'
    config.opt_logz_method = 'adam'
    config.max_epoch = 1000

    config.init_logz = config.get_initial_logz()
    config.init_global_logz = 0

    # config.prior_model_path = 'lstm/lstm_e32_h32x1_BNCE_SGD/model.ckpt'
    # feat config
    # config.feat_config.feat_type_file = '../../tfcode/feat/g4.fs'
    # config.feat_config.feat_cluster = None
    config.feat_config = None

    # net config
    config.net_config.update(task.get_config_rnn(config.vocab_size))
    # config.net_config.l2_reg = 1e-4
    # wb.mkdir('word_emb')
    # config.net_config.load_embedding_path = 'word_emb/ptb_d{}.emb'.format(config.net_config.embedding_dim)

    config.write_dbg = False

    return config
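
The `lr.LearningRateTime(a, beta, tc)` calls above point to a time-based decay schedule. Below is a minimal sketch assuming one common form, lr(t) = a / (1 + t/tc)^beta; the meaning of the constructor arguments and the exact formula used by this repo's `lr` module are assumptions, not confirmed from this snippet.

import numpy as np

# Minimal sketch of a time-decayed learning rate, assuming the common form
#   lr(t) = a / (1 + t / tc) ** beta
# The real lr.LearningRateTime may differ; the arguments mirror the calls
# above (a, beta, tc) but their semantics are assumed, not confirmed.
class LearningRateTime:
    def __init__(self, a, beta=0.0, tc=0.0):
        self.a = a        # initial rate
        self.beta = beta  # decay exponent; 0 keeps the rate constant
        self.tc = tc      # time constant delaying the onset of decay

    def get_lr(self, t):
        if self.beta == 0.0:
            return self.a  # e.g. LearningRateTime(1e-3) stays at 1e-3
        return self.a / (1.0 + t / max(self.tc, 1.0)) ** self.beta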
Example #2
def create_config(data):
    config = trf.Config(data)
    config.max_epoch = 1000
    config.batch_size = 100
    config.noise_factor = 1
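    # 'multiple' presumably keeps an independent log Z per sentence length,
    # unlike the 'linear' tying in Example #1 (assumed, not confirmed).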
    config.norm_config = 'multiple'
    config.init_logz = config.get_initial_logz()

    config.lr_feat = lr.LearningRateTime(1, 0.2, tc=1e3)
    config.lr_net = lr.LearningRateTime(1, 1., tc=1e3)  # lr.LearningRateTime(1, 0.5, tc=1e3)
    config.lr_logz = lr.LearningRateTime(1, 1., tc=1e3)
    config.opt_feat_method = 'adam'
    config.opt_net_method = 'adam'
    config.opt_logz_method = 'adam'

    # config.prior_model_path = 'lstm/lstm_e32_h32x1_BNCE_SGD/model.ckpt'
    # feat config
    # config.feat_config.feat_type_file = '../../tfcode/feat/g4.fs'
    # config.feat_config.feat_cluster = None
    config.feat_config = None

    # net config
    config.net_config.update(get_config_cnn(config.vocab_size))

    config.write_dbg = True

    # for sampler
    config.sampler_config.learning_rate = 0.1

    return config
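
For context on what `config.noise_factor` controls: in standard NCE, nu noise samples are drawn per data sample, and nu enters the classification posterior P(data | x) = p_model(x) / (p_model(x) + nu * p_noise(x)). The sketch below computes that posterior in log space; this is textbook NCE, and whether this repo's trainer matches it exactly is an assumption.

import numpy as np

# Standard NCE posterior that noise_factor (nu) enters:
#   P(data | x) = p_m(x) / (p_m(x) + nu * p_n(x))
# Computed in log space for numerical stability. This illustrates the role
# of config.noise_factor; the repo's exact loss is assumed, not confirmed.
def nce_posterior(log_p_model, log_p_noise, noise_factor):
    logit = log_p_model - log_p_noise - np.log(noise_factor)
    return 1.0 / (1.0 + np.exp(-logit))  # sigmoid(logit)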
Example #3
def get_config(data):
    config = trf.Config(data)
    # config.pi_0 = data.get_pi0(config.pi_true)
    # config.pi_true = config.pi_0
    config.norm_config = 'linear'
    config.batch_size = 100
    config.noise_factor = 4
    config.data_factor = 0
    config.train_add_noise = False

    # config.noise_sampler = '2gram'

    # config.lr_feat = lr.LearningRateTime(1e-4)
    config.lr_net = lr.LearningRateTime(1e-3)  # lr.LearningRateTime(1, 0.5, tc=1e3)
    config.lr_logz = lr.LearningRateTime(1e-2)
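    # separate schedule for the trainable noise sampler; LearningRateEpochDelay
    # presumably decays once per epoch rather than per step (assumed)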
    config.lr_sampler = lr.LearningRateEpochDelay(0.1)
    config.opt_feat_method = 'adam'
    config.opt_net_method = 'adam'
    config.opt_logz_method = 'adam'
    config.max_epoch = 1000
    # sampler
    # config.sampler_config.hidden_layers = 2
    # config.load_sampler = 'sampler/lstm_e200_h200x2/sampler.ckpt'
    # config.fix_sampler = True

    config.init_logz = config.get_initial_logz()
    config.init_global_logz = 0

    config.feat_config = None

    # net config
    config.net_config.update(get_config_rnn(config.vocab_size))
    # config.net_config.l2_reg = 1e-4
    # wb.mkdir('word_emb')
    # config.net_config.load_embedding_path = 'word_emb/ptb_d{}.emb'.format(config.net_config.embedding_dim)

    config.write_dbg = False
    config.add_sampler_as_prior = False

    return config
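
Both factories above seed the normalization terms with `config.get_initial_logz()`. A common initialization (an assumption here, not read from this snippet) starts from the uniform model: a vocabulary of size V admits V^l sequences of length l, so log Z_l = l * log(V).

import numpy as np

# Hedged sketch of a get_initial_logz()-style initialization: under a
# uniform distribution over the V**l sequences of each length l,
# log Z_l = l * log(V). Whether trf.Config does exactly this is assumed.
def initial_logz(vocab_size, min_len=1, max_len=100):
    lengths = np.arange(min_len, max_len + 1)
    return lengths * np.log(vocab_size)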