Example #1
File: config.py  Project: aliutkus/spe
def get_config():
    """Get the hyperparameter configuration."""
    # Relies on names imported by the surrounding file: functools, spe, favor,
    # make_spe_transform_fn, base_cifar10_config, and the dataset constants
    # TRAIN_EXAMPLES, VALID_EXAMPLES, NUM_EPOCHS.
    config = base_cifar10_config.get_config()
    config.random_seed = 1
    config.model_type = "transformer"
    config.learning_rate = 0.00025
    config.batch_size = 96
    # Evaluate once every four epochs' worth of steps.
    config.eval_frequency = 4 * TRAIN_EXAMPLES // config.batch_size
    config.num_train_steps = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
    config.num_eval_steps = VALID_EXAMPLES // config.batch_size
    config.factors = 'constant * linear_warmup * cosine_decay'
    # Warm up for one epoch's worth of steps.
    config.warmup = (TRAIN_EXAMPLES // config.batch_size) * 1

    config.model.dropout_rate = 0.3
    config.model.attention_dropout_rate = 0.2
    config.model.learn_pos_emb = True
    config.model.num_layers = 1
    config.model.emb_dim = 128
    config.model.qkv_dim = 64
    config.model.mlp_dim = 128
    config.model.num_heads = 8
    config.model.classifier_pool = "CLS"
    # Disable absolute position embeddings; positions come from SineSPE below.
    config.model.add_pos_emb = False
    num_realizations = 32
    config.model.qk_transform_fn_factory = functools.partial(
        make_spe_transform_fn,
        spe_cls=spe.SineSPE,
        spe_kwargs=dict(num_realizations=num_realizations, num_sines=10))
    # FAVOR+ feature size matches the SPE output dimension.
    config.attention_fn = favor.make_fast_softmax_attention(
        qkv_dim=num_realizations, lax_scan_unroll=16)
    return config
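
A note on the two settings above. functools.partial stores a ready-to-call factory in the config: spe.SineSPE and its kwargs are bound at config time, while the transform function itself is only constructed when the model is built. The FAVOR+ attention is then created with qkv_dim=num_realizations, since SPE maps queries and keys into a num_realizations-dimensional feature space. A minimal runnable sketch of the factory pattern, with DummySineSPE and make_spe_transform_fn_sketch as hypothetical stand-ins for the project's classes:

import functools

class DummySineSPE:
    """Stand-in for spe.SineSPE; only records its hyperparameters."""
    def __init__(self, num_realizations, num_sines):
        self.num_realizations = num_realizations
        self.num_sines = num_sines

def make_spe_transform_fn_sketch(spe_cls, spe_kwargs):
    """Hypothetical analogue of make_spe_transform_fn."""
    spe_layer = spe_cls(**spe_kwargs)
    def transform_fn(q, k):
        # A real implementation would use spe_layer to project q and k
        # into the num_realizations-dimensional SPE feature space here.
        return q, k
    return transform_fn

factory = functools.partial(
    make_spe_transform_fn_sketch,
    spe_cls=DummySineSPE,
    spe_kwargs=dict(num_realizations=32, num_sines=10))
transform_fn = factory()  # constructed lazily, e.g. once per model build
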
Example #2
def get_config():
    """Get the default hyperparameter configuration."""
    config = base_match_config.get_config()
    config.model_type = "transformer"
    # Standard multi-head split: FAVOR+ sees the per-head feature dimension.
    config.attention_fn = favor.make_fast_softmax_attention(
        qkv_dim=config.qkv_dim // config.num_heads, lax_scan_unroll=16)
    return config
Example #3
File: config.py  Project: aliutkus/spe
def get_config():
    """Get the hyperparameter configuration."""
    config = base_cifar10_config.get_config()
    config.random_seed = 0
    config.model_type = "transformer"
    config.learning_rate = 0.00025
    config.batch_size = 96
    config.eval_frequency = TRAIN_EXAMPLES // config.batch_size
    config.num_train_steps = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
    config.num_eval_steps = VALID_EXAMPLES // config.batch_size
    config.factors = 'constant * linear_warmup * cosine_decay'
    config.warmup = (TRAIN_EXAMPLES // config.batch_size) * 1

    config.model.dropout_rate = 0.3
    config.model.attention_dropout_rate = 0.2
    config.model.learn_pos_emb = True
    config.model.num_layers = 1
    config.model.emb_dim = 128
    config.model.qkv_dim = 64
    config.model.mlp_dim = 128
    config.model.num_heads = 8
    config.model.classifier_pool = "CLS"

    config.attention_fn = favor.make_fast_softmax_attention(
        qkv_dim=config.model.qkv_dim // config.model.num_heads,
        lax_scan_unroll=16)
    return config
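
Unlike example #1, where the attention feature size is set to the SPE num_realizations, this variant uses the standard multi-head split; with the values set above that is 64 // 8 = 8 per head:

# Per-head feature dimension handed to make_fast_softmax_attention above.
qkv_dim, num_heads = 64, 8
assert qkv_dim // num_heads == 8
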
Example #4
File: config.py  Project: aliutkus/spe
def get_config():
  """Get the default hyperparameter configuration."""
  config = base_match_config.get_config()
  config.random_seed = 2
  config.model_type = "transformer"
  # SineSPE with 64 realizations; this also sets the FAVOR+ feature
  # dimension below.
  num_realizations = 64
  config.model_kwargs = dict(
    add_pos_emb=False,  # positions come from SPE, not learned embeddings
    qk_transform_fn_factory=functools.partial(
      make_spe_transform_fn,
      spe_cls=spe.SineSPE,
      spe_kwargs=dict(
        num_realizations=num_realizations,
        num_sines=10
      ),
    )
  )
  config.attention_fn = favor.make_fast_softmax_attention(
    qkv_dim=num_realizations,
    lax_scan_unroll=16)
  config.batch_size = 8
  config.learning_rate = 0.005
  config.num_train_steps = 15000
  config.warmup = 3000
  config.eval_frequency = 1500
  return config
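
Unlike examples #1 and #3, the SPE settings here travel through config.model_kwargs rather than config.model fields; a dict like this is typically splatted into the model constructor. A minimal sketch with a hypothetical DummyTransformer class (not the project's API):

class DummyTransformer:
    """Hypothetical model; stands in for the project's transformer class."""
    def __init__(self, add_pos_emb=True, qk_transform_fn_factory=None):
        self.add_pos_emb = add_pos_emb
        # Build the q/k transform once, if a factory was supplied.
        self.qk_transform_fn = (
            qk_transform_fn_factory() if qk_transform_fn_factory else None)

model_kwargs = dict(add_pos_emb=False, qk_transform_fn_factory=None)
model = DummyTransformer(**model_kwargs)
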
Example #5
File: config.py  Project: aliutkus/spe
def get_config():
    """Get the default hyperparameter configuration."""
    config = base_listops_config.get_config()
    config.random_seed = 2
    config.model_type = "transformer"
    config.attention_fn = favor.make_fast_softmax_attention(
        qkv_dim=config.qkv_dim // config.num_heads, lax_scan_unroll=16)
    config.batch_size = 8
    config.learning_rate = config.learning_rate / 32 * 8
    config.num_train_steps = 10000
    return config
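
The expression config.learning_rate / 32 * 8 reads as the linear learning-rate scaling rule, on the assumption that the base config's rate was tuned for batch size 32. A standalone sketch (scale_lr is a made-up helper):

def scale_lr(base_lr, base_batch_size, new_batch_size):
    """Linear learning-rate scaling with batch size."""
    return base_lr / base_batch_size * new_batch_size

# E.g., with a hypothetical base learning rate of 0.05 tuned at batch size 32:
print(scale_lr(0.05, 32, 8))  # -> 0.0125
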
Example #6
File: config.py  Project: aliutkus/spe
def get_config():
    """Get the default hyperparameter configuration."""
    config = base_match_config.get_config()
    config.random_seed = 2
    config.model_type = "transformer"
    config.attention_fn = favor.make_fast_softmax_attention(
        qkv_dim=config.qkv_dim // config.num_heads, lax_scan_unroll=16)
    config.batch_size = 8
    config.learning_rate = 0.005
    config.num_train_steps = 15000
    config.warmup = 3000
    config.eval_frequency = 1500
    return config
Example #7
def get_config():
    """Get the default hyperparameter configuration."""
    config = base_tc_config.get_config()
    config.random_seed = 0
    config.model_type = "transformer"
    config.attention_fn = favor.make_fast_softmax_attention(
        qkv_dim=config.qkv_dim // config.num_heads, lax_scan_unroll=16)
    # ConvSPE (convolutional SPE) with a size-128 kernel; shared=True
    # presumably reuses one SPE module rather than creating one per use.
    config.model_kwargs = dict(add_pos_emb=False,
                               qk_transform_fn_factory=functools.partial(
                                   make_spe_transform_fn,
                                   spe_cls=spe.ConvSPE,
                                   spe_kwargs=dict(num_realizations=64,
                                                   kernel_size=128),
                                   shared=True))
    config.batch_size = 8
    config.learning_rate = config.learning_rate / 32 * 8
    config.num_train_steps = 30000
    return config
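
The shared=True flag presumably makes the factory hand out a single ConvSPE module instead of constructing a fresh one on every call. A minimal sketch of that pattern, with DummyConvSPE and make_fn_sketch as hypothetical stand-ins:

import functools

class DummyConvSPE:
    """Stand-in for spe.ConvSPE; only records its hyperparameters."""
    def __init__(self, num_realizations, kernel_size):
        self.num_realizations = num_realizations
        self.kernel_size = kernel_size

_SHARED = {}

def make_fn_sketch(spe_cls, spe_kwargs, shared=False):
    """Hypothetical analogue of make_spe_transform_fn's `shared` flag."""
    if shared:
        # Create the SPE module once and reuse it on every later call.
        if 'spe' not in _SHARED:
            _SHARED['spe'] = spe_cls(**spe_kwargs)
        return _SHARED['spe']
    return spe_cls(**spe_kwargs)

factory = functools.partial(
    make_fn_sketch, DummyConvSPE,
    dict(num_realizations=64, kernel_size=128), shared=True)
assert factory() is factory()  # one shared module is handed out
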