def get_config(): """Get the default hyperparameter configuration.""" config = base_match_config.get_config() config.random_seed = 2 config.model_type = "transformer" num_realizations = 64 config.attention_fn = favor.make_fast_generalized_attention( qkv_dim=num_realizations, features_type='deterministic', kernel_fn=jax.nn.relu, lax_scan_unroll=16) config.model_kwargs = dict(add_pos_emb=False, qk_transform_fn_factory=functools.partial( make_spe_transform_fn, spe_cls=spe.ConvSPE, spe_kwargs=dict( num_realizations=num_realizations, kernel_size=128), shared=True)) config.batch_size = 8 config.learning_rate = 0.005 config.num_train_steps = 15000 config.warmup = 3000 config.eval_frequency = 1500 return config
def get_config(): """Get the hyperparameter configuration.""" config = base_cifar10_config.get_config() config.random_seed = 0 config.model_type = "transformer" config.learning_rate = .00025 config.batch_size = 96 config.eval_frequency = TRAIN_EXAMPLES // config.batch_size config.num_train_steps = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS config.num_eval_steps = VALID_EXAMPLES // config.batch_size config.factors = 'constant * linear_warmup * cosine_decay' config.warmup = (TRAIN_EXAMPLES // config.batch_size) * 1 config.model.dropout_rate = 0.3 config.model.attention_dropout_rate = 0.2 config.model.learn_pos_emb = True config.model.num_layers = 1 config.model.emb_dim = 128 config.model.qkv_dim = 64 config.model.mlp_dim = 128 config.model.num_heads = 8 config.model.classifier_pool = "CLS" config.attention_fn = favor.make_fast_generalized_attention( qkv_dim=config.model.qkv_dim // config.model.num_heads, features_type='deterministic', kernel_fn=jax.nn.relu, lax_scan_unroll=16) return config
def get_config(): """Get the default hyperparameter configuration.""" config = base_tc_config.get_config() config.random_seed = 0 config.model_type = "transformer" config.attention_fn = favor.make_fast_generalized_attention( qkv_dim=config.qkv_dim // config.num_heads, features_type='deterministic', kernel_fn=jax.nn.relu, lax_scan_unroll=16) config.model_kwargs = dict( add_pos_emb=False, qk_transform_fn_factory=functools.partial( make_spe_transform_fn, spe_cls=spe.SineSPE, spe_kwargs=dict( num_realizations=64, num_sines=10 ), ) ) config.batch_size = 8 config.learning_rate = config.learning_rate / 32 * 8 config.num_train_steps = 30000 return config
def get_config(): """Get the default hyperparameter configuration.""" config = base_listops_config.get_config() config.model_type = "transformer" config.attention_fn = favor.make_fast_generalized_attention( qkv_dim=config.qkv_dim // config.num_heads, features_type='ortho', kernel_fn=jax.nn.relu, lax_scan_unroll=16) return config
def get_config(): """Get the default hyperparameter configuration.""" config = base_tc_config.get_config() config.model_type = "transformer" config.attention_fn = favor.make_fast_generalized_attention( qkv_dim=config.qkv_dim // config.num_heads, features_type='deterministic', kernel_fn=lambda x: jax.nn.elu(x) + 1, lax_scan_unroll=16) return config
def get_config(): """Get the default hyperparameter configuration.""" config = base_listops_config.get_config() config.random_seed = 0 config.model_type = "transformer" config.attention_fn = favor.make_fast_generalized_attention( qkv_dim=config.qkv_dim // config.num_heads, features_type='deterministic', kernel_fn=jax.nn.relu, lax_scan_unroll=16) config.batch_size = 8 config.learning_rate = config.learning_rate / 32 * 8 config.num_train_steps = 10000 return config