class Config:
    # Top-level training configuration (chika-style config class).
    # NOTE(review): the chika.* field factories suggest this class is decorated
    # with @chika.config at the definition site — confirm against the full file.
    optim: Optim  # nested optimizer/schedule sub-config (see the Optim class)
    # Model architecture name, restricted to the listed choices
    # (presumably the first entry is the default — verify against chika docs).
    model: str = chika.choices("resnet20", "resnet56", "se_resnet56", "wrn28_2", "resnext29_32x4d")
    batch_size: int = 128  # per-iteration minibatch size
    use_amp: bool = False  # enable automatic mixed precision training
    seed: int = 1          # RNG seed for reproducibility
    # GPU index, constrained to [0, torch.cuda.device_count()] at parse time.
    gpu: int = chika.bounded(0, 0, torch.cuda.device_count())
class Optim:
    # Optimizer / learning-rate-schedule hyperparameters (chika-style sub-config).
    epochs: int = 200  # total number of training epochs
    # LR scheduler selection; restricted to the listed choices.
    name: str = chika.choices("abel", "steps", "cosine")
    lr: float = 0.1             # initial learning rate
    weight_decay: float = 5e-4  # L2 weight-decay coefficient
    gamma: float = 0.1  # presumably the multiplicative LR decay factor — TODO confirm
    # Epoch milestones for the stepped schedule (default sequence: [100, 150]).
    steps: List[int] = chika.sequence(100, 150)
class Config:
    # Training configuration with data-loading and performance toggles
    # (chika-style config class; presumably decorated with @chika.config — confirm).
    # Model name drawn from the project's model registry.
    model: str = chika.choices(*MODEL_REGISTRY.choices())
    batch_size: int = 128       # per-iteration minibatch size
    epochs: int = 200           # total number of training epochs
    lr: float = 0.1             # initial learning rate
    weight_decay: float = 1e-4  # L2 weight-decay coefficient
    # Dataset selection, restricted to the listed choices.
    data: str = chika.choices("cifar10", "cifar100", "svhn")
    bn_no_wd: bool = False           # exclude BatchNorm params from weight decay
    use_amp: bool = False            # enable automatic mixed precision
    use_accimage: bool = False       # use the accimage backend for image decoding
    use_multi_tensor: bool = False   # NOTE(review): likely multi-tensor optimizer kernels — confirm
    use_channel_last: bool = False   # NOTE(review): likely channels-last memory format — confirm
    prefetch_factor: int = 2         # DataLoader prefetch_factor
    persistent_workers: bool = False # keep DataLoader workers alive between epochs
    debug: bool = False              # debug mode toggle
    download: bool = False           # download the dataset if missing
class Config:
    # Training configuration variant using a prefetcher-based data pipeline
    # (chika-style config class; presumably decorated with @chika.config — confirm).
    # Model name drawn from the project's model registry.
    name: str = chika.choices(*MODEL_REGISTRY.choices())
    batch_size: int = 128       # per-iteration minibatch size
    epochs: int = 200           # total number of training epochs
    lr: float = 0.1             # initial learning rate
    weight_decay: float = 1e-4  # L2 weight-decay coefficient
    lr_decay: float = 0.1       # presumably multiplicative LR decay factor — TODO confirm
    bn_no_wd: bool = False      # exclude BatchNorm params from weight decay
    use_amp: bool = False       # enable automatic mixed precision
    use_accimage: bool = False  # use the accimage backend for image decoding
    use_prefetcher: bool = False  # NOTE(review): likely a CUDA data prefetcher — confirm
    debug: bool = False         # debug mode toggle
class ModelConfig:
    # Model-specific options for MLP-Mixer style architectures
    # (chika-style sub-config).
    # Architecture name drawn from the MLPMixers registry.
    name: str = chika.choices(*MLPMixers.choices())
    droppath_rate: float = 0.1  # stochastic-depth (DropPath) rate
    grad_clip: float = 1        # gradient clipping threshold
    ema: bool = False           # keep an exponential moving average of weights
    # EMA decay rate, constrained to [0, 1] at parse time.
    ema_rate: float = chika.bounded(0.999, 0, 1)