# Beispiel #1 — scraped example marker (original score: 0)
 def use_type(x):
     """Interpret the command-line string *x* as a boolean flag."""
     truth_value = parsing.strtobool(x)
     return bool(truth_value)
else:
    # NOTE(review): the matching `if` branch is not visible in this chunk —
    # confirm the condition against the full file. This fallback sets the
    # first num_workers entry to 4 data-loading workers.
    __C.num_workers[0] = 4

# Add custom arguments
# Each (default, edict(...)) pair presumably maps to argparse kwargs
# (type=, choices=) in base_config — TODO confirm against parse_args_and_set_config.

# Reconstruction-loss norm: one of l1 / l2 / smooth_l1.
__C.train.lnorm = ("smooth_l1",
                   edict(choices=["l1", "l2", "smooth_l1"], type=str))
# Weight decay: a small default, with a 10x stronger value for the discriminator.
__C.train.weight_decay = edict()
__C.train.weight_decay.default = 2e-6
__C.train.weight_decay.discriminator = 10 * __C.train.weight_decay.default
# Loss mixing weights; lnorm_mult + adv_mult = 1.0 (0.996 + 0.004).
__C.train.lnorm_mult = (0.996, edict(type=float))
__C.train.adv_mult = (0.004, edict(type=float))
# Per-task loss weights, all equal by default.
__C.train.task_lambdas = edict()
__C.train.task_lambdas.depths = 1
__C.train.task_lambdas.normals = 1
__C.train.task_lambdas.autoencoder = 1
# Whether to skip the adversarial term; parsed from strings like "true"/"0".
__C.train.adv_skip = (True, edict(type=lambda x: bool(parsing.strtobool(x))))

# Model args
__C.model = edict()
# Architecture selector; "rn" is currently the only choice.
__C.model.arch = ("rn", edict(type=str, choices=["rn"]))

# Encoder args
# __C.model.backbone = edict()
#  __C.model.backbone.use = True
# __C.model.backbone.kwargs = edict(lightweight=True, layers=None)
# Encoder emits 512 output channels.
__C.model.encoder = edict(kwargs=edict(out_nc=512))

# Midreps args
# NOTE(review): the commented-out midreps block below is truncated in this
# chunk; the remainder is outside the visible range.
# __C.model.midreps = edict()
# __C.model.midreps.use = (
#     True,
# Beispiel #3 — scraped example marker (original score: 0)
 def allowed_type(x):
     """Interpret the command-line string *x* as a boolean flag."""
     flag = parsing.strtobool(x)
     return bool(flag)
# Beispiel #4 — scraped example marker (original score: 0)
import torch
from pytorch_lightning.utilities import parsing
from base_config import __C, parse_args_and_set_config, edict, _to_values_only

def parse_bool(x):
    """Convert a truthy/falsy string ("yes"/"no", "1"/"0", ...) to a bool.

    PEP 8 (E731): prefer a named ``def`` over assigning a lambda — the
    callable gets a proper ``__name__``, which improves argparse/traceback
    error messages. Behavior is identical to the original lambda.
    """
    return bool(parsing.strtobool(x))

# Dataset roots: cluster paths when CUDA is available (GPU machine),
# local macOS paths otherwise. Both entries are plain string CLI options.
if torch.cuda.is_available():
    _orig_path = "/storage1/samenabar/code/CLMAC/clevr-dataset-gen/datasets/CLEVR_v1.2"
    _uni_path = "/storage1/samenabar/code/CLMAC/clevr-dataset-gen/datasets/CLEVR_Uni_v1.2"
else:
    _orig_path = "/Users/sebamenabar/Documents/datasets/tmp/CLEVR_v1.2"
    _uni_path = "/Users/sebamenabar/Documents/datasets/tmp/CLEVR_Uni_v1.2"

__C.orig_dir = (_orig_path, edict(type=str))
__C.uni_dir = (_uni_path, edict(type=str))

# Training-loop options; each (default, edict(...)) presumably supplies
# argparse kwargs — TODO confirm against parse_args_and_set_config.
__C.train.num_plot_samples = (32, edict(type=int))  # samples visualized per logging step
__C.train.augment = (False, edict(type=parse_bool))  # enable data augmentation
# NOTE(review): unlike the neighboring entries, no explicit type= here —
# presumably str is inferred from the default; verify in base_config.
__C.train.dataset = ("orig", edict(choices=["orig", "uni", "gqa"]))
__C.train.gradient_clip_val = (8.0, edict(type=float))  # gradient clipping threshold

# Container for per-optimizer settings (populated past this chunk).
__C.train.optimizers = edict()