# Example 1
config = EasyDict()

# Input columns from the point-cloud data to feed the network.
config.cols_to_use = [0, 1, 2, 3]

# Tune per architecture (feat_size, layer_dims, etc.).
config.model_kwargs = {
    "num_features": 1,
    "num_classes": 3,
}

config.data_path = "/fast_scratch/WatChMaL/data/pointnet/pointnet_trainval.h5"
config.indices_file = "/fast_scratch/WatChMaL/data/pointnet/pointnet_trainval_idxs.npz"

# NOTE: change this output directory for each run.
config.dump_path = "/home/dgreen/training_outputs/pointnet2/no_time/adam/"

# 0 workers because multiprocessing data loading sometimes crashes.
config.num_data_workers = 0
config.device = 'cuda:6'

config.optimizer = "Adam"
config.optimizer_kwargs = {
    "lr": 1e-3,
    "betas": (0.9, 0.999),
}

config.use_scheduler = False
config.scheduler_kwargs = {
    "mode": "min",
    "min_lr": 1e-6,
    "patience": 1,
    "verbose": True,
}
config.scheduler_step = 190

config.batch_size = 32
config.epochs = 20

# Training-loop reporting and validation cadence.
config.report_interval = 200
config.num_val_batches = 256
config.valid_interval = 1000

config.validate_batch_size = 32
config.validate_dump_interval = 256
# Example 2
# 0 workers because multiprocessing data loading sometimes crashes.
config.num_data_workers = 0
config.device = 'gpu'
config.gpu_list = [6]

# SGD with momentum instead of Adam for this run.
config.optimizer = "SGD"
config.optimizer_kwargs = {"lr": 0.01, "weight_decay": 1e-3, "momentum": 0.9, "nesterov": True}

config.scheduler_kwargs = {"mode": "min", "min_lr": 1e-6, "patience": 1, "verbose": True}
config.scheduler_step = 190

config.batch_size = 64
config.epochs = 15

# Training-loop reporting and validation cadence.
config.report_interval = 50
config.num_val_batches = 16
config.valid_interval = 200

config.validate_batch_size = 64
config.validate_dump_interval = 256
config.use_encoded_data = True