# Example no. 1
def load_weights(path=None, name="RLLRegModel", device="cuda:0", num_iters=100, K=200, feature_model="vonmises",
                 voxel_size=0.3, use_attention=True, epsilon=1e-5, s=0.4,
                 use_dare_weights=False, ds_rate=1.0, mean_init=True, debug=False,
                 cluster_init="default", cluster_precision_scale=1.0, downsample_online=True, max_num_points=14000,
                 cluster_mean_scale=1.0, num_channels=16, conv1_ksize=3, update_priors=False, train_s=False):
    """Build a feature registration model and optionally load checkpoint weights.

    Constructs the default parameter set, overrides it with the given keyword
    arguments, and instantiates ``feature_reg_model``. If ``path`` is given,
    the checkpoint's ``"actor"`` state dict is loaded (via an ``RLLActor``
    wrapper), all parameters are frozen, and the model is set to eval mode on
    ``device``.

    Args:
        path: Optional checkpoint file path. When ``None``, an untrained
            model is returned.
        name: Model name stored in the parameter dict.
        device: Target device string (e.g. ``"cuda:0"``) used only when
            loading a checkpoint.
        num_iters: Number of registration iterations; backprop is enabled
            for all of them (``range(0, num_iters + 1)``).
        K: Number of mixture clusters.
        feature_model: Feature distribution model name (e.g. ``"vonmises"``).
        voxel_size, use_attention, epsilon, s, use_dare_weights, ds_rate,
        mean_init, debug, cluster_init, cluster_precision_scale,
        downsample_online, max_num_points, cluster_mean_scale, num_channels,
        conv1_ksize, update_priors, train_s: forwarded verbatim into the
            parameter dict consumed by ``feature_reg_model``.

    Returns:
        The constructed model; frozen and in eval mode when ``path`` is set.
    """
    params = get_default_feature_dare_parameters()
    params.name = name
    params.device = device
    params["feature_distr_parameters"] = edict(num_channels=num_channels,
                                               s=s,
                                               model=feature_model,
                                               )
    params.layer = "final"
    params.num_iters = num_iters
    params.backprop_iter = range(0, num_iters + 1)
    params.K = K
    params.cluster_init = cluster_init
    params.cluster_precision_init = 'scaled'
    params.cluster_precision_scale = cluster_precision_scale
    params.fix_cluster_pos_iter = 2
    params.use_dare_weighting = use_dare_weights
    params.debug = debug
    params.epsilon = epsilon
    params.gamma = 0.005
    params.voxel_size = voxel_size
    params.use_attention = use_attention
    params.downsample_online = downsample_online
    params.downsample_rate = ds_rate
    params.ds_method = "random"
    params.max_num_points = max_num_points
    params.mean_init = mean_init
    params.cluster_mean_scale = cluster_mean_scale
    params.conv1_ksize = conv1_ksize
    params.update_priors = update_priors
    params.train_s = train_s

    fmodel = feature_reg_model(params)
    if path is not None:
        # The checkpoint stores the actor's state dict, so wrap the model in
        # an RLLActor before loading, then unwrap it again.
        act = reg_loss_actors.RLLActor(fmodel, weight=1)
        ch = torch.load(path, map_location="cpu")
        act.load_state_dict(ch["actor"])

        fmodel = act.model
        # Freeze all weights: the loaded model is for inference only.
        for x in fmodel.parameters():
            x.requires_grad_(False)
        fmodel.eval()
        fmodel.to(device)
        fmodel.params = params

    return fmodel
# Example no. 2
def configure_trainer(model):
    """Create the training module (optimizer, loss actor, scheduler) for *model*.

    Args:
        model: Registration model exposing ``feature_extractor`` and a
            ``params`` object with ``num_iters`` and ``backprop_iter``.

    Returns:
        A ``trainer`` instance configured with AdamW, an RLL loss actor, and a
        StepLR schedule (decay by 0.2 every 40 epochs, 181 epochs total).
    """
    # paper versions are trained with adam, but better performance is achieved using AdamW
    optimizer = optim.AdamW(
        [{'params': model.feature_extractor.parameters(), 'lr': 1e-4}],
        lr=1e-4)

    # Per-iteration loss weights: weight grows toward the final iteration
    # (divisor shrinks as i increases); iterations outside backprop_iter
    # contribute nothing.
    base_vs_weight = (40.0 - 24.0) / 4.0
    Vs_iter = [
        base_vs_weight / (model.params.num_iters - i)
        if i in model.params.backprop_iter else 0.0
        for i in range(model.params.num_iters - 1)
    ]

    weights = dict(Vs=0.0, Vs_iter=Vs_iter)

    # Skip the loss for the first few registration iterations.
    compute_loss_iter = 4
    actor = reg_loss_actors.RLLActor(model=model,
                                     dist_thresh=0.3,
                                     weight=weights,
                                     c=1.0,
                                     compute_loss_iter=compute_loss_iter,
                                     vis=None,
                                     alpha=-2.0,
                                     min_corresponence_rate=0.3)

    step_size = 40
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                             step_size=step_size,
                                             gamma=0.2)
    # NOTE(review): `workspace` is not defined in this snippet — presumably a
    # module-level global naming the job/output directory; confirm it is in
    # scope where this function is used.
    training_module = trainer(num_epochs=181,
                              optimizer=optimizer,
                              scheduler=lr_scheduler,
                              actor=actor,
                              job_name=workspace,
                              collate_fn=collate_tensorlist)
    return training_module