Example no. 1
0
# NOTE(review): fragment — the matching `if` branch of this `else` lies outside
# the visible excerpt; `M`, `Y`, `best_models`, `all_best_inliers`,
# `best_outer_hypo`, `opt`, `data`, `device` and `sampling` are defined elsewhere.
else:
    # Fallback: start from all-zero parameter vectors (M models, 9 parameters
    # each — presumably flattened 3x3 homographies; TODO confirm).
    refined_models = torch.zeros((M, 9))

# Soft inlier score of every data point (Y of them) under each of the M models.
refined_inliers = torch.zeros((M, Y))
for mi in range(M):
    inliers = all_best_inliers[best_outer_hypo, mi]
    inlier_indices = torch.nonzero(inliers)  # NOTE(review): unused in this excerpt
    if opt.em:
        # `new_model` is a view into `refined_models`, so the in-place sign
        # flip below also mutates `refined_models[mi]` directly.
        new_model = refined_models[mi]
        # Fix the sign ambiguity: force the last parameter to be non-negative.
        new_model *= torch.sign(new_model[-1])
    else:
        new_model = best_models[mi]

    refined_models[mi] = new_model
    # Re-score all points against the selected model; the two helpers are
    # defined elsewhere — presumably a point-to-model residual followed by a
    # soft (sigmoid-like) inlier weighting at `opt.threshold2`. TODO confirm.
    new_distances = sampling.homography_consistency_measure(new_model, data[0], device)
    new_inliers = sampling.soft_inlier_fun(new_distances, 5. / (opt.threshold2), opt.threshold2)
    refined_inliers[mi] = new_inliers

# Greedy instance selection: keep accepting models while each new one adds at
# least `opt.inlier_cutoff` joint inliers beyond those already covered.
last_inlier_count = 0
selected_instances = 0
joint_inliers = torch.zeros((Y,))
for mi in range(M):
    # Joint coverage: each point contributes via its best-covering model only.
    joint_inliers = torch.max(joint_inliers, refined_inliers[mi, :])
    inlier_count = torch.sum(joint_inliers, dim=-1)
    # Marginal gain of adding model `mi` (shadows the tensor from the loop above).
    new_inliers = inlier_count - last_inlier_count
    last_inlier_count = inlier_count
    print("instance %d: %.2f additional inliers" % (mi, new_inliers))
    if new_inliers < opt.inlier_cutoff:
        break
    selected_instances += 1
estm_models = []
Example no. 2
0
                        # NOTE(review): fragment — interior of nested loops over
                        # outer hypotheses `oh`, model instances `mi` and batch
                        # items `bi`; the enclosing scopes and the final
                        # `torch.max(...)` call are truncated outside this excerpt.
                        data = data.to(device)
                        # Network predicts per-point (log-)sampling weights,
                        # conditioned on the current state channel.
                        log_probs = model(data_and_state)
                        probs = torch.softmax(log_probs[bi, 0, 0:num_data[bi],
                                                        0],
                                              dim=0)
                        # Rescale so the largest weight is exactly 1.
                        probs /= torch.max(probs)

                    all_probs[oh, mi, :num_data[bi]] = probs

                    # Sample `opt.hyps` vanishing-point hypotheses, each from a
                    # minimal set of 2 lines, guided by the predicted weights.
                    for hi in range(opt.hyps):
                        estm_line, choice_vec, _, inlier_count, distances = \
                            sampling.sample_model(
                                data[bi], num_data[bi], inlier_fun, 2, probs, sampling.vp_from_lines,
                                sampling.vp_consistency_measure_angle, device=device)

                        # Presumably a soft (sigmoid-like) inlier weighting at
                        # `opt.threshold` — helper defined elsewhere; TODO confirm.
                        inliers = sampling.soft_inlier_fun(
                            distances, 5. / opt.threshold, opt.threshold)

                        all_choices[oh, mi, hi] = choice_vec
                        all_inliers[oh, mi, hi, 0:num_data[bi]] = inliers
                        all_inlier_counts[oh, mi, hi] = inlier_count
                        all_models[oh, mi, hi] = estm_line

                    # Keep the hypothesis with the highest inlier count.
                    best_hypo = torch.argmax(all_inlier_counts[oh, mi])
                    all_best_hypos[oh, mi] = best_hypo

                    # Conditional mode: fold the winning hypothesis' inliers into
                    # the state channel (last feature dim) for the next instance.
                    if not opt.unconditional:
                        data_and_state[bi, 0:num_data[bi],
                                       ddim - 1] = torch.max(
                                           all_inliers[oh, mi, best_hypo,
                                                       0:num_data[bi]],
                                           data_and_state[bi, 0:num_data[bi],
Example no. 3
0
                            # NOTE(review): fragment — interior of nested loops
                            # over outer hypotheses `oh`, instances `mi` and
                            # batch items `bi`; enclosing scopes are outside
                            # this excerpt.
                            losses = []
                            inlier_counts = []

                            # Per-point sampling weights from the network output.
                            cur_probs = torch.softmax(
                                log_probs[bi, :, 0:num_data[bi]].squeeze(),
                                dim=-1)

                            # Sample a pool of `opt.hyps` homography hypotheses
                            # from minimal sets, guided by `cur_probs`.
                            models, inliers, choices, distances = \
                                sampling.sample_model_pool(data[bi], num_data[bi], opt.hyps, minimal_set_size,
                                                           inlier_fun, sampling.homography_from_points,
                                                           sampling.homography_consistency_measure, cur_probs,
                                                           device=device, model_size=model_dim)

                            # Record which points were chosen (used for gradients).
                            all_grads[oh, :, mi, bi] = choices

                            # Overwrite with soft inlier scores at `opt.threshold`
                            # (helper defined elsewhere — presumably a
                            # sigmoid-like weighting; TODO confirm).
                            inliers = sampling.soft_inlier_fun(
                                distances, 5. / opt.threshold, opt.threshold)

                            all_models[oh, :, mi, bi, :] = models

                            # Joint inliers = coverage of this hypothesis combined
                            # with the best models of all previous instances.
                            if mi > 0:
                                all_joint_inliers[oh, :, mi, bi] = torch.max(
                                    inliers,
                                    all_best_inliers[oh, mi - 1,
                                                     bi].unsqueeze(0).expand(
                                                         opt.hyps, -1))
                            else:
                                all_joint_inliers[oh, :, mi, bi] = inliers

                            # Per-hypothesis counts: standalone and cumulative.
                            inlier_counts = torch.sum(inliers, dim=-1)
                            cumulative_inlier_counts = torch.sum(
                                all_joint_inliers[oh, :, mi, bi], dim=-1)
Example no. 4
0
# NOTE(review): fragment — `M`, `Y`, `refined_models`, `refined_inliers`,
# `best_models`, `all_best_inliers`, `best_outer_hypo`, `opt`, `data`, `device`
# and `sampling` are defined outside this excerpt. Same logic as Example no. 1,
# gated on the homography problem.
if opt.problem == "homography":
    for mi in range(M):
        inliers = all_best_inliers[best_outer_hypo, mi]
        inlier_indices = torch.nonzero(inliers)  # NOTE(review): unused here
        if opt.em:
            # `new_model` is a view into `refined_models`; the in-place sign
            # flip below therefore also mutates `refined_models[mi]`.
            new_model = refined_models[mi]
            # Fix the sign ambiguity: force the last parameter non-negative.
            new_model *= torch.sign(new_model[-1])
        else:
            new_model = best_models[mi]

        refined_models[mi] = new_model
        # Re-score all points against the selected model (helpers defined
        # elsewhere — presumably residuals plus a soft inlier weighting at
        # `opt.threshold2`; TODO confirm).
        new_distances = sampling.homography_consistency_measure(
            new_model, data[0], device)
        new_inliers = sampling.soft_inlier_fun(new_distances,
                                               5. / (opt.threshold2),
                                               opt.threshold2)
        refined_inliers[mi] = new_inliers

    # Greedy instance selection: accept models while each new one contributes
    # at least `opt.inlier_cutoff` additional joint inliers.
    last_inlier_count = 0
    selected_instances = 0
    joint_inliers = torch.zeros((Y, ))
    for mi in range(M):
        # Joint coverage: each point counted via its best-covering model only.
        joint_inliers = torch.max(joint_inliers, refined_inliers[mi, :])
        inlier_count = torch.sum(joint_inliers, dim=-1)
        # Marginal gain of model `mi` (shadows the tensor from the loop above).
        new_inliers = inlier_count - last_inlier_count
        last_inlier_count = inlier_count
        print("instance %d: %.2f additional inliers" % (mi, new_inliers))
        if new_inliers < opt.inlier_cutoff:
            break
        selected_instances += 1