Code example #1
Score: 0
def callback_recovery(loc):
    """Collect dictionary-recovery metrics during learning.

    Reads the current dictionary from ``loc`` and appends, onto the
    ``dict_obj`` carried in ``loc``, the EMD (Wasserstein) and Hausdorff
    distances to the generating dictionary for several underlying
    metrics, plus the detection rates at the 0.99 and 0.97 thresholds.
    """
    d = loc["dict_obj"]
    current = loc["dictionary"]
    reference = d.generating_dict
    # Wasserstein (EMD) distances, one history list per metric.
    emd_traces = (
        (d.wc, "chordal"),
        (d.wfs, "fubinistudy"),
        (d.wcpa, "chordal_principal_angles"),
        (d.wbc, "binetcauchy"),
        (d.wg, "geodesic"),
        (d.wfb, "frobenius"),
    )
    for trace, metric in emd_traces:
        trace.append(emd(current, reference, metric, scale=True))
    # Hausdorff distances over the same set of metrics.
    hausdorff_traces = (
        (d.hc, "chordal"),
        (d.hfs, "fubinistudy"),
        (d.hcpa, "chordal_principal_angles"),
        (d.hbc, "binetcauchy"),
        (d.hg, "geodesic"),
        (d.hfb, "frobenius"),
    )
    for trace, metric in hausdorff_traces:
        trace.append(hausdorff(current, reference, metric, scale=True))
    # Detection rates at two correlation thresholds.
    d.dr99.append(detection_rate(current, reference, 0.99))
    d.dr97.append(detection_rate(current, reference, 0.97))
Code example #2
Score: 0
def callback_recovery(loc):
    """Track recovery of the generating dictionary during learning.

    Appends chordal and Fubini-Study EMD and Hausdorff distances, the
    beta distance, and detection rates (0.99 and 0.97 thresholds) onto
    the ``dict_obj`` carried in ``loc``.
    """
    d = loc['dict_obj']
    current = loc['dictionary']
    reference = d.generating_dict
    d.wc.append(emd(current, reference, 'chordal', scale=True))
    d.wfs.append(emd(current, reference, 'fubinistudy', scale=True))
    d.hc.append(hausdorff(current, reference, 'chordal', scale=True))
    d.hfs.append(hausdorff(current, reference, 'fubinistudy', scale=True))
    # NOTE: beta_dist takes (reference, current) — reversed argument
    # order compared with the other metric calls above.
    d.bd.append(beta_dist(reference, current))
    d.dr99.append(detection_rate(current, reference, 0.99))
    d.dr97.append(detection_rate(current, reference, 0.97))
Code example #3
Score: 0
def callback_recovery(loc):
    """Accumulate distance and detection statistics for the learned dictionary.

    Each call appends one new measurement per history list stored on
    ``loc["dict_obj"]``: EMD and Hausdorff distances (chordal and
    Fubini-Study), the beta distance, and two detection rates.
    """
    d = loc["dict_obj"]
    learned = loc["dictionary"]
    gen = d.generating_dict
    # (history list, distance function, metric name) triples.
    for trace, distance, metric in (
        (d.wc, emd, "chordal"),
        (d.wfs, emd, "fubinistudy"),
        (d.hc, hausdorff, "chordal"),
        (d.hfs, hausdorff, "fubinistudy"),
    ):
        trace.append(distance(learned, gen, metric, scale=True))
    d.bd.append(beta_dist(gen, learned))
    d.dr99.append(detection_rate(learned, gen, 0.99))
    d.dr97.append(detection_rate(learned, gen, 0.97))
Code example #4
Score: 0
File: test_dict_metrics.py  Project: sylvchev/mdla
def test_correlation():
    """Sanity-check detection_rate and precision/recall.

    A dictionary compared with itself must score perfectly, while a
    freshly drawn random dictionary must not.
    """
    # Random unit-norm univariate (du2) and multivariate (dm2) dictionaries.
    du2 = [randn(n_features,) for _ in range(n_kernels)]
    du2 = [kernel / norm(kernel) for kernel in du2]
    dm2 = [randn(n_features, n_dims) for _ in range(n_kernels)]
    dm2 = [kernel / norm(kernel) for kernel in dm2]

    assert_equal(100., detection_rate(du, du, 0.97))
    assert_not_equal(100., detection_rate(du, du2, 0.99))
    assert_equal(100., detection_rate(dm, dm, 0.97))
    assert_not_equal(100., detection_rate(dm, dm2, 0.99))
    assert_equal((100., 100.), precision_recall(du, du, 0.97))
    assert_equal((0., 0.), precision_recall(du, du2, 0.99))
    # Self-comparison yields the all-ones precision/recall curve.
    perfect = (ones(len(du)), ones(len(du)))
    assert_true(allclose(precision_recall_points(du, du), perfect))
    assert_true(
        not allclose(precision_recall_points(du, du2),
                     (ones(len(du)), ones(len(du2)))))
Code example #5
Score: 0
def test_correlation():
    """Check detection_rate and precision/recall against random dictionaries."""

    def _random_unit_kernels(shape):
        # Draw n_kernels random kernels and scale each to unit norm.
        kernels = [randn(*shape) for _ in range(n_kernels)]
        return [k / norm(k) for k in kernels]

    du2 = _random_unit_kernels((n_features,))
    dm2 = _random_unit_kernels((n_features, n_dims))

    # Self-comparison is perfect; comparison with fresh noise is not.
    assert_equal(100., detection_rate(du, du, 0.97))
    assert_not_equal(100., detection_rate(du, du2, 0.99))
    assert_equal(100., detection_rate(dm, dm, 0.97))
    assert_not_equal(100., detection_rate(dm, dm2, 0.99))
    assert_equal((100., 100.), precision_recall(du, du, 0.97))
    assert_equal((0., 0.), precision_recall(du, du2, 0.99))
    assert_true(allclose(precision_recall_points(du, du),
                         (ones(len(du)), ones(len(du)))))
    assert_true(not allclose(precision_recall_points(du, du2),
                             (ones(len(du)), ones(len(du2)))))
Code example #6
Score: 0
File: example_univariate.py  Project: wangrui6/mdla
def callback_distance(loc):
    """Periodically record distances between learned and generating dicts.

    Every ``n_iter`` batch-adjusted iterations, appends the chordal
    Wasserstein distance, the detection rate at threshold 0.99, and the
    current objective cost onto the ``dict_obj`` carried in ``loc``.
    """
    ii = loc['ii']
    iter_offset = loc['iter_offset']
    n_batches = loc['n_batches']
    iteration = (ii - iter_offset) / int(n_batches)
    # Compute distances only every n_iter iterations, as in the previous case.
    if np.mod(iteration, n_iter) == 0:
        d = loc['dict_obj']
        d.wasserstein.append(
            emd(loc['dictionary'], d.generating_dict, 'chordal', scale=True))
        d.detect_rate.append(
            detection_rate(loc['dictionary'], d.generating_dict, 0.99))
        d.objective_error.append(loc['current_cost'])
Code example #7
Score: 0
File: example_univariate.py  Project: sylvchev/mdla
def callback_distance(loc):
    """Record recovery metrics once every ``n_iter`` effective iterations."""
    ii, iter_offset = loc["ii"], loc["iter_offset"]
    n_batches = loc["n_batches"]
    # Guard clause: only sample every n_iter (batch-adjusted) iterations.
    if np.mod((ii - iter_offset) / int(n_batches), n_iter) != 0:
        return
    d = loc["dict_obj"]
    current = loc["dictionary"]
    d.wasserstein.append(emd(current, d.generating_dict, "chordal", scale=True))
    d.detect_rate.append(detection_rate(current, d.generating_dict, 0.99))
    d.objective_error.append(loc["current_cost"])
Code example #8
Score: 0
File: example_univariate.py  Project: wangrui6/mdla
#     dict_init[i] /= norm(dict_init[i], 'fro')
# Start from a random initialization inside the estimator (no explicit init).
dict_init = None

# Online multivariate dictionary learner; hyper-parameters (n_kernels,
# batch_size, n_iter, ...) are defined earlier in this script.
learned_dict = MiniBatchMultivariateDictLearning(n_kernels=n_kernels, 
                                batch_size=batch_size, n_iter=n_iter,
                                n_nonzero_coefs=n_nonzero_coefs,
                                n_jobs=n_jobs, learning_rate=learning_rate,
                                kernel_init_len=kernel_init_len, verbose=1,
                                dict_init=dict_init, random_state=rng_global)

# Update learned dictionary at each iteration and compute a distance
# with the generating dictionary
for i in range(max_iter):
    learned_dict = learned_dict.partial_fit(X)
    # Compute the detection rate (threshold 0.99)
    detect_rate.append(detection_rate(learned_dict.kernels_,
                                        generating_dict, 0.99))
    # Compute the Wasserstein (EMD) distance under the chordal metric
    wasserstein.append(emd(learned_dict.kernels_, generating_dict,
                        'chordal', scale=True))
    # Get the objective error accumulated during this partial fit
    objective_error.append(learned_dict.error_.sum())

# Plot objective error, detection rate and Wasserstein distance curves.
plot_univariate(array(objective_error), array(detect_rate),
                array(wasserstein), n_iter, 'univariate-case')
    
# Another possibility is to rely on a callback function such as 
def callback_distance(loc):
    ii, iter_offset = loc['ii'], loc['iter_offset']
    n_batches = loc['n_batches']
    if np.mod((ii-iter_offset)/int(n_batches), n_iter) == 0:
        # Compute distance only every 5 iterations, as in previous case