コード例 #1
ファイル: example_univariate.py プロジェクト: wangrui6/mdla
# Let the estimator draw its own random initial kernels: passing
# dict_init=None delegates initialization to
# MiniBatchMultivariateDictLearning. (A manual alternative would be to
# build n_kernels random (kernel_init_len, n_dims) atoms and normalize
# each by its Frobenius norm.)
dict_init = None

learned_dict = MiniBatchMultivariateDictLearning(
    n_kernels=n_kernels,
    batch_size=batch_size,
    n_iter=n_iter,
    n_nonzero_coefs=n_nonzero_coefs,
    n_jobs=n_jobs,
    learning_rate=learning_rate,
    kernel_init_len=kernel_init_len,
    verbose=1,
    dict_init=dict_init,
    random_state=rng_global,
)

# Incremental training: each partial_fit call updates the dictionary,
# and after every call we record three diagnostics comparing the learned
# kernels against the generating (ground-truth) dictionary.
for iteration in range(max_iter):
    learned_dict = learned_dict.partial_fit(X)
    # Detection rate at a 0.99 threshold between learned and
    # generating kernels.
    detect_rate.append(
        detection_rate(learned_dict.kernels_, generating_dict, 0.99))
    # Wasserstein (earth mover's) distance with the 'chordal' metric;
    # scale=True as in the original call (exact scaling semantics are
    # defined by emd -- see the mdla project).
    wasserstein.append(
        emd(learned_dict.kernels_, generating_dict, 'chordal', scale=True))
    # Sum of the per-sample objective error from the last fit.
    objective_error.append(learned_dict.error_.sum())

# Plot the three recorded curves for the univariate case.
plot_univariate(array(objective_error), array(detect_rate),
                array(wasserstein), n_iter, 'univariate-case')
    
# Another possibility is to rely on a callback function such as
def callback_distance(loc):
    # NOTE(review): the snippet is truncated here -- only the first line
    # of the callback body is visible. `loc` appears to be a dict of the
    # caller's local variables (it exposes the iteration counter 'ii'
    # and 'iter_offset'); confirm against the full example file.
    ii, iter_offset = loc['ii'], loc['iter_offset']
コード例 #2
ファイル: example_multivariate.py プロジェクト: sylvchev/mdla
# Use the estimator's built-in random initialization: dict_init=None
# tells MiniBatchMultivariateDictLearning to generate its own starting
# kernels. (The hand-rolled alternative would create n_kernels random
# (kernel_init_len, n_dims) atoms, each Frobenius-normalized.)
dict_init = None

learned_dict = MiniBatchMultivariateDictLearning(
    n_kernels=n_kernels,
    batch_size=batch_size,
    n_iter=n_iter,
    n_nonzero_coefs=n_nonzero_coefs,
    n_jobs=n_jobs,
    learning_rate=learning_rate,
    kernel_init_len=kernel_init_len,
    verbose=1,
    dict_init=dict_init,
    random_state=rng_global,
)

# Incremental training: each partial_fit call updates the dictionary,
# and after every call we record three diagnostics comparing the learned
# kernels against the generating (ground-truth) dictionary.
for iteration in range(max_iter):
    learned_dict = learned_dict.partial_fit(X)
    # Detection rate at a 0.99 threshold (note: the list is named
    # detection_rate, the function detectionRate, as in the original).
    detection_rate.append(
        detectionRate(learned_dict.kernels_, generating_dict, 0.99))
    # Wasserstein (earth mover's) distance with the 'chordal' metric;
    # scale=True as in the original call (exact scaling semantics are
    # defined by emd -- see the mdla project).
    wasserstein.append(
        emd(learned_dict.kernels_, generating_dict, 'chordal', scale=True))
    # Sum of the per-sample objective error from the last fit.
    objective_error.append(learned_dict.error_.sum())

# Plot the curves; the Wasserstein curve is flipped as 100 - w before
# plotting, exactly as in the original script.
plot_multivariate(array(objective_error), array(detection_rate),
                100.-array(wasserstein), n_iter, 'multivariate-case')
    
# Another possibility is to rely on a callback function such as
def callback_distance(loc):
    # NOTE(review): the snippet is truncated here -- only the first line
    # of the callback body is visible. `loc` appears to be a dict of the
    # caller's local variables (it exposes the iteration counter 'ii'
    # and 'iter_offset'); confirm against the full example file.
    ii, iter_offset = loc['ii'], loc['iter_offset']