Code Example #1
File: test_mdla.py Project: sylvchev/mdla
def test_mdla_shuffle():
    # X is the list of multivariate signals defined elsewhere in the
    # test module (see Code Example #2 for a version that builds it inline)
    n_kernels = 8
    dico = MiniBatchMultivariateDictLearning(n_kernels=n_kernels,
                                             random_state=0, n_iter=3,
                                             n_nonzero_coefs=1,
                                             verbose=5, shuffle=False)
    # With n_nonzero_coefs=1, each decomposition uses at most one kernel
    code = dico.fit(X).transform(X[0])
    assert_true(len(code[0]) <= 1)
Code Example #2
File: test_mdla.py Project: zsb87/mdla
def test_mdla_shuffle():
    # Each sample is a multivariate signal of shape (n_features, n_dims);
    # rng_global is a module-level numpy RandomState shared by the tests
    n_samples, n_features, n_dims = 10, 5, 3
    X = [rng_global.randn(n_features, n_dims) for i in range(n_samples)]
    n_kernels = 8
    dico = MiniBatchMultivariateDictLearning(n_kernels=n_kernels,
                                             random_state=0,
                                             n_iter=3,
                                             n_nonzero_coefs=1,
                                             verbose=5,
                                             shuffle=False)
    code = dico.fit(X).transform(X[0])
    assert_true(len(code[0]) <= 1)
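For orientation, here is a minimal self-contained version of the same usage, assuming only numpy and that MiniBatchMultivariateDictLearning is importable from the top-level mdla module (a sketch, not part of either test file):

import numpy as np
from mdla import MiniBatchMultivariateDictLearning

# Build a toy dataset: a list of multivariate signals, each of shape
# (n_features, n_dims), as in the tests above
rng = np.random.RandomState(0)
n_samples, n_features, n_dims = 10, 5, 3
X = [rng.randn(n_features, n_dims) for _ in range(n_samples)]

dico = MiniBatchMultivariateDictLearning(n_kernels=8, random_state=0,
                                         n_iter=3, n_nonzero_coefs=1,
                                         verbose=0, shuffle=False)
code = dico.fit(X).transform(X[0])

# kernels_ holds the learned multivariate atoms; each decomposition
# uses at most n_nonzero_coefs of them
print(len(dico.kernels_), len(code[0]))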
Code Example #3
# n_kernels, kernel_init_len and the EEG dataset X are defined earlier
# in the original script; this excerpt starts at the hyper-parameters
n_nonzero_coefs = 2
learning_rate = 5.0
n_iter = 40  # 100
n_jobs, batch_size = -1, None  # n_cpu, 5*n_cpu
figname = "-60ker-K3-klen80-lr5.0-emm-all"

d = MiniBatchMultivariateDictLearning(n_kernels=n_kernels,
                                      batch_size=batch_size,
                                      n_iter=n_iter,
                                      n_nonzero_coefs=n_nonzero_coefs,
                                      n_jobs=n_jobs,
                                      learning_rate=learning_rate,
                                      kernel_init_len=kernel_init_len,
                                      verbose=1,
                                      random_state=rng_global)
d = d.fit(X)

plot_objective_func(d.error_, n_iter, figname)

n_jobs = 4
plot_atom_usage(X, d.kernels_, n_nonzero_coefs, n_jobs, figname)

with open('EEG-savedico' + figname + '.pkl', 'wb') as f:
    # Save the learned kernels together with the hyper-parameters
    # (the closing of this dict is truncated in the original listing)
    o = {
        'kernels': d.kernels_,
        'error': d.error_,
        'kernel_init_len': d.kernel_init_len,
        'learning_rate': d.learning_rate,
        'n_iter': d.n_iter,
        'n_jobs': d.n_jobs,
        'n_kernels': d.n_kernels,
    }
    pickle.dump(o, f)
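The excerpt only saves the dictionary; a minimal sketch of the matching load step, assuming the file written above (not part of the original script):

import pickle

# Reload the learned dictionary and its training error curve;
# figname must match the value used when saving
with open('EEG-savedico' + figname + '.pkl', 'rb') as f:
    o = pickle.load(f)
kernels, error = o['kernels'], o['error']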
Code Example #4
        # Inside the loop over noise levels (index i) and experiments
        # (index e) of the original script
        g, X, code = _generate_testbed(kernel_init_len,
            n_nonzero_coefs, n_kernels, n_samples, n_features,
            n_dims, s)
        d = MiniBatchMultivariateDictLearning(n_kernels=n_kernels,
            batch_size=batch_size, n_iter=n_iter,
            n_nonzero_coefs=n_nonzero_coefs, callback=callback_recovery,
            n_jobs=n_jobs, learning_rate=learning_rate,
            kernel_init_len=kernel_init_len, verbose=1,
            random_state=rng_global)
        d.generating_dict = list(g)
        # Lists filled by the callback during fit
        d.wc, d.wfs, d.hc, d.hfs = list(), list(), list(), list()
        d.wcpa, d.wbc, d.wg, d.wfb = list(), list(), list(), list()
        d.hcpa, d.hbc, d.hg, d.hfb = list(), list(), list(), list()
        d.dr99, d.dr97 = list(), list()
        print('\nExperiment', e + 1, 'of', n_experiments)
        d = d.fit(X)
        wc[i, e, :] = array(d.wc); wfs[i, e, :] = array(d.wfs)
        hc[i, e, :] = array(d.hc); hfs[i, e, :] = array(d.hfs)
        wcpa[i, e, :] = array(d.wcpa); wbc[i, e, :] = array(d.wbc)
        wg[i, e, :] = array(d.wg); wfb[i, e, :] = array(d.wfb)
        hcpa[i, e, :] = array(d.hcpa); hbc[i, e, :] = array(d.hbc)
        hg[i, e, :] = array(d.hg); hfb[i, e, :] = array(d.hfb)
        dr99[i, e, :] = array(d.dr99); dr97[i, e, :] = array(d.dr97)

# Pickle needs a binary-mode file in Python 3
with open(backup_fname, "wb") as f:
    o = {'wc': wc, 'wfs': wfs, 'hc': hc, 'hfs': hfs, 'dr99': dr99,
         'dr97': dr97, 'wcpa': wcpa, 'wbc': wbc, 'wg': wg, 'wfb': wfb,
         'hcpa': hcpa, 'hbc': hbc, 'hg': hg, 'hfb': hfb}
    pickle.dump(o, f)
# plot_recov(wc, wfs, hc, hfs, dr99, dr97, n_iter, "multivariate_recov")
plot_recov_all(wc, wfs, wcpa, wbc, wg, wfb, hc, hfs, hcpa, hbc, hg, hfb,
               dr99, dr97, n_iter, "multivariate_recov_all")
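The callback_recovery function is not shown in this excerpt. Going by Code Examples #7 and #8 below, the callback receives a dict of the learner's local variables; a hypothetical sketch of a callback that fills the dr99 and dr97 lists initialized above:

# Hypothetical sketch: loc is assumed to be the learner's locals(),
# as in the callback_distance functions of Code Examples #7 and #8
def callback_recovery(loc):
    d = loc['dict_obj']
    # Record detection rates at the 0.99 and 0.97 thresholds
    d.dr99.append(detection_rate(loc['dictionary'], d.generating_dict, 0.99))
    d.dr97.append(detection_rate(loc['dictionary'], d.generating_dict, 0.97))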
Code Example #5
# Online without mini-batch (print reconstructed from Code Example #6)
print('Processing ', max_iter, 'iterations in online mode, '
      'without multiprocessing:', end='')
batch_size, n_jobs = n_samples, 1
learned_dict = MiniBatchMultivariateDictLearning(
    n_kernels=n_kernels,
    batch_size=batch_size,
    n_iter=max_iter,
    n_nonzero_coefs=n_nonzero_coefs,
    n_jobs=n_jobs,
    learning_rate=learning_rate,
    kernel_init_len=kernel_init_len,
    verbose=1,
    dict_init=None,
    random_state=rng_global)
ts = time()
learned_dict = learned_dict.fit(X)
# Record the average wall-clock time per iteration
iter_time.append((time() - ts) / max_iter)
it_separator += 1
plot_separator.append(it_separator)

# Online with mini-batch: sizes n_cpu, 3*n_cpu, 5*n_cpu, 7*n_cpu, 9*n_cpu
minibatch_range = [cpu_count()]
minibatch_range.extend([cpu_count() * i for i in range(3, 10, 2)])
n_jobs = -1
for mb in minibatch_range:
    print('\nProcessing ', max_iter, 'iterations in online mode, with ',
          'minibatch size', mb, 'and', cpu_count(), 'processes:', end='')
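The excerpt ends before the timings are reported; a minimal sketch, assuming the iter_time list built above (with the single-process timing first), of how the configurations could be compared:

# Sketch (not from the original script): speedup of each mini-batch
# configuration over the single-process run recorded in iter_time[0]
baseline = iter_time[0]
for mb, t in zip(minibatch_range, iter_time[1:]):
    print('minibatch size %d: %.3f s/iter, speedup x%.2f'
          % (mb, t, baseline / t))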
Code Example #6
generating_dict, X, code = _generate_testbed(kernel_init_len, n_nonzero_coefs,
                                             n_kernels, n_samples, n_features,
                                             n_dims)

# Online without mini-batch
print('Processing ', max_iter, 'iterations in online mode, '
      'without multiprocessing:', end='')
batch_size, n_jobs = n_samples, 1
learned_dict = MiniBatchMultivariateDictLearning(n_kernels=n_kernels, 
                                batch_size=batch_size, n_iter=max_iter,
                                n_nonzero_coefs=n_nonzero_coefs,
                                n_jobs=n_jobs, learning_rate=learning_rate,
                                kernel_init_len=kernel_init_len, verbose=1,
                                dict_init=None, random_state=rng_global)
ts = time()
learned_dict = learned_dict.fit(X)
iter_time.append((time()-ts) / max_iter)
it_separator += 1
plot_separator.append(it_separator)

# Online with mini-batch
minibatch_range = [cpu_count()]
minibatch_range.extend([cpu_count()*i for i in range(3, 10, 2)])
n_jobs = -1
for mb in minibatch_range:
    print('\nProcessing ', max_iter, 'iterations in online mode, with ',
          'minibatch size', mb, 'and', cpu_count(), 'processes:', end='')
    batch_size = mb
    learned_dict = MiniBatchMultivariateDictLearning(n_kernels=n_kernels,
                                batch_size=batch_size, n_iter=max_iter,
                                n_nonzero_coefs=n_nonzero_coefs,
                                # remaining arguments as in the
                                # single-process call above
                                n_jobs=n_jobs, learning_rate=learning_rate,
                                kernel_init_len=kernel_init_len, verbose=1,
                                dict_init=None, random_state=rng_global)
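For concreteness, a worked example of the minibatch_range construction above, assuming a hypothetical machine where cpu_count() returns 4:

# With cpu_count() == 4, range(3, 10, 2) yields 3, 5, 7, 9, so:
n_cpu = 4
minibatch_range = [n_cpu]
minibatch_range.extend([n_cpu * i for i in range(3, 10, 2)])
print(minibatch_range)  # [4, 12, 20, 28, 36]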
Code Example #7
File: example_univariate.py Project: wangrui6/mdla
def callback_distance(loc):
    # loc is the dict of the learner's local variables; the opening
    # lines of this function are reconstructed, being truncated in the
    # original listing
    ii, iter_offset = loc['ii'], loc['iter_offset']
    n_batches = loc['n_batches']
    if np.mod((ii - iter_offset) / int(n_batches), n_iter) == 0:
        # Compute the distance only every n_iter iterations, as in the
        # previous case
        d = loc['dict_obj']
        d.wasserstein.append(emd(loc['dictionary'], d.generating_dict,
                                 'chordal', scale=True))
        d.detect_rate.append(detection_rate(loc['dictionary'],
                                            d.generating_dict, 0.99))
        d.objective_error.append(loc['current_cost'])

# Reinitialize the random generator and re-run the learning with the callback
learned_dict2 = MiniBatchMultivariateDictLearning(n_kernels=n_kernels, 
                                batch_size=batch_size, n_iter=max_iter*n_iter,
                                n_nonzero_coefs=n_nonzero_coefs,
                                callback=callback_distance,
                                n_jobs=n_jobs, learning_rate=learning_rate,
                                kernel_init_len=kernel_init_len, verbose=1,
                                dict_init=dict_init, random_state=rng_global)
learned_dict2.generating_dict = list(generating_dict)
learned_dict2.wasserstein = list()
learned_dict2.detect_rate = list()
learned_dict2.objective_error = list()

learned_dict2 = learned_dict2.fit(X)

plot_univariate(array(learned_dict2.objective_error),
                array(learned_dict2.detect_rate),
                array(learned_dict2.wasserstein),
                n_iter=1, figname='univariate-case-callback')
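The if test at the top of callback_distance fires once per full pass over the batches; a worked example of that condition, with assumed values for iter_offset, n_batches and n_iter:

import numpy as np

# Assumed values for illustration: the callback body runs whenever the
# number of completed passes, (ii - iter_offset) / n_batches, is a
# multiple of n_iter
n_iter, iter_offset, n_batches = 5, 0, 2
for ii in (0, 2, 10, 12, 20):
    fires = np.mod((ii - iter_offset) / int(n_batches), n_iter) == 0
    print(ii, fires)  # True at ii = 0, 10, 20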
Code Example #8
File: example_multivariate.py Project: sylvchev/mdla
def callback_distance(loc):
    # loc is the dict of the learner's local variables; the opening
    # lines of this function are reconstructed, being truncated in the
    # original listing
    ii, iter_offset = loc['ii'], loc['iter_offset']
    n_batches = loc['n_batches']
    if np.mod((ii - iter_offset) / int(n_batches), n_iter) == 0:
        # Compute the distance only every n_iter iterations, as in the
        # previous case
        d = loc['dict_obj']
        d.wasserstein.append(emd(loc['dictionary'], d.generating_dict,
                                 'chordal', scale=True))
        d.detection_rate.append(detection_rate(loc['dictionary'],
                                               d.generating_dict, 0.99))
        d.objective_error.append(loc['current_cost'])

# Reinitialize the random generator and re-run the learning with the callback
learned_dict2 = MiniBatchMultivariateDictLearning(n_kernels=n_kernels, 
                                batch_size=batch_size, n_iter=max_iter*n_iter,
                                n_nonzero_coefs=n_nonzero_coefs,
                                callback=callback_distance,
                                n_jobs=n_jobs, learning_rate=learning_rate,
                                kernel_init_len=kernel_init_len, verbose=1,
                                dict_init=dict_init, random_state=rng_global)
learned_dict2.generating_dict = list(generating_dict)
learned_dict2.wasserstein = list()
learned_dict2.detection_rate = list()
learned_dict2.objective_error = list()

learned_dict2 = learned_dict2.fit(X)

plot_multivariate(array(learned_dict2.objective_error),
                  array(learned_dict2.detection_rate),
                  100. - array(learned_dict2.wasserstein),
                  n_iter=1, figname='multivariate-case-callback')