# dict_init[i] /= norm(dict_init[i], 'fro')
dict_init = None
learned_dict = MiniBatchMultivariateDictLearning(n_kernels=n_kernels,
                                batch_size=batch_size, n_iter=n_iter,
                                n_nonzero_coefs=n_nonzero_coefs,
                                n_jobs=n_jobs, learning_rate=learning_rate,
                                kernel_init_len=kernel_init_len, verbose=1,
                                dict_init=dict_init, random_state=rng_global)

# Update the learned dictionary at each iteration and compute its distance
# to the generating dictionary
for i in range(max_iter):
    learned_dict = learned_dict.partial_fit(X)
    # Compute the detection rate
    detection_rate.append(detectionRate(learned_dict.kernels_,
                                        generating_dict, 0.99))
    # Compute the Wasserstein distance
    wasserstein.append(emd(learned_dict.kernels_, generating_dict,
                           'chordal', scale=True))
    # Keep track of the objective error
    objective_error.append(learned_dict.error_.sum())

plot_multivariate(array(objective_error), array(detection_rate),
                  100. - array(wasserstein), n_iter, 'multivariate-case')

# Another possibility is to rely on a callback function such as
def callback_distance(loc):
    ii, iter_offset = loc['ii'], loc['iter_offset']
    n_batches = loc['n_batches']
    # Integer division keeps the epoch count exact under Python 3
    if np.mod((ii - iter_offset) // int(n_batches), n_iter) == 0:
        # Compute the distances only every n_iter passes over the
        # batches, as in the previous case
        d = loc['dict_obj']
        d.wasserstein.append(emd(loc['dictionary'], d.generating_dict,
                                 'chordal', scale=True))
        d.detection_rate.append(detectionRate(loc['dictionary'],
                                              d.generating_dict, 0.99))
        d.objective_error.append(loc['current_cost'])
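# A minimal sketch of how that callback could be wired in. It assumes, as
# the keys read in callback_distance suggest, that
# MiniBatchMultivariateDictLearning accepts a `callback` argument invoked
# with the solver's locals() and that loc['dict_obj'] is the estimator
# itself. The learned_dict2 name, the attached attributes, and the
# 'multivariate-case-callback' figure name are illustrative, not taken
# from the code above.
learned_dict2 = MiniBatchMultivariateDictLearning(n_kernels=n_kernels,
                                batch_size=batch_size,
                                n_iter=max_iter*n_iter,  # one fit covers all max_iter partial_fit calls
                                n_nonzero_coefs=n_nonzero_coefs,
                                callback=callback_distance, n_jobs=n_jobs,
                                learning_rate=learning_rate,
                                kernel_init_len=kernel_init_len, verbose=1,
                                dict_init=dict_init, random_state=rng_global)
# Attach the attributes that callback_distance reads through loc['dict_obj']
learned_dict2.generating_dict = list(generating_dict)
learned_dict2.wasserstein = list()
learned_dict2.detection_rate = list()
learned_dict2.objective_error = list()

# A single fit replaces the explicit partial_fit loop above; the callback
# records the metrics as learning progresses
learned_dict2 = learned_dict2.fit(X)
plot_multivariate(array(learned_dict2.objective_error),
                  array(learned_dict2.detection_rate),
                  100. - array(learned_dict2.wasserstein),
                  n_iter, 'multivariate-case-callback')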