def fetch_results(comm, num_workers, workloads, Dts, Trace, \
        previous_stamps, Count_zh, Count_sz, count_h, count_z, \
        alpha_zh, beta_zs, Theta_zh, Psi_sz, kernel):

    # Zero the global counts; they are rebuilt below by summing the
    # partial counts received from each worker.
    Count_zh[:] = 0
    Count_zh_buff = np.zeros_like(Count_zh)

    Count_sz[:] = 0
    Count_sz_buff = np.zeros_like(Count_sz)

    count_h[:] = 0
    count_h_buff = np.zeros_like(count_h)

    count_z[:] = 0
    count_z_buff = np.zeros_like(count_z)

    P = kernel.get_state()
    P[:] = 0
    P_buff = np.zeros_like(P)

    for worker_id in xrange(1, num_workers + 1):
        comm.isend(worker_id, dest=worker_id, tag=Msg.SENDRESULTS.value)

        # Each worker sends back the topic assignments for its share of
        # the trace, followed by its partial count matrices.
        idx = workloads[worker_id - 1]
        assign = np.zeros(Trace[idx].shape[0], dtype='i')
        comm.Recv([assign, MPI.INT], source=worker_id)
        Trace[:, -1][idx] = assign

        comm.Recv([Count_zh_buff, MPI.INT], source=worker_id)
        Count_zh += Count_zh_buff

        comm.Recv([Count_sz_buff, MPI.INT], source=worker_id)
        Count_sz += Count_sz_buff

        comm.Recv([count_h_buff, MPI.INT], source=worker_id)
        count_h += count_h_buff

        comm.Recv([count_z_buff, MPI.INT], source=worker_id)
        count_z += count_z_buff

        comm.Recv([P_buff, MPI.DOUBLE], source=worker_id)
        P += P_buff

    # The kernel state is averaged across workers, not summed.
    P[:] = P / num_workers
    kernel.update_state(P)

    Theta_zh[:] = 0
    Psi_sz[:] = 0
    _aggregate(Count_zh, Count_sz, count_h, count_z, \
            alpha_zh, beta_zs, Theta_zh, Psi_sz)

    # Column-normalize so each column is a probability distribution.
    Theta_zh[:] = Theta_zh / Theta_zh.sum(axis=0)
    Psi_sz[:] = Psi_sz / Psi_sz.sum(axis=0)

    # Refresh the per-topic timestamp lists with the new assignments.
    for z in xrange(Count_zh.shape[0]):
        previous_stamps._clear_one(z)
        #dts_assigned = Dts[Trace[:, -1] == z].ravel().copy()
        #np.sort(dts_assigned)
        previous_stamps._extend(z, Dts[Trace[:, -1] == z][:, -1])
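
# A tiny, self-contained sketch (illustrative only, not used by the model)
# of the column-normalization convention applied to Theta_zh and Psi_sz in
# fetch_results: after normalizing, every column sums to one, i.e. each
# column is a probability distribution over topics per hypernode (Theta)
# and over sources per topic (Psi).
def _example_column_normalize():
    T = np.array([[2.0, 1.0], [6.0, 3.0]])  # toy smoothed counts
    T = T / T.sum(axis=0)                   # column-normalize
    assert np.allclose(T.sum(axis=0), 1.0)  # each column now sums to one
    return T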
def correlate_counts(Count_zh, Count_sz, count_h, count_z, \
        alpha_zh, beta_zs):

    # Create the smoothed probability estimates from the raw counts.
    Theta_zh = np.zeros_like(Count_zh, dtype='f8')
    Psi_sz = np.zeros_like(Count_sz, dtype='f8')

    _learn._aggregate(Count_zh, Count_sz, count_h, count_z, \
            alpha_zh, beta_zs, Theta_zh, Psi_sz)

    Theta_hz = Theta_zh.T * count_z
    Theta_hz = Theta_hz / Theta_hz.sum(axis=0)
    Psi_sz = Psi_sz / Psi_sz.sum(axis=0)

    # Similarity between every pair of topics: average the covariances of
    # the per-topic hypernode and source distributions.
    C = np.cov(Theta_hz.T) + np.cov(Psi_sz.T)
    C /= 2

    # Keep only the strict upper triangle (C is symmetric).
    C = np.triu(C, 1)
    return C
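
# Hypothetical usage sketch for correlate_counts (this helper is not called
# anywhere in this module). Because only the strict upper triangle survives
# np.triu(C, 1), the argmax of C identifies the most correlated pair of
# distinct topics, a natural merge candidate, provided at least one pairwise
# correlation is positive. The count arrays are assumed to come from a
# fitted model.
def _example_most_correlated_pair(Count_zh, Count_sz, count_h, count_z, \
        alpha_zh, beta_zs):
    C = correlate_counts(Count_zh, Count_sz, count_h, count_z, \
            alpha_zh, beta_zs)
    z1, z2 = np.unravel_index(C.argmax(), C.shape)
    return z1, z2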
def fit(trace_fpath, num_topics, alpha_zh, beta_zs, kernel, \
        residency_priors, num_iter, num_batches, mpi_mode, from_=0, \
        to=np.inf):
    '''
    Learns the latent topics from a temporal hypergraph trace. Here we
    perform asynchronous learning of the topics, similar to AD-LDA, as
    well as dynamic topic expansion/pruning.

    Parameters
    ----------
    trace_fpath : str
        The path of the trace. Each line should be a
        (timestamp, hypernode, source, destination) tuple, where the
        timestamp is a long (seconds or milliseconds from epoch).

    num_topics : int
        The number of latent spaces to learn

    alpha_zh : float
        The value of the alpha_zh hyperparameter

    beta_zs : float
        The value of the beta_zs (beta) hyperparameter

    kernel : Kernel object
        The kernel to use

    residency_priors : array of float
        The kernel hyperparameters

    num_iter : int
        The number of iterations to learn the model from

    num_batches : int
        Defines the number of batches of size num_iter

    Returns
    -------
    A dictionary with the results.
    '''
    assert num_batches >= 2
    comm = MPI.COMM_WORLD
    num_workers = comm.size - 1  # rank 0 is the master

    Dts, Trace, previous_stamps, Count_zh, Count_sz, \
            count_h, count_z, prob_topics_aux, Theta_zh, Psi_sz, \
            hyper2id, source2id = \
            dataio.initialize_trace(trace_fpath, num_topics, num_iter, \
            from_, to)

    if mpi_mode:
        workloads = generate_workload(Count_zh.shape[1], num_workers, Trace)
    all_idx = np.arange(Trace.shape[0], dtype='i4')

    # Each batch runs num_iter learning sweeps followed by one split pass
    # and one merge pass over the topics.
    for batch in xrange(num_batches):
        print('Now at batch', batch)
        if mpi_mode:
            for worker_id in xrange(1, num_workers + 1):
                comm.send(num_iter, dest=worker_id, tag=Msg.LEARN.value)
            dispatch_jobs(Dts, Trace, Count_zh, Count_sz, \
                    count_h, count_z, alpha_zh, beta_zs, kernel, \
                    residency_priors, workloads, num_workers, comm)
            manage(comm, num_workers)
            fetch_results(comm, num_workers, workloads, Dts, Trace, \
                    previous_stamps, Count_zh, Count_sz, count_h, \
                    count_z, alpha_zh, beta_zs, Theta_zh, Psi_sz, \
                    kernel)
        else:
            prob_topics_aux = np.zeros(Count_zh.shape[0], dtype='f8')
            _learn.em(Dts, Trace, previous_stamps, Count_zh, Count_sz, \
                    count_h, count_z, alpha_zh, beta_zs, \
                    prob_topics_aux, Theta_zh, Psi_sz, num_iter, \
                    num_iter * 2, kernel, False)

        print('Split')
        ll_per_z = np.zeros(count_z.shape[0], dtype='f8')
        _eval.quality_estimate(Dts, Trace, previous_stamps, \
                Count_zh, Count_sz, count_h, count_z, alpha_zh, \
                beta_zs, ll_per_z, all_idx, kernel)
        Trace, Count_zh, Count_sz, count_z, previous_stamps, \
                P = split(Dts, Trace, previous_stamps, Count_zh, \
                Count_sz, count_h, count_z, alpha_zh, beta_zs, \
                ll_per_z, kernel)
        # Rebuild the kernel for the (possibly changed) number of topics.
        kernel = kernel.__class__()
        kernel.build(Trace.shape[0], Count_zh.shape[0], residency_priors)
        if residency_priors.shape[0] > 0:
            kernel.update_state(P)

        print('Merge')
        ll_per_z = np.zeros(count_z.shape[0], dtype='f8')
        _eval.quality_estimate(Dts, Trace, previous_stamps, \
                Count_zh, Count_sz, count_h, count_z, alpha_zh, \
                beta_zs, ll_per_z, all_idx, kernel)
        Trace, Count_zh, Count_sz, count_z, previous_stamps, \
                P = merge(Dts, Trace, previous_stamps, Count_zh, \
                Count_sz, count_h, count_z, alpha_zh, beta_zs, \
                ll_per_z, kernel)
        kernel = kernel.__class__()
        kernel.build(Trace.shape[0], Count_zh.shape[0], residency_priors)
        if residency_priors.shape[0] > 0:
            kernel.update_state(P)

        # Reallocate: split/merge may have changed the number of topics.
        Theta_zh = np.zeros(shape=Count_zh.shape, dtype='f8')
        Psi_sz = np.zeros(shape=Count_sz.shape, dtype='f8')
        if batch == num_batches - 1:
            print('Computing probs')
            _learn._aggregate(Count_zh, Count_sz, count_h, count_z, \
                    alpha_zh, beta_zs, Theta_zh, Psi_sz)
        print('New nz', Count_zh.shape[0])

    if mpi_mode:
        # Tell every worker to shut down.
        for worker_id in xrange(1, num_workers + 1):
            comm.send(num_iter, dest=worker_id, tag=Msg.STOP.value)

    rv = prepare_results(trace_fpath, num_topics, alpha_zh, beta_zs, \
            kernel, residency_priors, num_iter, -1, Dts, Trace, \
            Count_zh, Count_sz, count_h, count_z, prob_topics_aux, \
            Theta_zh, Psi_sz, hyper2id, source2id, from_, to)
    rv['num_workers'] = np.asarray([num_workers])
    rv['num_batches'] = np.asarray([num_batches])
    rv['algorithm'] = np.asarray(['parallel dynamic'])
    return rv
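
# Hypothetical driver sketch for fit. `SomeKernel` is a stand-in for a
# concrete kernel class exposing the build/get_state/update_state interface
# used above; the file name and hyperparameter values are illustrative only.
# With mpi_mode=True this must run under MPI, e.g.:
#     mpiexec -n 5 python driver.py
def _example_fit_driver():
    kernel = SomeKernel()                        # hypothetical kernel class
    residency_priors = np.array([], dtype='f8')  # empty: kernel state unused
    rv = fit('trace.tsv', num_topics=20, alpha_zh=0.1, beta_zs=0.1, \
            kernel=kernel, residency_priors=residency_priors, \
            num_iter=500, num_batches=4, mpi_mode=True)
    print(rv['algorithm'], rv['num_workers'], rv['num_batches'])
    return rv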