Example #1
def compute2ModelConsistency(model_1, model_2):
	"""Score how consistently two fitted models decompose the same data.

	Each factor's contribution to the reconstruction is flattened into a
	vector; the score is the fraction of squared correlation mass that the
	reordered cross-correlation matrix places on its diagonal (1.0 means
	the factors match one-to-one).
	"""
	num = 0
	model_1_vector = []
	model_2_vector = []
	nf_1 = computeNumFactors(model_1)
	nf_2 = computeNumFactors(model_2)
	if model_1.W.shape[2] != model_2.W.shape[2]:
		raise ValueError("Models have different numbers of factors!")
	# Reconstruct each factor's contribution to the data for model 1.
	for nf_i in range(nf_1):
		reconstructed_part_1 = vector_conv(
			model_1.W[:, :, nf_i],
			shiftVector(model_1.H.shift(0)[nf_i], model_1.H.L),
			model_1._shifts)
		model_1_vector.append(reconstructed_part_1.flatten())
	# Same for model 2.
	for nf_j in range(nf_2):
		reconstructed_part_2 = vector_conv(
			model_2.W[:, :, nf_j],
			shiftVector(model_2.H.shift(0)[nf_j], model_2.H.L),
			model_2._shifts)
		model_2_vector.append(reconstructed_part_2.flatten())
	# Correlate every factor of model 1 with every factor of model 2 and
	# reorder so the best-matching pairs sit on the diagonal.
	corr_matrix = getCorrMatrix(model_1_vector, model_2_vector)
	corr_matrix = reorderCorrMatrix(corr_matrix)
	for i in range(min(corr_matrix.shape[0], corr_matrix.shape[1])):
		num += corr_matrix[i, i]**2
	return num / np.linalg.norm(corr_matrix)**2
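As an illustration of the final score only (not of model fitting), the diagonal-over-Frobenius fraction can be reproduced on a toy correlation matrix with plain NumPy; the matrix values below are made up.

import numpy as np

# Hypothetical reordered correlation matrix between 3 factors of model 1
# (rows) and 3 factors of model 2 (columns).
toy_corr = np.array([[0.95, 0.10, 0.05],
                     [0.08, 0.90, 0.12],
                     [0.02, 0.15, 0.85]])

diag_mass = sum(toy_corr[i, i]**2 for i in range(min(toy_corr.shape)))
score = diag_mass / np.linalg.norm(toy_corr)**2  # near 1.0 when factors pair up cleanly
print(score)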
Example #2
    def plot(self):
        """
        Plot W, H, and the reconstructed X for each pattern and variable,
        for one seqnmf norm.
        """

        self.NMF_model_list = load_NMF_factors_single_norm(
            self.exp_dir, self.exp_name, self.seqnmf_norm_idx)
        for iV in range(self.num_vars):
            model = self.NMF_model_list[iV]
            num_nonzero_patterns = compute_num_factors(model)

            fig = plt.figure(figsize=(15, 4 * num_nonzero_patterns))
            fig.suptitle('Variable %s' % iV)
            gs = GridSpec(num_nonzero_patterns * 4, 5)
            for iF in range(num_nonzero_patterns):
                # Contribution of this pattern to the reconstruction of X.
                reconstruct_x = vector_conv(
                    model.W[:, :, iF],
                    shiftVector(model.H.shift(0)[iF], model.H.L),
                    model._shifts)

                # Left column: the factor W; thin top strip: its time trace H;
                # wide middle panel: the reconstructed X for this pattern.
                fig.add_subplot(gs[4 * iF + 1:4 * iF + 3, 0])
                plt.imshow(model.W.T[iF])
                fig.add_subplot(gs[4 * iF, 1:-1])
                plt.plot(model.H.shift(0)[iF])
                fig.add_subplot(gs[4 * iF + 1:4 * iF + 3, 1:-1])
                plt.imshow(reconstruct_x)

            plt.show()
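A standalone sketch of the same GridSpec layout for a single pattern, using dummy arrays so it runs outside the class; only NumPy and matplotlib are assumed, and the shapes are made up.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec

# Dummy stand-ins for one pattern: W is (neurons, lags), H is (time,),
# X is (neurons, time). Shapes are arbitrary.
W = np.random.rand(30, 21)
H = np.random.rand(500)
X = np.random.rand(30, 500)

fig = plt.figure(figsize=(15, 4))
gs = GridSpec(4, 5)
fig.add_subplot(gs[1:3, 0])     # W: narrow left panel
plt.imshow(W)
fig.add_subplot(gs[0, 1:-1])    # H: thin strip along the top
plt.plot(H)
fig.add_subplot(gs[1:3, 1:-1])  # X: wide central panel
plt.imshow(X)
plt.show()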
Example #3
def compute1ModelConsistency(model, Wreal, Hreal):
	"""Score a fitted model against known ground-truth factors and time traces.

	Returns the diagonal fraction of the reordered correlation matrix
	(as in compute2ModelConsistency) together with the unreordered matrix.
	"""
	num = 0
	model_1_vector = []
	real_vector = []
	nf_1 = computeNumFactors(model)
	# Per-factor reconstructions from the fitted model.
	for nf_i in range(nf_1):
		reconstructed_part_1 = vector_conv(
			model.W[:, :, nf_i],
			shiftVector(model.H.shift(0)[nf_i], model.H.L),
			model._shifts)
		model_1_vector.append(reconstructed_part_1.flatten())
	# Per-factor reconstructions from the ground-truth factors.
	for nf_j in range(len(Wreal)):
		reconstructed_part_2 = vector_conv(
			Wreal[nf_j], shiftVector(Hreal[nf_j], model.H.L), model._shifts)
		real_vector.append(reconstructed_part_2.flatten())
	corr_matrix = getCorrMatrix(model_1_vector, real_vector)
	corr_matrix_reordered = reorderCorrMatrix(np.copy(corr_matrix))
	for i in range(min(corr_matrix_reordered.shape[0], corr_matrix_reordered.shape[1])):
		num += corr_matrix_reordered[i, i]**2
	return num / np.linalg.norm(corr_matrix_reordered)**2, corr_matrix
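reorderCorrMatrix itself is not shown in this section. The reason the reordering is needed is that NMF returns factors in an arbitrary order, so the best pairing between fitted and reference factors has to be found before reading off the diagonal. One common way to do such a matching, not necessarily what reorderCorrMatrix does, is a Hungarian-algorithm assignment; a minimal sketch assuming a square, finite correlation matrix:

import numpy as np
from scipy.optimize import linear_sum_assignment

def reorder_by_best_match(corr_matrix):
    # Pair rows and columns so the total correlation of matched pairs is
    # maximal, then permute the columns to put those matches on the diagonal.
    rows, cols = linear_sum_assignment(corr_matrix, maximize=True)
    return corr_matrix[:, cols]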
Example #4
    def agg_data(self):
        """
		Aggregate the NMF data into a single file for ease of loading.
		Also calculate reconstruction and regularization errors for 
		each index and variable.
		"""

        cwt_matrix = load_cwt_matrix(self.exp_dir, self.exp_name)
        NMF_idxs = range(int(self.metadata['NMF']['seqnmf_norm_steps']))
        Ws = None
        Hs = None
        Xs = None
        errs = np.empty((len(NMF_idxs), self.num_vars, 2)) * np.nan

        for iR in NMF_idxs:
            print(iR)
            NMF_model_list = load_NMF_factors_single_norm(
                self.exp_dir, self.exp_name, iR)
            if Ws is None:
                W_shape = (len(NMF_idxs), self.num_vars,
                     self.num_max_patterns) + \
                     NMF_model_list[0].W[:, :, 0].shape
                Ws = np.empty(W_shape) * np.nan
            if Hs is None:
                H_shape = (len(NMF_idxs), self.num_vars,
                     self.num_max_patterns) + \
                     NMF_model_list[0].H.shift(0)[0].shape
                Hs = np.empty(H_shape) * np.nan
            if Xs is None:
                X_shape = (len(NMF_idxs), self.num_vars, self.num_max_patterns,
                           W_shape[-1], H_shape[-1])
                Xs = np.empty(X_shape) * np.nan

            for iV in range(self.num_vars):

                # Get W, H, X for each pattern. Full X is sum over patterns.
                for iP in range(self.num_max_patterns):
                    model = NMF_model_list[iV]
                    Ws[iR, iV, iP] = model.W[:, :, iP]
                    Hs[iR, iV, iP] = model.H.shift(0)[iP]
                    Xs[iR, iV, iP] = vector_conv(
                        Ws[iR, iV, iP],
                        shiftVector(model.H.shift(0)[iP], model.H.L),
                        model._shifts)

                norm = np.linalg.norm(cwt_matrix[:, :, iV])
                reconstruct_err = np.linalg.norm(
                    cwt_matrix[:, :, iV] - np.sum(Xs[iR, iV], axis=0)) / norm
                regularize_err = compute_scfo_reg(
                    ShiftMatrix(cwt_matrix[:, :, iV], self.pattern_length),
                    model.W, model.H, model._shifts, model._kernel) / norm**2
                errs[iR, iV, 0] = reconstruct_err
                errs[iR, iV, 1] = regularize_err

        save_all_NMF_data(self.exp_dir, self.exp_name, Ws, Hs, Xs, errs)
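The reconstruction error stored above is a relative Frobenius error: the norm of the residual between the data matrix and the sum of the per-pattern reconstructions, divided by the norm of the data. A minimal illustration with made-up arrays (the regularization term, compute_scfo_reg, is library-specific and not reproduced here):

import numpy as np

# Made-up data matrix and two per-pattern reconstructions of the same shape.
X = np.random.rand(30, 500)
X_parts = [np.random.rand(30, 500) * 0.5, np.random.rand(30, 500) * 0.5]

norm = np.linalg.norm(X)
reconstruct_err = np.linalg.norm(X - np.sum(X_parts, axis=0)) / norm
print(reconstruct_err)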
Example #5
def generateGTData(num_factors,
                   num_neurons,
                   maxlag,
                   total_time,
                   factor_sparseness,
                   time_sparseness,
                   noise_mult_factor,
                   random_state=None):
    """Generate ground-truth data as a sum of sparse factors convolved with
    sparse time traces, plus uniform additive noise.

    Returns (factor_list, timetrace_list, partial_x_list, data), where data
    has shape (num_neurons, total_time).
    """
    data = np.zeros((num_neurons, total_time))
    partial_x_list = []
    factor_list = []
    timetrace_list = []
    for i in range(num_factors):
        # Derive per-factor seeds so each factor and time trace is
        # reproducible but distinct.
        if random_state is not None:
            temp_random_state = random_state + 3 * i + 44
            tt_random_state = random_state + i**2 + 8
        else:
            temp_random_state = None
            tt_random_state = None
        # Sparse factor of shape (2 * maxlag + 1, num_neurons);
        # `rand` is presumably scipy.sparse.rand.
        temp_factor = rand(num_neurons,
                           2 * maxlag + 1,
                           density=factor_sparseness,
                           random_state=temp_random_state).toarray().T
        # Sparse time trace of shape (total_time,).
        temp_timetrace = rand(1,
                              total_time,
                              density=time_sparseness,
                              random_state=tt_random_state).toarray().reshape(
                                  (total_time, ))
        # Convolve the factor with its time trace to get this factor's
        # contribution to the data.
        partial_x_temp = vector_conv(temp_factor,
                                     shiftVector(temp_timetrace, maxlag),
                                     np.arange(maxlag * 2 + 1) - maxlag)
        partial_x_list.append(partial_x_temp)
        data += partial_x_temp
        factor_list.append(temp_factor)
        timetrace_list.append(temp_timetrace)
    # Note: the additive noise is not seeded by random_state.
    data += noise_mult_factor * np.random.rand(num_neurons, total_time)
    return factor_list, timetrace_list, partial_x_list, data
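A hypothetical call, with made-up parameter values, to show the return order and the shapes implied by the code above; the returned factor and time-trace lists look like what compute1ModelConsistency takes as Wreal and Hreal.

# Parameter values are illustrative only.
Wreal, Hreal, partials, data = generateGTData(
    num_factors=3,
    num_neurons=50,
    maxlag=10,
    total_time=1000,
    factor_sparseness=0.1,
    time_sparseness=0.05,
    noise_mult_factor=0.01,
    random_state=42)

# data:      (num_neurons, total_time) == (50, 1000)
# Wreal[i]:  (2 * maxlag + 1, num_neurons) == (21, 50)
# Hreal[i]:  (total_time,) == (1000,)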