def run_fa(dataset, min_components, max_components):
    """Sweep FactorAnalysis over a range of component counts for `dataset`.

    For every n_components in [min_components, max_components) and each SVD
    method, records the fit time and average log-likelihood, plots both
    metrics against the component count, and writes the raw table to
    output-csv/<dataset>-fa.csv.

    Parameters
    ----------
    dataset : str
        Name understood by load_dataset().
    min_components, max_components : int
        Half-open range of component counts to evaluate.
    """
    X, y = load_dataset(dataset)
    results = []
    for n_components in range(min_components, max_components):
        print('n_components: ', n_components)
        for svd_method in ['lapack', 'randomized']:
            # Fit and score the same fresh copy (the original made the copy
            # but then fit/scored X directly, leaving the copy unused).
            data = X.copy()
            fa = FactorAnalysis(n_components=n_components,
                                svd_method=svd_method,
                                random_state=random_state)
            t0 = time()
            fa.fit(data)
            # Row layout: [n_components, svd_method, fit_time, log_likelihood]
            results.append([n_components, svd_method, time() - t0,
                            fa.score(data)])
    # N-Components vs Log Likelihood
    plot_results(np.array(results), trends_index=1, x_axis_index=0,
                 x_axis_label='K-Components', y_axis_index=[3],
                 y_axis_label='Log Likelihood',  # fixed typo: was 'Liklihood'
                 title=dataset.title() + ': FactorAnalysis',
                 filename='-'.join(['fa', dataset, 'loglike']))
    # N-Components vs Time
    plot_results(np.array(results), trends_index=1, x_axis_index=0,
                 x_axis_label='K-Components', y_axis_index=[2],
                 y_axis_label='Time',
                 title=dataset.title() + ': FactorAnalysis',
                 filename='-'.join(['fa', dataset, 'time']))
    results = np.array(results)
    np.savetxt('output-csv/' + ('-'.join([dataset, 'fa.csv'])), results,
               delimiter=",", fmt="%s")
def run_pca(dataset, min_components, max_components):
    """Evaluate PCA on `dataset` across a range of component counts.

    For each n_components and each SVD solver, records fit time, model score
    and explained-variance statistics, plots every metric against the
    component count, and saves the raw table to output-csv/<dataset>-pca.csv.
    """
    X, y = load_dataset(dataset)
    data = X
    n_samples, n_features = data.shape
    n_labels = len(np.unique(y))
    labels = y
    results = []
    for n_components in range(min_components, max_components):
        print('n_components: ', n_components)
        for svd_solver in ['auto', 'full', 'randomized']:
            data = X.copy()
            pca = decomposition.PCA(n_components=n_components,
                                    svd_solver=svd_solver,
                                    random_state=random_state)
            t0 = time()
            pca.fit(X)
            elapsed = time() - t0
            # Variance explained by the last retained component, and the
            # cumulative percentage captured up to it.
            last_ratio = pca.explained_variance_ratio_[n_components - 1]
            cum_pct = np.cumsum(np.round(pca.explained_variance_ratio_,
                                         decimals=3) * 100)[n_components - 1]
            results.append([n_components, svd_solver, elapsed,
                            pca.score(data), last_ratio, cum_pct])
    # One plot per metric: (column index, y-axis label, filename suffix).
    for col, label, suffix in [(3, 'Score', 'score'),
                               (4, 'Variance Ratio', 'variance'),
                               (5, '% Variance', 'pvar'),
                               (2, 'Time', 'time')]:
        plot_results(np.array(results), trends_index=1, x_axis_index=0,
                     x_axis_label='K-Components', y_axis_index=[col],
                     y_axis_label=label,
                     title=dataset.title() + ': PCA',
                     filename='-'.join(['pca', dataset, suffix]))
    results = np.array(results)
    np.savetxt('output-csv/' + ('-'.join([dataset, 'pca.csv'])), results,
               delimiter=",", fmt="%s")
def run_ica(dataset, min_components, max_components):
    """Sweep FastICA over component counts and record kurtosis diagnostics.

    For each n_components, fits FastICA once and emits one result row per
    candidate component index containing the per-component kurtosis, its
    distance from a Gaussian (kurtosis 3), a running normalised sum of those
    distances, and the overall kurtosis of the mixing components and the
    recovered sources.  Results are plotted and written to
    output-csv/<dataset>-ica.csv.
    """
    X, y = load_dataset(dataset)
    results = []
    for n_components in range(min_components, max_components):
        print('n_components: ', n_components)
        ica = decomposition.FastICA(n_components=n_components,
                                    random_state=random_state, whiten=True)
        t0 = time()
        ica.fit(X)
        elapsed = time() - t0
        # Sorted kurtosis values of the unmixing components.
        kurtoses = np.sort(kurtosis(ica.components_))
        # BUGFIX: the original recorded `time() - t0` inside the inner loop
        # (so the "fit time" column grew with unrelated inner-loop work even
        # though `elapsed` had been computed) and recomputed ica.transform(X)
        # plus the overall kurtoses on every inner iteration although they
        # are loop-invariant.  Both are hoisted here.
        sources = ica.transform(X)
        comp_kurt_overall = kurtosis(ica.components_, axis=None)
        source_kurt_overall = kurtosis(sources, axis=None)
        kurt_vsgauss_vals = []
        for component_index in range(min_components, max_components):
            if component_index <= n_components:
                kurt = kurtoses[component_index - 1]
                kurt_vsgauss = abs(3 - kurt)  # distance from Gaussian kurtosis
            else:
                # Index beyond the fitted components: pad with zeros.
                kurt = 0
                kurt_vsgauss = 0
            kurt_vsgauss_vals.append(kurt_vsgauss)
            kurt_norm = sum(kurt_vsgauss_vals) / max_components
            results.append([n_components, component_index, elapsed,
                            ica.n_iter_, kurt, kurt_vsgauss, kurt_norm,
                            comp_kurt_overall, source_kurt_overall])
    # N-Components vs Kurtosis (Component)
    plot_results(np.array(results), trends_index=1, x_axis_index=0,
                 x_axis_label='Dimensions', y_axis_index=[4],
                 y_axis_label='Kurtosis',
                 title=dataset.title() + ': ICA',
                 filename='-'.join(['ica', dataset, 'kurt']))
    # N-Components vs Kurtosis (vs Gaussian)
    plot_results(np.array(results), trends_index=1, x_axis_index=0,
                 x_axis_label='Dimensions', y_axis_index=[5],
                 y_axis_label='Kurtosis (vs Gaussian)',
                 title=dataset.title() + ': ICA',
                 filename='-'.join(['ica', dataset, 'kurt', 'vsgauss']))
    # N-Components vs normalised Kurtosis (vs Gaussian)
    plot_results(np.array(results), constant_index=[1],
                 constant_value=[max_components - 1], x_axis_index=0,
                 x_axis_label='Dimensions', y_axis_index=[6],
                 y_axis_label='Kurtosis (%)', trend_labels=[''],
                 title=dataset.title() + ': ICA',
                 filename='-'.join(['ica', dataset, 'kurt', 'norm']))
    # N-Components vs Overall Kurtosis (Component)
    plot_results(np.array(results), constant_index=[1], constant_value=[1],
                 x_axis_index=0, x_axis_label='K-Components',
                 y_axis_index=[7], y_axis_label='Overall Kurtosis (Component)',
                 trend_labels=[''], title=dataset.title() + ': ICA',
                 filename='-'.join(['ica', dataset, 'kurt', 'overall', 'comp']))
    # N-Components vs Overall Kurtosis (Source)
    plot_results(np.array(results), constant_index=[1], constant_value=[1],
                 x_axis_index=0, x_axis_label='K-Components',
                 y_axis_index=[8], y_axis_label='Overall Kurtosis (Source)',
                 trend_labels=[''], title=dataset.title() + ': ICA',
                 filename='-'.join(['ica', dataset, 'kurt', 'overall', 'source']))
    # N-Components vs Iterations
    plot_results(np.array(results), constant_index=[1], constant_value=[1],
                 x_axis_index=0, x_axis_label='K-Components',
                 y_axis_index=[3], y_axis_label='Iterations', trend_labels=[''],
                 title=dataset.title() + ': ICA',
                 filename='-'.join(['ica', dataset, 'iter']))
    # N-Components vs Time
    plot_results(np.array(results), constant_index=[1], constant_value=[1],
                 x_axis_index=0, x_axis_label='K-Components',
                 y_axis_index=[2], y_axis_label='Time', trend_labels=[''],
                 title=dataset.title() + ': ICA',
                 filename='-'.join(['ica', dataset, 'time']))
    results = np.array(results)
    np.savetxt('output-csv/' + ('-'.join([dataset, 'ica.csv'])), results,
               delimiter=",", fmt="%s")
# Build the inversion binaries, then run the inversion from the data dir.
os.system("make all")
os.chdir("../data")
#************************
# RUN INVERSION
os.system("../bin/hv_na")
# NOTE(review): sys.exit() terminates the script right here, so everything
# below (moving results, copying parameters, plotting) is currently
# unreachable — presumably a leftover debugging stop; confirm before
# relying on the post-processing steps.
sys.exit()
#************************
os.chdir("../../")
# Move results to result folder
os.system("mv full_list.txt " + out_folder)
# Save inversion parameters to keep track
os.system("cp NA/data/na.in " + out_folder)
os.system("cp NA/data/hv_files/hv.in " + out_folder)
os.system("cp NA/data/hv_files/NA_MDL/hv_param " + out_folder)
# hv.in is treated as fixed-format: 0-based line 6 holds the observed-curve
# filename and line 8 the reference-model filename.
lines = open("NA/data/hv_files/hv.in").readlines()
# Save observed curve to results folder
observed_data = lines[6].split()[0]
os.system("cp NA/data/hv_files/OBS/" + observed_data + " " + out_folder)
# Save model used for litho to results folder
ref_model = lines[8].split()[0]
os.system("cp NA/data/hv_files/REF_MDL/" + ref_model + " " + out_folder)
# Plot results
plot_results(out_folder, observed_data, ref_model)
def optimize_k_means(dataset, min_clusters, max_clusters):
    """Grid-search KMeans cluster counts and init schemes on `dataset`.

    Every (n_clusters, init) pair is fitted once; inertia, timing and a set
    of external/internal cluster-quality scores are collected, plotted
    against the cluster count, and written to
    output-csv/<dataset>-kmeans.csv.
    """
    if '-' in dataset:
        X, y = load_reduced(dataset)
    else:
        X, y = load_dataset(dataset)
    data = X
    n_samples, n_features = data.shape
    n_labels = len(np.unique(y))
    labels = y
    results = []
    for n_clusters in range(min_clusters, max_clusters):
        print('n_clusters: ', n_clusters)
        for init in ['k-means++', 'random']:
            estimator = KMeans(n_clusters=n_clusters, init=init, n_init=10,
                               random_state=random_state)
            t0 = time()
            estimator.fit(data)
            elapsed = time() - t0
            found = estimator.labels_
            # Row layout: cluster count, init scheme, time, inertia, then
            # the individual quality scores (columns 4-9).
            results.append([
                n_clusters,
                init,
                elapsed,
                estimator.inertia_,
                metrics.homogeneity_score(labels, found),
                metrics.completeness_score(labels, found),
                metrics.v_measure_score(labels, found),
                metrics.adjusted_rand_score(labels, found),
                metrics.adjusted_mutual_info_score(labels, found),
                metrics.silhouette_score(data, found),
            ])
    # Per-init-scheme plots: (column index, y-axis label, filename suffix).
    for col, label, suffix in [
            (3, 'Inertia', 'inertia'),
            (4, 'Homogeneity Score', 'homogeneity'),
            (5, 'Completeness Score', 'completeness'),
            (6, 'V-Measure Score', 'vmeasure'),
            (7, 'Adjusted Random Score', 'adjustedrand'),
            (8, 'Adjusted Mutual Information Score', 'adjustedmi'),
            (9, 'Silhouette Score', 'silhouette'),
            (2, 'Time', 'time')]:
        plot_results(np.array(results), trends_index=1, x_axis_index=0,
                     x_axis_label='K-Clusters', y_axis_index=[col],
                     y_axis_label=label,
                     title=dataset.title() + ': K-means',
                     filename='-'.join(['km', dataset, suffix]))
    # Combined score plots restricted to the 'k-means++' init.
    plot_results(np.array(results), constant_index=[1],
                 constant_value=['k-means++'], x_axis_index=0,
                 x_axis_label='K-Clusters', y_axis_index=[4, 5, 6],
                 y_axis_label='Score',
                 trend_labels=['Homogeneity', 'Completeness', 'V-Measure'],
                 title=dataset.title() + ': K-means',
                 filename='-'.join(['km', dataset, 'kmeans', 'vmeasure']))
    plot_results(np.array(results), constant_index=[1],
                 constant_value=['k-means++'], x_axis_index=0,
                 x_axis_label='K-Clusters', y_axis_index=[6, 8, 9],
                 y_axis_label='Score',
                 trend_labels=['V-Measure', 'Adjusted Mutual Information',
                               'Silhouette'],
                 title=dataset.title() + ': K-means',
                 filename='-'.join(['km', dataset, 'kmeans', 'scores']))
    results = np.array(results)
    np.savetxt('output-csv/' + ('-'.join([dataset, 'kmeans.csv'])), results,
               delimiter=",", fmt="%s")
def run_rp(dataset, min_components, max_components):
    """Repeatedly fit Gaussian random projections and summarise stability.

    For each component count and each repetition budget in
    {10, 40, 100, 500}, fits that many independent random projections and
    records the mean/std of the fit time, the component count, the kurtosis
    of the projection matrix, and the reconstruction error.  Summary rows
    are plotted and saved to output-csv/<dataset>-rp.csv.
    """
    X, y = load_dataset(dataset)
    data = X
    n_samples, n_features = data.shape
    n_labels = len(np.unique(y))
    labels = y
    results = []
    for n_components in range(min_components, max_components):
        print('n_components: ', n_components)
        for max_iters in [10, 40, 100, 500]:
            fit_times, comp_counts, kurt_vals, recon_errors = [], [], [], []
            for _ in range(0, max_iters):
                data = X.copy()
                # No fixed random_state: each repetition draws a fresh
                # projection matrix, which is what the std columns measure.
                rp = GaussianRandomProjection(n_components=n_components)
                t0 = time()
                rp.fit(X)
                fit_times.append(time() - t0)
                comp_counts.append(rp.n_components_)
                kurt_vals.append(kurtosis(rp.components_, axis=None))
                # Project, then map back through the projection matrix to
                # measure reconstruction (information-loss) error.
                matrix = rp.components_
                projected = rp.transform(data)
                reconstructed = np.dot(projected, matrix)
                recon_errors.append(
                    metrics.mean_squared_error(data, reconstructed))
            # Row layout: [k, reps, mean time, mean components,
            #              mean/std kurtosis, mean/std loss].
            results.append([
                n_components,
                max_iters,
                np.mean(np.array(fit_times)),
                np.mean(np.array(comp_counts)),
                np.mean(np.array(kurt_vals)),
                np.std(np.array(kurt_vals)),
                np.mean(np.array(recon_errors)),
                np.std(np.array(recon_errors)),
            ])
    # (column index, y-axis label, filename suffix) for each summary plot.
    for col, label, suffix in [
            (6, 'Reconstruction Error', 'loss'),
            (7, 'Reconstruction Error (STD)', 'lossstd'),
            (4, 'Kurtosis', 'kurtosis'),
            (5, 'Kurtosis (STD)', 'kurtstd'),
            (3, 'Components', 'comp'),
            (2, 'Time', 'time')]:
        plot_results(np.array(results), trends_index=1, x_axis_index=0,
                     x_axis_label='K-Components', y_axis_index=[col],
                     y_axis_label=label,
                     title=dataset.title() + ': RP',
                     filename='-'.join(['rp', dataset, suffix]))
    results = np.array(results)
    np.savetxt('output-csv/' + ('-'.join([dataset, 'rp.csv'])), results,
               delimiter=",", fmt="%s")
def main():
    """Configure and plot the bandit/FDR experiment described in the paper.

    Reads settings from the module-level `args` namespace, derives the
    experiment grid for the chosen distribution style (or the New Yorker
    data), then calls plot_results / plot_results_punif accordingly.
    """
    # Get arguments
    ny_data = args.ny_data
    mu_style = args.mu_style
    dist_style = args.dist_style
    numrunrange = []
    #### Set experimental settings in paper
    pi1 = 0.4
    hyp_style = 1
    top_arms = 10
    alpha0 = 0.1  # generally 0.1
    FDRrange = [0]
    algrange = [0, 1]  # both 0: best-arm MAB and 1: Uniform sampling
    FDR = 0
    # Power or FDR plots
    if args.power_plot == 1:
        punif = 0
    else:
        punif = 1
    if (dist_style == 1):
        mu_gap = 0.2
        mu_best = 8
        num_hyp = 500
        # fix_na = [50]
        # fix_tt = [300]
        # armrange = np.arange(10, 121, 10)
        # truncrange = np.arange(100, 801, 100)
        armrange = np.arange(50, 51, 10)
        truncrange = np.arange(100, 701, 100)
        fix_na = [30]
        fix_tt = [300]
        # NOTE(review): pi1range, fix_pi1, plot_numrun and plot_start are
        # only bound in this punif branch.  If args.power_plot == 0 while
        # dist_style != 1, the plot_results_punif calls below would raise
        # NameError — confirm the valid argument combinations with callers.
        if punif == 1:
            armrange = [30]
            algrange = [0]
            truncrange = [200]
            pi1range = np.arange(0.1, 1, 0.1)
            FDRrange = [0, 3, 5]
            num_hyp = 1000
            fix_pi1 = [0.4]
            plot_numrun = 80
            plot_start = 1
    elif (dist_style == 0):
        mu_gap = 0.3
        mu_best = 0.7
        num_hyp = 50
        # fix_na = [50]
        # fix_tt = [5000]
        # armrange = np.arange(5, 36, 5)
        # truncrange = np.arange(5000, 25001, 1000)
        truncrange = np.arange(5000, 10001, 5000)
        armrange = np.arange(5, 11, 5)
        fix_na = [10]
        fix_tt = [10000]
    # Overwrite all settings if New Yorker data
    if (ny_data == 1):
        dist_style = 0
        mu_gap = 0
        mu_style = 0
        mu_best = 0
        num_hyp = 30
        top_arms = 5
        fix_tt = [13000]
        # armrange = np.arange(10, 81, 10)
        truncrange = np.arange(13000, 13001, 1000)
        armrange = np.arange(10, 21, 10)
    ##### Plot results
    # Plot vs. FDR
    if args.power_plot == 0:
        # Plot over pi1
        plot_results_punif(truncrange, armrange, [0], dist_style, mu_gap,
                           mu_style, hyp_style, pi1range, num_hyp, 0, 0,
                           top_arms, FDRrange, mu_best, alpha0,
                           plot_numrun=plot_numrun, punif=1,
                           plot_start=plot_start)
        # Plot over time
        plot_results_punif(truncrange, armrange, algrange, dist_style,
                           mu_gap, mu_style, hyp_style, fix_pi1, num_hyp,
                           0, 0, top_arms, [0], mu_best, alpha0,
                           plot_numrun=plot_numrun, punif=1,
                           plot_start=plot_start)
    # Plot vs. TT
    if (ny_data == 0):
        plot_results(truncrange, fix_na, algrange, dist_style, mu_gap,
                     mu_style, hyp_style, pi1, num_hyp, 0, 0, top_arms,
                     FDR, mu_best, 0, 0, 0)
    # Plot vs. number of arms
    # NOTE(review): source was whitespace-mangled; this call is placed
    # outside the ny_data == 0 guard because it uses fix_tt/armrange, which
    # the New Yorker branch also sets — verify against the original repo.
    plot_results(fix_tt, armrange, algrange, dist_style, mu_gap, mu_style,
                 hyp_style, pi1, num_hyp, 0, 0, top_arms, FDR, mu_best,
                 0, 0, 0)
# NOTE(review): this chunk begins inside a call whose opening line is outside
# the visible source; the arguments below close that call.
maximum_flow, lower_bound_ramping, upper_bound_ramping, origin_node_line_relationship, end_node_line_relationship)
#model.pprint()
# Solve the optimisation model with CPLEX, echoing solver output (tee=True).
solver = SolverFactory("cplex")
results = solver.solve(model, tee = True)
# Dump per-unit, per-period generation and flow values as CSV.
# NOTE(review): results_file is never closed or flushed — consider
# `with open('results.csv', 'w') as results_file:` so buffered rows are not
# lost on an early exit.
results_file = open('results.csv', 'w')
results_file.write("generating unit, time_period, generation_unit_power, flow" + "\n")
for generating_unit in model.indexes_generating_units:
    for time_period in model.indexes_time_periods:
        results_file.write(str(generating_unit) + "," + str(time_period) + "," + str(model.power_generating_units[generating_unit, time_period].value) + "," + str(model.flow[generating_unit, time_period].value) + "\n")
# Plot the solved model's results.
plot_results(model)
# Analysis pipeline driver: each stage lives in its own module and is pulled
# in via star-import, then executed in sequence below.
from calculate_multicollinearity import *
from train_models import *
from evaluate_models_performance import *
from evaluate_feature_importance import *
from create_table_for_performance_results import *
from plot_results import *

# Run the full pipeline end to end.  No values are passed between stages —
# presumably each stage reads the previous stage's on-disk output; verify
# in the individual modules.
calculate_multicollinearity()
train_models()
evaluate_models_performance()
evaluate_feature_importance()
create_table_for_performance_results()
plot_results()
def optimize_em(dataset, min_components, max_components):
    """Grid-search Gaussian-mixture (EM) settings on `dataset`.

    Fits a GaussianMixture for every combination of component count, init
    scheme and covariance type, collecting timing, iteration count, AIC/BIC
    and clustering-quality scores.  All metrics are plotted and the raw
    table is written to output-csv/<dataset>-expmax.csv.
    """
    if '-' in dataset:
        X, y = load_reduced(dataset)
    else:
        X, y = load_dataset(dataset)
    data = X
    n_samples, n_features = data.shape
    n_labels = len(np.unique(y))
    labels = y
    results = []
    for n_components in range(min_components, max_components):
        print('n_components: ', n_components)
        for init_params in ['kmeans', 'random']:
            for covariance_type in ['full', 'tied', 'diag', 'spherical']:
                # reg_covar keeps covariances well-conditioned across all
                # covariance types.
                estimator = GaussianMixture(n_components=n_components,
                                            init_params=init_params,
                                            n_init=10,
                                            random_state=random_state,
                                            covariance_type=covariance_type,
                                            reg_covar=1e-2)
                t0 = time()
                estimator.fit(data)
                elapsed = time() - t0
                predictions = estimator.predict(data)
                # Columns: 0 n_components, 1 init, 2 covariance, 3 time,
                # 4 iterations, 5 AIC, 6 BIC, 7-12 quality scores.
                results.append([
                    n_components,
                    init_params,
                    covariance_type,
                    elapsed,
                    estimator.n_iter_,
                    estimator.aic(data),
                    estimator.bic(data),
                    metrics.homogeneity_score(labels, predictions),
                    metrics.completeness_score(labels, predictions),
                    metrics.v_measure_score(labels, predictions),
                    metrics.adjusted_rand_score(labels, predictions),
                    metrics.adjusted_mutual_info_score(labels, predictions),
                    metrics.silhouette_score(data, predictions),
                ])
    # Per-init plots with covariance fixed to 'full':
    # (column index, y-axis label, filename suffix).
    for col, label, suffix in [
            (5, 'Akaike Info Criterion', 'akaike'),
            (6, 'Bayesian Info Criterion', 'bayesian'),
            (7, 'Homogeneity Score', 'homogeneity'),
            (8, 'Completeness Score', 'completeness'),
            (9, 'V-Measure Score', 'vmeasure'),
            (10, 'Adjusted Random Score', 'adjustedrand'),
            (11, 'Adjusted Mutual Information Score', 'adjustedmi'),
            (12, 'Silhouette Score', 'silhouette'),
            (3, 'Time', 'time')]:
        plot_results(np.array(results), constant_index=[2],
                     constant_value=['full'], trends_index=1, x_axis_index=0,
                     x_axis_label='K-Components', y_axis_index=[col],
                     y_axis_label=label,
                     title=dataset.title() + ': Expectation-Maximization',
                     filename='-'.join(['em', dataset, suffix]))
    # Multi-trend plots with both init ('kmeans') and covariance ('full')
    # fixed: (column list, trend labels, filename suffix).
    for cols, trend_labels, suffix in [
            ([7, 8, 9],
             ['Homogeneity', 'Completeness', 'V-Measure'], 'vmeasure'),
            ([5, 6], ['Akaike', 'Bayesian'], 'criterion'),
            ([9, 11, 12],
             ['V-Measure', 'Adjusted Mutual Information', 'Silhouette'],
             'scores')]:
        plot_results(np.array(results), constant_index=[1, 2],
                     constant_value=['kmeans', 'full'], x_axis_index=0,
                     x_axis_label='K-Components', y_axis_index=cols,
                     y_axis_label='Score', trend_labels=trend_labels,
                     title=dataset.title() + ': Expectation-Maximization',
                     filename='-'.join(['em', dataset, 'kmeans', suffix]))
    # AIC/BIC per covariance type, init fixed to 'kmeans'.
    for col, label, suffix in [(5, 'Akaike Info Criterion', 'aic'),
                               (6, 'Bayesian Info Criterion', 'bic')]:
        plot_results(np.array(results), constant_index=[1],
                     constant_value=['kmeans'], trends_index=2, x_axis_index=0,
                     x_axis_label='K-Components', y_axis_index=[col],
                     y_axis_label=label,
                     title=dataset.title() + ': Expectation-Maximization',
                     filename='-'.join(['em', dataset, 'kmeans', suffix]))
    results = np.array(results)
    np.savetxt('output-csv/' + ('-'.join([dataset, 'expmax.csv'])), results,
               delimiter=",", fmt="%s")
# Dirichlet ('D') boundary condition at the second boundary node, applied
# with the "large number" (penalty) trick: scaling the diagonal entry and
# the RHS by a huge factor forces the solved value at that node to the
# prescribed boundary value.
if WB[1]['typ'] == 'D':
    ind_wezla = WB[1]['ind']          # node index (1-based, from input data)
    wart_war_brzeg = WB[1]['wartosc']  # prescribed boundary value
    iwp = ind_wezla - 1  # node index in the Python (0-based) array
    wzmacniacz = 10**14  # penalty amplifier ("big number")
    temp = A[iwp, iwp]
    A[iwp, iwp] = temp * wzmacniacz
    b[iwp] = temp * wzmacniacz * wart_war_brzeg
# Neumann ('N') boundary conditions are not implemented yet
# (the printed message is Polish for "Not implemented yet").
if WB[0]['typ'] == 'N':
    print('Jeszcze nie zaimplementowano')
if WB[1]['typ'] == 'N':
    print('Jeszcze nie zaimplementowano')
print(A)
print('\n')
print(b)
print('\n')
# Solve the linear system A x = b
x = np.linalg.solve(A, b)
print(x)
# Display a graphical interpretation of the result
plot_results(wezly, x)