def exct_mtf_autoencoder_hierarchy(n_clicks, mtf_data, ae_data, hrc_data):
    """Run the MTF -> autoencoder -> hierarchical-clustering pipeline.

    Loads the time series, encodes each one as a Markov Transition Field
    image, compresses the images to feature vectors with a convolutional
    autoencoder, clusters the features hierarchically, and publishes the
    labels via send_result_data.

    Args:
        n_clicks: Dash button click counter (unused beyond triggering).
        mtf_data: list whose first element holds the MTF settings
            ('image_size', 'n_bins', 'mtf_strategy').
        ae_data: list whose first element holds the autoencoder settings.
        hrc_data: list whose first element holds the clustering settings
            ('number_of_cluster', 'linkage').

    Returns:
        An empty list (Dash callback output).
    """
    print("MTF Autoencoder & Hierarchy")
    mtf = mtf_data[0]
    ae = ae_data[0]
    hrc = hrc_data[0]

    # Load raw + normalized series, then resample to the image edge length.
    result, result_norm = initialize_data()
    resampled = exec_ts_resampler(result_norm, mtf['image_size'])
    series = resampled.reshape(resampled.shape[0], 1, resampled.shape[1])

    # Encode each series as an MTF image and add a channel axis for the CNN.
    images = toMTFdata(tsdatas=series,
                       image_size=mtf['image_size'],
                       n_bins=mtf['n_bins'],
                       strategy=mtf['mtf_strategy'])
    images = np.expand_dims(images, axis=3)

    # Learning rate is scaled from the UI slider value (3e-7 * 10^slider).
    learning_rate = (3e-7) * (10 ** ae['learning_rate'])
    features = fit_autoencoder(images, mtf['image_size'],
                               ae['dimension_feature'], ae['optimizer'],
                               learning_rate, ae['activation_function'],
                               ae['loss_function'], ae['batch_size'],
                               ae['epoch'])
    print(f'feature shape{features.shape}')

    # Cluster the latent features and push the labels to the UI.
    cluster = hierarchicalClustering(features, hrc['number_of_cluster'],
                                     hrc['linkage'])
    send_result_data(result, hrc['number_of_cluster'],
                     "MTF Autoencoder & Hierarchy", cluster.labels_, features)
    return []
def exct_gaf_autoencoder_dbscan(n_clicks, gaf_data, ae_data, dbs_data):
    """Run the GAF -> autoencoder -> DBSCAN pipeline.

    Loads the time series, encodes each one as a Gramian Angular Field
    image, compresses the images to feature vectors with a convolutional
    autoencoder, clusters the features with DBSCAN, and publishes the
    labels via send_result_data.

    Args:
        n_clicks: Dash button click counter (unused beyond triggering).
        gaf_data: list whose first element holds the GAF settings
            ('image_size', 'gaf_method').
        ae_data: list whose first element holds the autoencoder settings.
        dbs_data: list whose first element holds the DBSCAN settings
            ('epsilon', 'min_sample').

    Returns:
        An empty list (Dash callback output).
    """
    print("GAF Autoencoder & DBSCAN 실행중입니다...")
    gaf = gaf_data[0]
    ae = ae_data[0]
    dbs = dbs_data[0]

    # Load raw + normalized series, then resample to the image edge length.
    result, result_norm = initialize_data()
    resampled = exec_ts_resampler(result_norm, gaf['image_size'])
    series = resampled.reshape(resampled.shape[0], 1, resampled.shape[1])

    # Encode each series as a GAF image and add a channel axis for the CNN.
    images = toGAFdata(tsdatas=series,
                       image_size=gaf['image_size'],
                       method=gaf['gaf_method'])
    images = np.expand_dims(images, axis=3)

    # Learning rate is scaled from the UI slider value (3e-7 * 10^slider).
    learning_rate = (3e-7) * (10 ** ae['learning_rate'])
    features = fit_autoencoder(images, gaf['image_size'],
                               ae['dimension_feature'], ae['optimizer'],
                               learning_rate, ae['activation_function'],
                               ae['loss_function'], ae['batch_size'],
                               ae['epoch'])

    # DBSCAN labels noise as -1, so max(label)+1 counts actual clusters.
    cluster = dbscan(features, eps=dbs['epsilon'],
                     min_samples=dbs['min_sample'])
    cluster_num = max(cluster.labels_) + 1
    send_result_data(result, cluster_num, "GAF Autoencoder & DBSCAN",
                     cluster.labels_, features)
    # NOTE: dropped the original's trailing re-call of initialize_data()
    # whose result was discarded immediately before return (dead code that
    # reloaded the whole dataset for nothing).
    return []
def exct_rp_autoencoder_dbscan(n_clicks, rp_data, ae_data, dbs_data):
    """Run the RP -> autoencoder -> DBSCAN pipeline.

    Loads the time series, encodes each one as a Recurrence Plot image,
    compresses the images to feature vectors with a convolutional
    autoencoder, clusters the features with DBSCAN, and publishes the
    labels via send_result_data.

    Args:
        n_clicks: Dash button click counter (unused beyond triggering).
        rp_data: list whose first element holds the RP settings
            ('image_size', 'dimension', 'time_delay', 'threshold',
            'percentage').
        ae_data: list whose first element holds the autoencoder settings.
        dbs_data: list whose first element holds the DBSCAN settings
            ('epsilon', 'min_sample').

    Returns:
        An empty list (Dash callback output).
    """
    print("RP Autoencoder & DBSCAN 실행중 입니다...")
    rp = rp_data[0]
    ae = ae_data[0]
    dbs = dbs_data[0]

    # The UI passes 'None' as a string; map it to a real None for toRPdata.
    threshold = None if rp['threshold'] == 'None' else rp['threshold']

    # Load raw + normalized series, then resample to the image edge length.
    result, result_norm = initialize_data()
    resampled = exec_ts_resampler(result_norm, rp['image_size'])
    series = resampled.reshape(resampled.shape[0], 1, resampled.shape[1])

    # Encode each series as a recurrence plot; percentage arrives as 0-100.
    images = toRPdata(series, rp['dimension'], rp['time_delay'],
                      threshold, rp['percentage'] / 100)
    images = np.expand_dims(images, axis=3)

    # Learning rate is scaled from the UI slider value (3e-7 * 10^slider).
    learning_rate = (3e-7) * (10 ** ae['learning_rate'])
    features = fit_autoencoder(images, rp['image_size'],
                               ae['dimension_feature'], ae['optimizer'],
                               learning_rate, ae['activation_function'],
                               ae['loss_function'], ae['batch_size'],
                               ae['epoch'])

    # DBSCAN labels noise as -1, so max(label)+1 counts actual clusters.
    cluster = dbscan(features, eps=dbs['epsilon'],
                     min_samples=dbs['min_sample'])
    cluster_num = max(cluster.labels_) + 1
    send_result_data(result, cluster_num, "RP Autoencoder & DBSCAN",
                     cluster.labels_, features)
    return []
def exct_gaf_autoencoder_kmeans(n_clicks, gaf_data, ae_data, km_data):
    """Run the GAF -> autoencoder -> k-means pipeline.

    Loads the time series, encodes each one as a Gramian Angular Field
    image, compresses the images to feature vectors with a convolutional
    autoencoder, clusters the features with k-means, and publishes the
    labels via send_result_data.

    Args:
        n_clicks: Dash button click counter (unused beyond triggering).
        gaf_data: list whose first element holds the GAF settings
            ('image_size', 'gaf_method').
        ae_data: list whose first element holds the autoencoder settings.
        km_data: list whose first element holds the k-means settings
            ('number_of_cluster', 'tolerance', 'try_n_init',
            'try_n_kmeans').

    Returns:
        An empty list (Dash callback output).
    """
    print("GAF Autoencoder & Kmeans 실행중입니다...")
    gaf = gaf_data[0]
    ae = ae_data[0]
    km = km_data[0]

    # Load raw + normalized series, then resample to the image edge length.
    result, result_norm = initialize_data()
    resampled = exec_ts_resampler(result_norm, gaf['image_size'])
    series = resampled.reshape(resampled.shape[0], 1, resampled.shape[1])

    # Encode each series as a GAF image and add a channel axis for the CNN.
    images = toGAFdata(tsdatas=series,
                       image_size=gaf['image_size'],
                       method=gaf['gaf_method'])
    images = np.expand_dims(images, axis=3)

    # Learning rate is scaled from the UI slider value (3e-7 * 10^slider).
    learning_rate = (3e-7) * (10 ** ae['learning_rate'])
    features = fit_autoencoder(images, gaf['image_size'],
                               ae['dimension_feature'], ae['optimizer'],
                               learning_rate, ae['activation_function'],
                               ae['loss_function'], ae['batch_size'],
                               ae['epoch'])
    print(f'feature shape{features.shape}')

    # Cluster the latent features and push the labels to the UI.
    cluster = kmeans(features, km['number_of_cluster'], km['tolerance'],
                     km['try_n_init'], km['try_n_kmeans'])
    send_result_data(result, km['number_of_cluster'],
                     "GAF Autoencoder & Kmeans", cluster.labels_, features)
    return []
def exct_rp_autoencoder_hierarchy(n_clicks, rp_data, ae_data, hrc_data):
    """Run the RP -> autoencoder -> hierarchical-clustering pipeline.

    Loads the time series, encodes each one as a Recurrence Plot image,
    compresses the images to feature vectors with a convolutional
    autoencoder, clusters the features hierarchically, and publishes the
    labels via send_result_data.

    Args:
        n_clicks: Dash button click counter (unused beyond triggering).
        rp_data: list whose first element holds the RP settings
            ('image_size', 'dimension', 'time_delay', 'threshold',
            'percentage').
        ae_data: list whose first element holds the autoencoder settings.
        hrc_data: list whose first element holds the clustering settings
            ('number_of_cluster', 'linkage').

    Returns:
        An empty list (Dash callback output).
    """
    print("RP Autoencoder & hierarchy 실행중입니다...")
    rp = rp_data[0]
    ae = ae_data[0]
    hrc = hrc_data[0]

    # The UI passes 'None' as a string; map it to a real None for toRPdata.
    threshold = rp['threshold']
    if threshold == 'None':
        threshold = None

    # Load raw + normalized series, then resample to the image edge length.
    result, result_norm = initialize_data()
    resampled = exec_ts_resampler(result_norm, rp['image_size'])
    series = resampled.reshape(resampled.shape[0], 1, resampled.shape[1])

    # Encode each series as a recurrence plot; percentage arrives as 0-100.
    images = toRPdata(series, rp['dimension'], rp['time_delay'],
                      threshold, rp['percentage'] / 100)
    images = np.expand_dims(images, axis=3)

    # Learning rate is scaled from the UI slider value (3e-7 * 10^slider).
    learning_rate = (3e-7) * (10 ** ae['learning_rate'])
    features = fit_autoencoder(images, rp['image_size'],
                               ae['dimension_feature'], ae['optimizer'],
                               learning_rate, ae['activation_function'],
                               ae['loss_function'], ae['batch_size'],
                               ae['epoch'])

    # Cluster the latent features and push the labels to the UI.
    cluster = hierarchicalClustering(features, hrc['number_of_cluster'],
                                     hrc['linkage'])
    send_result_data(result, hrc['number_of_cluster'],
                     "RP Autoencoder & Hierarchy", cluster.labels_, features)
    # Fix: every sibling callback returns []; the original returned 0,
    # which is an inconsistent Dash callback output.
    return []