def elbow_plot_handler_old(request):
    """Render an elbow-curve plot (PCA explained-variance ratio) for a stored file.

    Reads GET parameters:
        file_name       -- name of the data file inside the storage base location.
        column_header   -- "on" when the first row of the file is a header.
        exclude_columns -- comma-separated 1-based column indexes to drop.

    Returns a JsonResponse containing either the Bokeh script/div under
    "bokeh_plot" or an error message under "msg".
    """
    resp_data = dict()
    file_name = request.GET.get("file_name")
    column_header = request.GET.get("column_header")
    exclude_columns = request.GET.get("exclude_columns")
    if file_name:
        fs = FileStorage()
        file_full_path = fs.get_base_location() + file_name
        # If the file does exist, read data by pandas and drop columns (if any)
        if fs.is_file(file_full_path):
            # Header row index: 0 when the checkbox was on, else no header.
            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0
            df = DataFrameUtil.convert_file_to_dataframe(file_full_path,
                                                         header=column_header_idx)
            # Drop columns specified by user (screen indexes are 1-based).
            if exclude_columns:
                str_column_indexes = exclude_columns.split(",")
                column_indexes = [int(i) - 1 for i in str_column_indexes]
                df = DataFrameUtil.drop_column_by_index(df, column_indexes)
            # Standardize data before PCA.
            X_scaled = PreProcessingUtil.standardize(df)
            # Get explained variance ratio from the fitted PCA.
            pca_helper = PcaUtil()
            pca = pca_helper.get_fit_transfrom_pca(X_scaled)
            arr_variance_ratio = pca.explained_variance_ratio_
            # Build the Bokeh elbow plot and wrap it in a single tab.
            elbow_plot = draw_elbow_plot(arr_variance_ratio)
            tab1 = Panel(child=elbow_plot, title="Elbow Curve Plot")
            tabs = Tabs(tabs=[tab1])
            # components() splits the layout into embeddable script/div parts.
            script, div = components(tabs)
            plots = {'script': script, 'div': div}
            resp_data["bokeh_plot"] = plots
        else:
            resp_data["msg"] = "[ERROR] File is not found."
    else:
        resp_data['msg'] = "[ERROR] File name is invalid."
    return JsonResponse(resp_data)
def elbow_plot_handler(request):
    """Render an elbow-curve plot for a file uploaded through PcaPlotForm.

    Standardizes the uploaded dataset, fits PCA, and returns the Bokeh
    script/div of the explained-variance elbow plot under "bokeh_plot".
    On an invalid form, returns the escaped form errors instead.
    """
    form = PcaPlotForm(request.POST, request.FILES)
    resp_data = dict()
    if not form.is_valid():
        resp_data[msg.ERROR] = escape(form._errors)
        return JsonResponse(resp_data)

    # Load the uploaded file into a headerless dataframe.
    data_file = form.cleaned_data["data_file"]
    raw_frame = DataFrameUtil.file_to_dataframe(data_file, header=None)

    # Standardize, then fit PCA to obtain the explained-variance ratios.
    scaled = PreProcessingUtil.standardize(raw_frame)
    pca_util = PcaUtil()
    fitted_pca = pca_util.get_fit_transfrom_pca(scaled)
    variance_ratios = fitted_pca.explained_variance_ratio_

    # Build the plot, put it on a single tab, and embed via components().
    curve = draw_elbow_plot(variance_ratios)
    panel = Panel(child=curve, title="Elbow Curve Plot")
    tab_layout = Tabs(tabs=[panel])
    script, div = components(tab_layout)
    resp_data["bokeh_plot"] = {'script': script, 'div': div}
    return JsonResponse(resp_data)
def process_model_data(model_file_name, data_file_name, data_detail_file_name):
    """Score new data with a saved model and build a 2D PCA scatter plot.

    Loads the dataset (no header) and a detail file (with header) from
    storage, runs the stored model on a 100-component PCA projection,
    then projects the same scaled data to 2 components for plotting.

    Returns a dict with Bokeh 'script' and 'div' entries.
    """
    storage = FileStorage()

    # Dataset to score: read without a header row.
    # TODO header index is hard-coded; make it configurable.
    df_data = DataFrameUtil.convert_file_to_dataframe(
        storage.get_full_path(data_file_name), header=None)

    # Detail file: first row is a header; rows align by index with df_data.
    df_data_detail = DataFrameUtil.convert_file_to_dataframe(
        storage.get_full_path(data_detail_file_name), header=0)

    # Load the persisted model.
    # TODO!!!!!! change to DB and dynamic
    model = ModelUtils.load_model(model_file_name)

    logger.debug("Dimensionality Reduction by PCA...")
    pca_util = PcaUtil()
    # Scale features, then reduce to the model's expected dimensionality.
    # TODO n_components=100 is hard-coded; make it dynamic.
    scaled = PreProcessingUtil.fit_transform(df_data)
    reduced = pca_util.get_pc(scaled, n_components=100)
    predictions = model.predict(reduced)
    df_label = pd.DataFrame(predictions, columns=["Label"])

    # Re-project the scaled data to 2 components for the scatter plot.
    projected_2d = pca_util.get_pc(scaled, n_components=2)
    df_points = pd.DataFrame(projected_2d, columns=['PC1', 'PC2'])
    df_graph = df_label.join(df_points)

    # Details are matched to points by row index inside draw_2d.
    script, div = draw_2d(df_graph, df_data_detail)
    return {'script': script, 'div': div}
def process_pipeline(arr_pipeline, X, y, parameters):
    """Execute a sequence of named ML steps over (X, y).

    Each entry in arr_pipeline selects one step; steps mutate X, y, and/or
    clf in order, so the caller controls the flow by ordering the names.
    `parameters` supplies per-step settings (keys noted in each branch).

    Returns (result, X, y, clf): a dict of scores/tables for the UI, the
    transformed features, the (possibly predicted) labels, and the last
    estimator created.
    """
    result = dict()
    clf = None  # Last estimator/selector created by any step.
    score = None  # NOTE(review): never assigned or read below.
    for p in arr_pipeline:
        if p == "sfs":
            # Sequential feature selection; keeps only the selected columns.
            clf = feature_selection_sfs(X, y, parameters)
            if isinstance(X, pd.DataFrame):
                X = DataFrameUtil.get_columns_by_indexes(
                    X, list(clf.k_feature_idx_))
            elif isinstance(X, np.ndarray):
                X = X[:, list(clf.k_feature_idx_)]
            result["scores"] = clf.k_score_
            result['table_columns'] = ['Feature Indexes', 'Feature Names']
            # Convert selection metadata to plain lists for the result table.
            arr_feature_indexes = list(clf.k_feature_idx_)
            arr_feature_names = list(clf.k_feature_names_)
            result['table_data'] = [arr_feature_indexes, arr_feature_names]
        elif p == "select_k_best":
            # !! Input X must be non-negative (chi2 requirement).
            n_k = parameters['select_k_best_n_k']
            X = SelectKBest(chi2, k=n_k).fit_transform(X, y)
        elif p == "scale":
            # Standardize data.
            X = PreProcessingUtil.fit_transform(X)
        elif p == "pca":
            # Reduce dimensions and return as X.
            n_components = parameters['pca_n_components']
            pca_helper = PcaUtil()
            X = pca_helper.reduce_dimension(X, n_components)
        elif p == "kernel_pca":
            # Kernel PCA with a fixed RBF kernel and gamma.
            n_components = parameters['kernel_pca_n_components']
            kpca = KernelPCA(n_components=n_components, kernel='rbf', gamma=15)
            X = kpca.fit_transform(X, y)
        elif p == "lda":
            n_components = parameters['lda_n_components']
            clf = LinearDiscriminantAnalysis(n_components=n_components)
            X = clf.fit_transform(X, y)
        elif p == "tsne":
            n_components = parameters['tsne_n_components']
            clf = TSNE(n_components=n_components)
            X = clf.fit_transform(X, y)
        elif p == "svmovo":
            # SVM one-vs-one; fitting happens later in the "handout" step.
            clf = svm.SVC(gamma='scale', decision_function_shape='ovo')
        elif p == "svmovr":
            # SVM one-vs-rest (LinearSVC); fitted later in "handout".
            clf = svm.LinearSVC(max_iter=5000)
        elif p == "kfold":
            # Cross-validate the current clf on the current X, y.
            n_folds = parameters['n_folds']
            scores = cross_val_score(clf, X, y, cv=n_folds)
            # NOTE(review): txt_accuracy is built but never used or returned.
            txt_accuracy = "%0.2f (+/- %0.2f)" % (scores.mean(),
                                                  scores.std() * 2)
            result["scores"] = scores.tolist()
            result["accuracy_mean"] = scores.mean()
        elif p == "stratified_kfold":
            stratified_kfold_n_split = parameters['stratified_kfold_n_split']
            stratified_kfold_shuffle = parameters['stratified_kfold_shuffle']
            # NOTE(review): the StratifiedKFold object is created and
            # immediately discarded — this branch currently has no effect.
            StratifiedKFold(n_splits=stratified_kfold_n_split,
                            shuffle=stratified_kfold_shuffle,
                            random_state=42)
        elif p == "handout":
            # Hold-out split; random_state fixed so runs are reproducible.
            test_size = parameters['test_size']
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=test_size, random_state=42)
            if isinstance(clf, svm.SVC) or isinstance(clf, LinearSVC):
                # Fit on the training split, then predict labels for the
                # FULL X (not just X_test) so every point gets a label.
                clf.fit(X_train, y_train)
                y = clf.predict(X)
            else:
                # Non-SVM transformer (e.g. t-SNE/LDA): transform train split.
                X = clf.fit_transform(X_train, y_train)
                if not isinstance(clf, TSNE):
                    # t-SNE has no score(); skip scoring for it.
                    result["scores"] = clf.score(X_test, y_test).tolist()
    # NOTE(review): leftover debug output.
    print(clf)
    return result, X, y, clf
def get_reduced_dim_data(df_source, df_target, feature_indexes,
                         target_label_index, arr_target_filter_col,
                         arr_numtypes, arr_criterion, reduce_dim_algorithm,
                         n_components):
    """Filter data by criterion and reduce to 3 dimensions for plotting.

    Joins the selected source (feature) columns with the selected target
    columns, filters rows by the given criteria, standardizes the features,
    and reduces them with PCA or LDA (reduce_dim_algorithm).

    Returns (dim_3d, y): the reduced coordinates and the label column
    (a one-column DataFrame). When exactly 3 features are selected, the
    standardized data is returned directly without dimensionality reduction.
    """
    # Select only the chosen source columns (e.g. radiomics features).
    df_selected_source = Helper.get_selected_columns_data(
        df_source, feature_indexes)
    df_selected_target = Helper.get_selected_columns_data(
        df_target, arr_target_filter_col)
    # Column count is used below to split features from labels after the join.
    len_selected_source = len(df_selected_source.columns)
    df_data = df_selected_source.join(df_selected_target)
    arr_criterion_columns = list(df_selected_target.columns)
    # Keep only rows matching the user's filter criteria.
    df_start_res = Helper.get_filtered_data(df_data, arr_numtypes,
                                            arr_criterion_columns,
                                            arr_criterion)
    # First len_selected_source columns are features; label comes from
    # the target column picked by target_label_index.
    X = df_start_res.iloc[:, 0:len_selected_source]
    y = df_start_res[[df_target.columns.values[int(target_label_index)]]]
    # Standardize features before any reduction.
    X_scaled = PreProcessingUtil.standardize(X)
    # With exactly 3 selected features, skip reduction entirely.
    if len_selected_source == 3:
        return X_scaled, y
    else:
        dim_3d = []
        pca_helper = PcaUtil()
        if reduce_dim_algorithm == PCA:
            # Get X transformed by PCA.
            dim_3d, pca = pca_helper.reduce_dimension(
                X_scaled, n_components=n_components)
        elif reduce_dim_algorithm == LDA:
            new_y = None
            # LDA supports only one target, so encode just that column.
            col_y = y.columns.values
            label_type = y.loc[:, col_y[0]].dtype
            if label_type == 'object':
                # Categorical labels: encode to integers for LDA.
                encoder = EncodingCategoricalFeatures()
                new_y = encoder.label_encoder(y.loc[:, col_y[0]].values)
            elif label_type in [np.float64]:
                # Continuous labels cannot be used as LDA classes.
                raise BizValidationExption(
                    "Target Label", "Data type cannot be float number.")
            else:
                new_y = y
            if isinstance(new_y, pd.core.frame.DataFrame):
                new_y = y.values
            # LDA yields at most (n_classes - 1) components, so 3D output
            # requires more than 3 distinct classes.
            n_labels = len(np.unique(new_y))
            if n_labels <= 3:
                raise BizValidationExption(
                    "LDA",
                    "To reduce dimension by LDA to 3 dimensions, number of classes must be greater than 3."
                )
            # Don't specify n_components here: passing it to PCA changes
            # the downstream LDA result.
            X_transformed, pca = pca_helper.reduce_dimension(X_scaled)
            dim_3d = LdaUtil.reduce_dimension(X_transformed, new_y.ravel(),
                                              n_components=n_components)
        return dim_3d, y
def supervised_learning_train_test_handler(request):
    """Train and evaluate a supervised model from GET parameters.

    Reads dataset/label file names and options from
    SupervisedLearningTrainTestForm, optionally reduces dimensions with
    PCA, evaluates with cross-validation or a hold-out split, and
    optionally fits and persists the model.

    Returns a JsonResponse with process log, messages, scores, and/or
    a save-confirmation entry.

    Bug fixed: the local message list was named `msg`, shadowing the
    module-level `msg` constants module — `resp_data[msg.SUCCESS]` and
    `resp_data[msg.ERROR]` raised AttributeError on a plain list. The
    local accumulator is renamed to `msg_list`.
    """
    resp_data = dict()
    process_log = []
    msg_list = []  # User-facing messages; do NOT shadow the `msg` constants.
    resp_data['process_log'] = process_log
    resp_data['msg'] = msg_list
    form = SupervisedLearningTrainTestForm(request.GET)
    # When it's valid, data from screen is converted to Python types
    # and stored in cleaned_data.
    if form.is_valid():
        sel_algorithm = form.cleaned_data['sel_algorithm']
        sel_dim_reduction = form.cleaned_data['sel_dim_reduction']
        n_components = form.cleaned_data['n_components']
        dataset_file_name = form.cleaned_data['dataset_file_name']
        column_header = form.cleaned_data['column_header']
        label_file_name = form.cleaned_data['label_file_name']
        label_column_header = form.cleaned_data['label_column_header']
        test_size = form.cleaned_data['test_size']
        sel_test_method = form.cleaned_data['sel_test_method']
        n_folds = form.cleaned_data['n_folds']
        is_saved = form.cleaned_data['is_saved']
        model_file_name = form.cleaned_data['model_file_name']
        # Dataframe for storing dataset from file.
        df = None
        if fs.is_file_in_base_location(dataset_file_name) \
                and fs.is_file_in_base_location(label_file_name):
            # Get data file and store it in a dataframe.
            data_file_path = fs.get_base_location() + dataset_file_name
            # Dataset column header checking: 0 means first row is header.
            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0
            df = DataFrameUtil.convert_file_to_dataframe(
                data_file_path, header=column_header_idx)
            # Features data (after optional dimensionality reduction).
            X = None
            if sel_dim_reduction == "pca":
                logger.debug("Dimensionality Reduction by PCA...")
                pca_helper = PcaUtil()
                # Standardize data, reduce dimensions and return as X.
                X_scaled = PreProcessingUtil.fit_transform(df)
                X = pca_helper.reduce_dimension(X_scaled, n_components)
                logger.debug("PCA Done")
            # Label data: read with pandas, flatten to a 1D array.
            y = None
            label_file_path = fs.get_base_location() + label_file_name
            label_column_header_idx = None
            if label_column_header == "on":
                label_column_header_idx = 0
            y = pd.read_csv(label_file_path,
                            header=label_column_header_idx).values.ravel()
            clf = None  # Model
            if sel_algorithm:
                logger.debug("Creating model by SVM...")
                # Select to create SVM as one-vs-one or one-vs-all.
                clf = init_model_object(sel_algorithm)
                if sel_test_method:
                    logger.debug("Starting Cross Validation...")
                    if sel_test_method == "cv" and n_folds:
                        scores = cross_val_score(clf, X, y, cv=n_folds)
                        txt_accuracy = "%0.2f (+/- %0.2f)" % (
                            scores.mean(), scores.std() * 2)
                        logger.debug(txt_accuracy)
                        resp_data["scores"] = scores.tolist()
                        resp_data["accuracy_mean"] = scores.mean()
                        resp_data["params"] = clf.get_params(deep=True)
                    else:
                        # Hold-out split; fixed random_state keeps the split
                        # identical across runs.
                        # NOTE(review): the split result is currently unused —
                        # no fit/score follows in this branch; confirm intent.
                        X_train, X_test, y_train, y_test = train_test_split(
                            X, y, test_size=test_size, random_state=42)
                if is_saved == 1 and model_file_name:
                    # Fit on the full dataset before persisting.
                    clf.fit(X, y)
                    logger.debug("Save model as %s", model_file_name)
                    saved_model_file_name = ModelUtils.save_model(
                        clf, model_file_name)
                    resp_data[msg.SUCCESS] = (
                        "Model has been saved successfully as "
                        + saved_model_file_name)
        else:
            # Dataset or label file is not found.
            msg_list.append("File name is not found in storage.")
    else:
        resp_data[msg.ERROR] = escape(form._errors)
    return JsonResponse(resp_data)
def pipeline_run_handler(request):
    """Run a user-defined ML pipeline and return plot/score data as JSON.

    Reads the pipeline step list and all step parameters from PipelineForm
    (GET), loads the dataset (and optional label file) from storage, calls
    process_pipeline(), optionally saves the fitted model, and prepares 2D
    or 3D plot data from the transformed features.
    """
    resp_data = dict()
    form = PipelineForm(request.GET)
    # When it's valid, data from screen is converted to Python types
    # and stored in cleaned_data.
    if form.is_valid():
        str_pipeline = form.cleaned_data['pipeline']
        dataset_file_name = form.cleaned_data['dataset_file_name']
        column_header = form.cleaned_data['column_header']
        label_file_name = form.cleaned_data['label_file_name']
        label_column_header = form.cleaned_data['label_column_header']
        # Dimensionality reduction parameters.
        pca_n_components = form.cleaned_data['pca_n_components']
        kernel_pca_n_components = form.cleaned_data['kernel_pca_n_components']
        lda_n_components = form.cleaned_data['lda_n_components']
        tsne_n_components = form.cleaned_data['tsne_n_components']
        # Test/evaluation parameters.
        test_size = form.cleaned_data['test_size']
        n_folds = form.cleaned_data['n_folds']
        # Model persistence.
        save_as_name = form.cleaned_data['save_as_name']
        # Feature selection (sequential feature selector) parameters.
        sfs_k_features = form.cleaned_data['sfs_k_features']
        sfs_k_neighbors = form.cleaned_data['sfs_k_neighbors']
        sfs_forward = form.cleaned_data['sfs_forward']
        sfs_floating = form.cleaned_data['sfs_floating']
        sfs_scoring = form.cleaned_data['sfs_scoring']
        sfs_cv = form.cleaned_data['sfs_cv']
        sfs_n_jobs = form.cleaned_data['sfs_n_jobs']
        select_k_best_n_k = form.cleaned_data['select_k_best_n_k']
        stratified_kfold_n_split = form.cleaned_data[
            'stratified_kfold_n_split']
        stratified_kfold_shuffle = form.cleaned_data[
            'stratified_kfold_shuffle']
        # Dataframe for storing dataset from file.
        df = pd.DataFrame()
        if fs.is_file_in_base_location(dataset_file_name):
            # and fs.is_file_in_base_location(label_file_name):
            # Get data file and store it in a dataframe.
            data_file_path = fs.get_base_location() + dataset_file_name
            # Dataset column header checking: 0 means first row is header.
            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0
            df = DataFrameUtil.convert_file_to_dataframe(
                data_file_path, header=column_header_idx)
            # Features data.
            X = df
            # Label data (optional): flatten to a 1D array when present.
            y = None
            if fs.is_file_in_base_location(label_file_name):
                label_column_header_idx = None
                if label_column_header == "on":
                    label_column_header_idx = 0
                label_file_path = fs.get_base_location() + label_file_name
                y = pd.read_csv(
                    label_file_path,
                    header=label_column_header_idx).values.ravel()
            # Process pipeline: step names are comma-separated.
            arr_pipeline = str_pipeline.split(",")
            parameters = dict()
            parameters['n_folds'] = n_folds
            parameters['pca_n_components'] = pca_n_components
            parameters['kernel_pca_n_components'] = kernel_pca_n_components
            parameters['lda_n_components'] = lda_n_components
            parameters['tsne_n_components'] = tsne_n_components
            parameters['test_size'] = test_size
            parameters['select_k_best_n_k'] = select_k_best_n_k
            parameters['stratified_kfold_n_split'] = stratified_kfold_n_split
            parameters['stratified_kfold_shuffle'] = stratified_kfold_shuffle
            if sfs_k_features != "":
                # In case of feature selection, plot result as table.
                parameters['sfs_k_neighbors'] = sfs_k_neighbors
                parameters['sfs_k_features'] = sfs_k_features
                parameters['sfs_forward'] = sfs_forward
                parameters['sfs_floating'] = sfs_floating
                parameters['sfs_scoring'] = sfs_scoring
                parameters['sfs_cv'] = sfs_cv
                parameters['sfs_n_jobs'] = sfs_n_jobs
                parameters['feature_names'] = df.columns
            result, X, y, model = process_pipeline(arr_pipeline, X, y,
                                                   parameters)
            # NOTE(review): leftover debug output.
            print(X)
            print(y)
            # The pipeline result becomes the response body.
            resp_data = result
            if save_as_name != "":
                # If model is not fitted yet, fit the model and save.
                if not ModelUtils.is_fitted(model):
                    model.fit(X, y)
                save_as_name = ModelUtils.save_model(model, save_as_name)
                # NOTE(review): message says "save" instead of "saved" —
                # left unchanged here (runtime string); fix separately.
                resp_data[
                    msg.
                    SUCCESS] = "Model has been save successfully as " + save_as_name
            # Build plot data when X is non-empty.
            # (Precedence: `and` binds tighter than `or`, so this is
            # (ndarray and any) or (DataFrame and not empty).)
            if isinstance(X, np.ndarray) and X.any() \
                    or isinstance(X, pd.DataFrame) and not X.empty:
                # Check X dimension (number of feature columns).
                nD = X.shape[1]
                if nD == 2:
                    # For 2D plotting.
                    df_plot = pd.DataFrame(data=X, columns=['x', 'y'])
                    df_plot['label'] = y
                    resp_data['plot_data'] = df_plot.to_json()
                    resp_data['dimension'] = 2
                elif nD == 3:
                    # For 3D plotting.
                    df_plot = pd.DataFrame(data=X, columns=['x', 'y', 'z'])
                    df_plot['label'] = y
                    resp_data['plot_data'] = df_plot.to_json()
                    resp_data['dimension'] = 3
                elif nD > 3:
                    # More than 3 dimensions: default to a 3D PCA projection.
                    pca_helper = PcaUtil()
                    X = pca_helper.reduce_dimension(X, n_components=3)
                    df_plot = pd.DataFrame(data=X, columns=['x', 'y', 'z'])
                    df_label = pd.DataFrame(data=y, columns=['label'])
                    df_plot = df_plot.join(df_label)
                    resp_data['plot_data'] = df_plot.to_json()
                    resp_data['dimension'] = 3
        else:
            # Dataset file is not found.
            resp_data[msg.ERROR] = "File name is not found in storage."
    else:
        resp_data[msg.ERROR] = escape(form._errors)
    return JsonResponse(resp_data, safe=False)