def lda_plot(request):
    """ Display LDA 3D plot data. """
    form = LdaPlotForm(request.POST, request.FILES)
    resp_data = dict()
    # LDA 3D
    plot = dict()
    if form.is_valid():
        # Get input files
        data_file = form.cleaned_data["data_file"]
        label_file = form.cleaned_data["label_file"]
        df_input = DataFrameUtil.file_to_dataframe(data_file, header=None)
        df_label = DataFrameUtil.file_to_dataframe(label_file, header=None)

        clf = LinearDiscriminantAnalysis(n_components=3)
        X = df_input.values
        y = df_label.values.ravel()
        # Plot the transformed components, not the raw input data.
        X_lda = clf.fit_transform(X, y)
        plot['x'] = list(X_lda[:, 0])
        plot['y'] = list(X_lda[:, 1])
        plot['z'] = list(X_lda[:, 2])
        resp_data['plot'] = plot
    else:
        resp_data[msg.ERROR] = escape(form._errors)

    return JsonResponse(resp_data)

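# A minimal, self-contained sketch of the LDA projection used above, assuming
# only scikit-learn and NumPy. Note that LDA yields at most
# min(n_classes - 1, n_features) components, so 3 components require at least
# 4 classes in y.
def lda_project_3d_sketch(X, y):
    import numpy as np
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    clf = LinearDiscriminantAnalysis(n_components=3)
    X_lda = clf.fit_transform(X, np.ravel(y))  # use the transformed data
    return X_lda[:, 0], X_lda[:, 1], X_lda[:, 2]
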
def upload_file_handler(request):
    if request.method == 'POST':
        # Upload file
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            data_file = request.FILES['data_file']
            column_header = form.cleaned_data['column_header']

            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0

            df = DataFrameUtil.file_to_dataframe(data_file,
                                                 header=column_header_idx)
            analyze_results = DataFrameUtil.analyze_dataframe(df)
            file_json_data, columns_name = DataFrameUtil.dataframe_to_json(df)
            resp_data = {'table_data': file_json_data,
                         'table_columns': columns_name,
                         'analysis': analyze_results}
            return JsonResponse(resp_data)
        else:
            # Form validation error
            resp_data = {msg.ERROR: escape(form._errors)}
            return JsonResponse(resp_data)
    else:
        resp_data = {msg.ERROR: "Request is not POST."}
        return JsonResponse(resp_data)

def elbow_plot_handler_old(request):
    resp_data = dict()
    file_name = request.GET.get("file_name")
    column_header = request.GET.get("column_header")
    exclude_columns = request.GET.get("exclude_columns")

    if file_name:
        fs = FileStorage()
        file_full_path = fs.get_base_location() + file_name
        # If the file exists, read the data with pandas and drop columns (if any)
        if fs.is_file(file_full_path):
            # Get data from file
            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0
            df = DataFrameUtil.convert_file_to_dataframe(
                file_full_path, header=column_header_idx)

            # Drop columns specified by the user (1-based indexes)
            if exclude_columns:
                str_column_indexes = exclude_columns.split(",")
                column_indexes = [int(i) - 1 for i in str_column_indexes]
                df = DataFrameUtil.drop_column_by_index(df, column_indexes)

            # Sanity checks (currently unused)
            is_nan = np.any(np.isnan(df))
            is_finite = np.all(np.isfinite(df))

            # Standardize data
            X_scaled = PreProcessingUtil.standardize(df)

            # Get explained variance ratio
            pca_helper = PcaUtil()
            pca = pca_helper.get_fit_transfrom_pca(X_scaled)
            arr_variance_ratio = pca.explained_variance_ratio_

            # Prepare tabs to display the plot with Bokeh:
            # add the ratios to a Bokeh line graph
            elbow_plot = draw_elbow_plot(arr_variance_ratio)

            # Add the plot to a panel, and the panel to a tab
            tab1 = Panel(child=elbow_plot, title="Elbow Curve Plot")
            tabs = Tabs(tabs=[tab1])
            script, div = components(tabs)
            plots = {'script': script, 'div': div}
            resp_data["bokeh_plot"] = plots
        else:
            resp_data["msg"] = "[ERROR] File is not found."
    else:
        resp_data['msg'] = "[ERROR] File name is invalid."

    return JsonResponse(resp_data)

def get_source_target_dataframe(form):
    source_file = form.cleaned_data["source_file"]
    target_file = form.cleaned_data["target_file"]
    df_source = DataFrameUtil.file_to_dataframe(source_file, header=0)
    df_target = DataFrameUtil.file_to_dataframe(target_file, header=0)
    return df_source, df_target

def save_data_handler(request):
    """ Clean up data and save it under a new file name. """
    form = SaveFileForm(request.POST, request.FILES)
    if form.is_valid():
        file = request.FILES["data_file"]
        choice_cleanup = form.cleaned_data["choice_cleanup"]
        column_header = form.cleaned_data["column_header"]
        exclude_columns = form.cleaned_data["exclude_columns"]
        remain_columns = form.cleaned_data["remain_columns"]
        split_row_from = form.cleaned_data["split_row_from"]
        split_row_to = form.cleaned_data["split_row_to"]
        save_as_name = form.cleaned_data["save_as_name"]

        if save_as_name:
            # When the column header box is checked, use row 0 (zero-based index)
            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0
            df = DataFrameUtil.file_to_dataframe(file, header=column_header_idx)

            # Split rows from - to
            if split_row_from and split_row_to:
                # To zero-based index.
                split_row_from_idx = int(split_row_from) - 1
                split_row_to_idx = int(split_row_to)
                df = df.iloc[split_row_from_idx:split_row_to_idx, :]

            # Delete NaN rows
            if choice_cleanup == "delete":
                df = DataFrameUtil.drop_na_row(df)

            # Drop columns and store to a new df.
            if exclude_columns:
                df = dataframe_exclude_columns(df, exclude_columns)
            if remain_columns:
                df = dataframe_remain_columns(df, remain_columns)

            # Don't forget to add '.csv' at the end of the path
            header = column_header_idx is not None
            df.to_csv(fs.get_base_location() + save_as_name,
                      index=None, header=header)

            columns_value = df.columns.tolist()
            file_json_data = df.to_json(orient='values')
            analyze_results = DataFrameUtil.analyze_dataframe(df)
            resp_data = {msg.SUCCESS: 'The file has been saved as ' + save_as_name,
                         'table_data': file_json_data,
                         'table_columns': columns_value,
                         'analysis': analyze_results}
        else:
            resp_data = {msg.ERROR: '[ERROR] Invalid parameter.'}
    else:
        # Form validation error
        resp_data = {msg.ERROR: escape(form._errors)}

    return JsonResponse(resp_data)

def analyze_data(file_full_path, header_row=None):
    # Read data from file into a pandas dataframe and check for NaN.
    # TODO header should be specified by user
    df = DataFrameUtil.convert_file_to_dataframe(file_full_path,
                                                 header=header_row)
    results = DataFrameUtil.analyze_dataframe(df, header_row)
    return results

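# DataFrameUtil.analyze_dataframe is defined elsewhere; a hedged sketch of
# the kind of per-column summary it plausibly returns (the dict keys here are
# illustrative assumptions, not the project's actual contract).
def analyze_dataframe_sketch(df):
    return {
        "n_rows": int(df.shape[0]),
        "n_columns": int(df.shape[1]),
        "nan_counts": df.isna().sum().to_dict(),    # NaN cells per column
        "dtypes": df.dtypes.astype(str).to_dict(),  # column types
    }
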
def extract_matched_key(key_file, data_file):
    """ Match keys between both files and return the matching rows. """
    df_keys = DataFrameUtil.file_to_dataframe(key_file, header=None)
    df_data = DataFrameUtil.file_to_dataframe(data_file, header=None)
    # Select rows of df_data whose first column (the key) exists in df_keys
    keys = list(df_keys.iloc[:, 0].values)
    df_result = df_data[df_data.iloc[:, 0].isin(keys)]
    return df_result

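# A quick usage sketch of extract_matched_key's core semantics with in-memory
# frames (file handling elided): rows of df_data are kept when their first
# column appears in the first column of df_keys. Names here are illustrative.
def _matched_key_example():
    import pandas as pd
    df_keys = pd.DataFrame({0: ["a", "c"]})
    df_data = pd.DataFrame({0: ["a", "b", "c"], 1: [10, 20, 30]})
    keys = list(df_keys.iloc[:, 0].values)
    df_result = df_data[df_data.iloc[:, 0].isin(keys)]
    return df_result  # rows ("a", 10) and ("c", 30)
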
def get_file_json_data(request):
    file_name = request.GET.get('file_name')
    column_header = request.GET.get('column_header')
    resp_data = dict()
    if file_name:
        file_full_path = fs.get_base_location() + file_name
        # If the file exists, get its data as JSON
        if fs.is_file(file_full_path):
            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0
            json_data, columns = DataFrameUtil.convert_csv_to_json(
                file_full_path, header=column_header_idx)
            resp_data["table_columns"] = columns
            resp_data["table_data"] = json_data
        else:
            resp_data[msg.ERROR] = "File is not found."
    else:
        resp_data[msg.ERROR] = "Request parameter is incorrect."
    return JsonResponse(resp_data)

def read_data_detail_to_dataframe(data_file_name):
    # TODO change to DB; the argument is overridden by a hard-coded file for now
    data_file_name = "health_and_medical_history_501_600.csv"
    file_full_path = fs.get_full_path(file_name=data_file_name)
    df_data_detail = DataFrameUtil.convert_file_to_dataframe(file_full_path,
                                                             header=0)
    return df_data_detail

def elbow_plot_handler(request):
    form = PcaPlotForm(request.POST, request.FILES)
    resp_data = dict()
    if form.is_valid():
        # Get input file
        data_file = form.cleaned_data["data_file"]
        df_input = DataFrameUtil.file_to_dataframe(data_file, header=None)
        X_scaled = PreProcessingUtil.standardize(df_input)

        # Get explained variance ratio
        pca_helper = PcaUtil()
        pca = pca_helper.get_fit_transfrom_pca(X_scaled)
        arr_variance_ratio = pca.explained_variance_ratio_

        # Prepare tabs to display the plot with Bokeh:
        # add the ratios to a Bokeh line graph
        elbow_plot = draw_elbow_plot(arr_variance_ratio)

        # Add the plot to a panel, and the panel to a tab
        tab1 = Panel(child=elbow_plot, title="Elbow Curve Plot")
        tabs = Tabs(tabs=[tab1])
        script, div = components(tabs)
        plots = {'script': script, 'div': div}
        resp_data["bokeh_plot"] = plots
    else:
        resp_data[msg.ERROR] = escape(form._errors)

    return JsonResponse(resp_data)

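# draw_elbow_plot is defined elsewhere; a minimal Bokeh sketch of an elbow
# curve over explained-variance ratios (figure/line are standard Bokeh API,
# everything else is an assumption about the helper's behavior).
def draw_elbow_plot_sketch(arr_variance_ratio):
    from bokeh.plotting import figure
    p = figure(title="Elbow Curve", x_axis_label="Principal component",
               y_axis_label="Explained variance ratio")
    xs = list(range(1, len(arr_variance_ratio) + 1))
    p.line(xs, list(arr_variance_ratio), line_width=2)
    return p
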
def dataframe_exclude_columns(df, exclude_columns):
    """ exclude_columns - comma-separated 1-based column indexes entered by the user """
    if exclude_columns:
        str_column_indexes = exclude_columns.split(",")
        column_indexes = [int(i) - 1 for i in str_column_indexes]
        return DataFrameUtil.drop_column_by_index(df, column_indexes)
    return df

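# dataframe_remain_columns (used by the cleanup handlers in this module) is
# not shown here; a plausible counterpart to dataframe_exclude_columns that
# keeps only the listed 1-based column indexes. A hedged sketch, not the
# project's actual implementation.
def dataframe_remain_columns_sketch(df, remain_columns):
    """ remain_columns - comma-separated 1-based column indexes to keep """
    if remain_columns:
        column_indexes = [int(i) - 1 for i in remain_columns.split(",")]
        return df.iloc[:, column_indexes]
    return df
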
def read_based_space_to_dataframe():
    """ Read data from file and convert to a dataframe for the input X that
    will be predicted and drawn in the scatter plot. """
    # TODO need to change this setting to DB
    df_based_space = DataFrameUtil.convert_file_to_dataframe(
        fs.get_full_path("radiomic_result_501_600.csv"), header=0)
    return df_based_space

def process_model_data(model_file_name, data_file_name, data_detail_file_name):
    # Convert files to dataframes
    fs = FileStorage()
    # TODO make the header setting configurable
    column_header_idx = None
    # Dataframe of the data to process; new data, separate from training
    df_data = DataFrameUtil.convert_file_to_dataframe(
        fs.get_full_path(data_file_name), header=column_header_idx)

    # Dataframe for matching indexes with processed data to show details
    column_header_idx = 0
    df_data_detail = DataFrameUtil.convert_file_to_dataframe(
        fs.get_full_path(data_detail_file_name), header=column_header_idx)

    # Load model
    model = ModelUtils.load_model(model_file_name)

    # TODO change to DB and make dynamic
    # Do PCA
    logger.debug("Dimensionality Reduction by PCA...")
    pca_helper = PcaUtil()
    # Standardize data, reduce dimensions, and return as X.
    X_scaled = PreProcessingUtil.fit_transform(df_data)
    # TODO change n_components=100 to a dynamic value
    X_reduced = pca_helper.get_pc(X_scaled, n_components=100)
    pred_y = model.predict(X_reduced)
    # Keep the predicted result as the label
    # https://www.geeksforgeeks.org/different-ways-to-create-pandas-dataframe/
    df_label = pd.DataFrame(pred_y, columns=["Label"])

    X_graph = pca_helper.get_pc(X_scaled, n_components=2)
    df_data = pd.DataFrame(X_graph, columns=['PC1', 'PC2'])
    df_graph = df_label.join(df_data)

    # Match the data details by row/index
    script, div = draw_2d(df_graph, df_data_detail)
    plot = dict()
    plot['script'] = script
    plot['div'] = div
    return plot

def load_model(model_name):
    # TODO change to load settings from DB
    # TODO the data below must be the trained data
    df_train = DataFrameUtil.convert_file_to_dataframe(
        fs.get_full_path("radiomic482_no_key.csv"), header=0)
    X_scaled = PreProcessingUtil.standardize(df_train)
    X_reduced = PcaUtil.reduce_dimension(X_scaled, n_components=50)
    model = KMeanUtil.get_kmean_model(X_reduced, n_clusters=5, random_state=42)
    return model

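# An equivalent of load_model's training path written directly against
# scikit-learn, assuming the helper classes wrap StandardScaler, PCA, and
# KMeans; the hyperparameters simply mirror the code above.
def build_kmeans_sketch(df_train, n_components=50, n_clusters=5):
    from sklearn.cluster import KMeans
    from sklearn.decomposition import PCA
    from sklearn.preprocessing import StandardScaler
    X_scaled = StandardScaler().fit_transform(df_train)
    X_reduced = PCA(n_components=n_components).fit_transform(X_scaled)
    return KMeans(n_clusters=n_clusters, random_state=42).fit(X_reduced)
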
def get_scaled_dataframe(form):
    data_file_name = form.cleaned_data['data_file_name']
    column_header = form.cleaned_data['column_header']

    # Get the file from storage
    data_file_full_path = fs.get_full_path(data_file_name)
    column_header_idx = None
    if column_header == "on":
        column_header_idx = 0
    df = DataFrameUtil.convert_file_to_dataframe(data_file_full_path,
                                                 header=column_header_idx)
    df_scaled = PreProcessingUtil.standardize(df)
    return df_scaled

def matched_keys_handler(request):
    form = ExtractMatchedKeysForm(request.POST, request.FILES)
    resp_data = dict()
    if form.is_valid():
        key_file = request.FILES["key_file"]
        data_file = request.FILES["data_file"]
        df_result = extract_matched_key(key_file, data_file)
        file_json_data = df_result.to_json(orient='values')
        analyze_results = DataFrameUtil.analyze_dataframe(df_result)
        resp_data['table_data'] = file_json_data
        resp_data['table_columns'] = df_result.columns.tolist()
        resp_data['analysis'] = analyze_results
    else:
        resp_data[msg.ERROR] = escape(form._errors)
    return JsonResponse(resp_data)

def pca_plot(request):
    """ Display PCA 3D plot data. """
    form = PcaPlotForm(request.POST, request.FILES)
    resp_data = dict()
    # PCA 3D
    plot = dict()
    if form.is_valid():
        # Get input file
        data_file = form.cleaned_data["data_file"]
        df_input = DataFrameUtil.file_to_dataframe(data_file, header=None)
        X, pca = PcaUtil.reduce_dimension(df_input, n_components=3)
        plot['x'] = list(X[:, 0])
        plot['y'] = list(X[:, 1])
        plot['z'] = list(X[:, 2])
        resp_data['plot'] = plot
    else:
        resp_data[msg.ERROR] = escape(form._errors)

    return JsonResponse(resp_data)

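# PcaUtil.reduce_dimension is defined elsewhere; call sites in this module are
# inconsistent (pca_plot unpacks (X, pca), other handlers use a single return
# value). A hedged sketch of the two-value contract with scikit-learn:
def reduce_dimension_sketch(df_input, n_components=3):
    from sklearn.decomposition import PCA
    pca = PCA(n_components=n_components)
    X = pca.fit_transform(df_input.values)  # rows as samples
    return X, pca
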
def pipeline_run_handler(request):
    resp_data = dict()
    form = PipelineForm(request.GET)
    # When the form is valid, data from the screen is converted to Python
    # types and stored in cleaned_data
    if form.is_valid():
        str_pipeline = form.cleaned_data['pipeline']
        dataset_file_name = form.cleaned_data['dataset_file_name']
        column_header = form.cleaned_data['column_header']
        label_file_name = form.cleaned_data['label_file_name']
        label_column_header = form.cleaned_data['label_column_header']

        # Dimensionality Reduction
        pca_n_components = form.cleaned_data['pca_n_components']
        kernel_pca_n_components = form.cleaned_data['kernel_pca_n_components']
        lda_n_components = form.cleaned_data['lda_n_components']
        tsne_n_components = form.cleaned_data['tsne_n_components']

        # Test
        test_size = form.cleaned_data['test_size']
        n_folds = form.cleaned_data['n_folds']

        # Save model
        save_as_name = form.cleaned_data['save_as_name']

        # Feature Selection
        sfs_k_features = form.cleaned_data['sfs_k_features']
        sfs_k_neighbors = form.cleaned_data['sfs_k_neighbors']
        sfs_forward = form.cleaned_data['sfs_forward']
        sfs_floating = form.cleaned_data['sfs_floating']
        sfs_scoring = form.cleaned_data['sfs_scoring']
        sfs_cv = form.cleaned_data['sfs_cv']
        sfs_n_jobs = form.cleaned_data['sfs_n_jobs']
        select_k_best_n_k = form.cleaned_data['select_k_best_n_k']
        stratified_kfold_n_split = form.cleaned_data['stratified_kfold_n_split']
        stratified_kfold_shuffle = form.cleaned_data['stratified_kfold_shuffle']

        # Dataframe for storing the dataset from file.
        df = pd.DataFrame()
        if fs.is_file_in_base_location(dataset_file_name):
            # Get the data file and store it in a dataframe.
            data_file_path = fs.get_base_location() + dataset_file_name

            # Dataset column header checking
            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0
            df = DataFrameUtil.convert_file_to_dataframe(
                data_file_path, header=column_header_idx)

            # Features data
            X = df
            # Label data
            y = None
            # Use pandas to read the labels, then flatten to a 1D array
            if fs.is_file_in_base_location(label_file_name):
                label_column_header_idx = None
                if label_column_header == "on":
                    label_column_header_idx = 0
                label_file_path = fs.get_base_location() + label_file_name
                y = pd.read_csv(label_file_path,
                                header=label_column_header_idx).values.ravel()

            # Process pipeline
            arr_pipeline = str_pipeline.split(",")
            parameters = dict()
            parameters['n_folds'] = n_folds
            parameters['pca_n_components'] = pca_n_components
            parameters['kernel_pca_n_components'] = kernel_pca_n_components
            parameters['lda_n_components'] = lda_n_components
            parameters['tsne_n_components'] = tsne_n_components
            parameters['test_size'] = test_size
            parameters['select_k_best_n_k'] = select_k_best_n_k
            parameters['stratified_kfold_n_split'] = stratified_kfold_n_split
            parameters['stratified_kfold_shuffle'] = stratified_kfold_shuffle

            if sfs_k_features != "":
                # In case of feature selection, plot the result as a table
                parameters['sfs_k_neighbors'] = sfs_k_neighbors
                parameters['sfs_k_features'] = sfs_k_features
                parameters['sfs_forward'] = sfs_forward
                parameters['sfs_floating'] = sfs_floating
                parameters['sfs_scoring'] = sfs_scoring
                parameters['sfs_cv'] = sfs_cv
                parameters['sfs_n_jobs'] = sfs_n_jobs
                parameters['feature_names'] = df.columns

            result, X, y, model = process_pipeline(arr_pipeline, X, y,
                                                   parameters)
            resp_data = result

            if save_as_name != "":
                # If the model is not fitted yet, fit it before saving
                if not ModelUtils.is_fitted(model):
                    model.fit(X, y)
                save_as_name = ModelUtils.save_model(model, save_as_name)
                resp_data[msg.SUCCESS] = \
                    "Model has been saved successfully as " + save_as_name

            # Prepare plot data when X has something to display.
            if isinstance(X, np.ndarray) and X.any() \
                    or isinstance(X, pd.DataFrame) and not X.empty:
                # Check the dimension of X
                nD = X.shape[1]
                if nD == 2:
                    # For 2D
                    df_plot = pd.DataFrame(data=X, columns=['x', 'y'])
                    df_plot['label'] = y
                    resp_data['plot_data'] = df_plot.to_json()
                    resp_data['dimension'] = 2
                elif nD == 3:
                    # For 3D
                    df_plot = pd.DataFrame(data=X, columns=['x', 'y', 'z'])
                    df_plot['label'] = y
                    resp_data['plot_data'] = df_plot.to_json()
                    resp_data['dimension'] = 3
                elif nD > 3:
                    # Default to 3D
                    pca_helper = PcaUtil()
                    X = pca_helper.reduce_dimension(X, n_components=3)
                    df_plot = pd.DataFrame(data=X, columns=['x', 'y', 'z'])
                    df_label = pd.DataFrame(data=y, columns=['label'])
                    df_plot = df_plot.join(df_label)
                    resp_data['plot_data'] = df_plot.to_json()
                    resp_data['dimension'] = 3
        else:
            # The dataset file is not found.
            resp_data[msg.ERROR] = "File name is not found in storage."
    else:
        resp_data[msg.ERROR] = escape(form._errors)

    return JsonResponse(resp_data, safe=False)

def read_file_to_dataframe(file_name, column_header_idx):
    file_full_path = fs.get_base_location() + file_name
    # Read the file data
    return DataFrameUtil.convert_file_to_dataframe(file_full_path,
                                                   header=column_header_idx)

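# DataFrameUtil.convert_file_to_dataframe is defined elsewhere; most likely a
# thin wrapper around pandas CSV reading, roughly like this (an assumption):
def convert_file_to_dataframe_sketch(file_full_path, header=None):
    import pandas as pd
    # header=None treats row 0 as data; header=0 uses it as column names.
    return pd.read_csv(file_full_path, header=header)
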
def unsupervised_learning_train_test_handler(request):
    resp_data = dict()
    process_log = []
    msg = []
    resp_data['process_log'] = process_log
    resp_data['msg'] = msg

    form = SupervisedLearningTrainTestForm(request.GET)
    # When the form is valid, data from the screen is converted to Python
    # types and stored in cleaned_data
    if form.is_valid():
        sel_algorithm = form.cleaned_data['sel_algorithm']
        sel_dim_reduction = form.cleaned_data['sel_dim_reduction']
        n_components = form.cleaned_data['n_components']
        dataset_file_name = form.cleaned_data['dataset_file_name']
        column_header = form.cleaned_data['column_header']
        label_file_name = form.cleaned_data['label_file_name']
        label_column_header = form.cleaned_data['label_column_header']
        test_size = form.cleaned_data['test_size']
        sel_test_method = form.cleaned_data['sel_test_method']
        n_folds = form.cleaned_data['n_folds']
        is_saved = form.cleaned_data['is_saved']
        model_file_name = form.cleaned_data['model_file_name']

        # Dataframe for storing the dataset from file.
        df = None
        if fs.is_file_in_base_location(dataset_file_name) \
                and fs.is_file_in_base_location(label_file_name):
            # Get the data file and store it in a dataframe.
            data_file_path = fs.get_base_location() + dataset_file_name

            # Dataset column header checking
            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0
            df = DataFrameUtil.convert_file_to_dataframe(
                data_file_path, header=column_header_idx)

            # PCA process: features data
            X = None
            if sel_dim_reduction == "pca":
                logger.debug("Dimensionality Reduction by PCA...")
                pca_helper = PcaHelper()
                # Standardize data, reduce dimensions, and return as X.
                X_scaled = PreProcessingUtil.fit_transform(df)
                X = pca_helper.get_pc(X_scaled, n_components)
                logger.debug("PCA Done")

            # Label data
            y = None
            label_file_path = fs.get_base_location() + label_file_name
            label_column_header_idx = None
            if label_column_header == "on":
                label_column_header_idx = 0
            # Use pandas to read the labels, then flatten to a 1D array
            y = pd.read_csv(label_file_path,
                            header=label_column_header_idx).values.ravel()

            # Model
            clf = None
            if sel_algorithm:
                logger.debug("Creating model by SVM...")
                # Select whether to create the SVM as one-vs-one or one-vs-rest
                clf = init_model_object(sel_algorithm)

            if sel_test_method:
                logger.debug("Starting Cross Validation...")
                if sel_test_method == "cv" and n_folds:
                    scores = cross_val_score(clf, X, y, cv=n_folds)
                    txt_accuracy = "%0.2f (+/- %0.2f)" % (scores.mean(),
                                                          scores.std() * 2)
                    logger.debug(txt_accuracy)
                    resp_data["scores"] = scores.tolist()
                    resp_data["accuracy_mean"] = scores.mean()
                    resp_data["params"] = clf.get_params(deep=True)
                else:
                    # Set random_state here to get the same split across runs.
                    X_train, X_test, y_train, y_test = train_test_split(
                        X, y, test_size=test_size, random_state=42)

            if is_saved == 1 and model_file_name:
                clf.fit(X, y)
                logger.debug("Save model as %s", model_file_name)
                saved_model_file_name = ModelUtils.save_model(
                    clf, model_file_name)
                resp_data["msg"] = ("Model has been saved successfully as "
                                    + saved_model_file_name)
        else:
            # The dataset file is not found.
            msg.append("File name is not found in storage.")
    else:
        resp_data['msg'] = form._errors

    return JsonResponse(resp_data)

def process_clean_up_data_handler(request):
    """ Clean up data by removing NaN rows and dropping columns. """
    form = ProcessFileForm(request.POST, request.FILES)
    if form.is_valid():
        data_file = request.FILES["data_file"]
        choice_cleanup = form.cleaned_data["choice_cleanup"]
        column_header = form.cleaned_data["column_header"]
        exclude_columns = form.cleaned_data["exclude_columns"]
        remain_columns = form.cleaned_data["remain_columns"]
        split_row_from = form.cleaned_data["split_row_from"]
        split_row_to = form.cleaned_data["split_row_to"]

        df = None
        if data_file:
            # When the column header box is checked, use row 0 (zero-based index)
            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0
            df = DataFrameUtil.file_to_dataframe(data_file,
                                                 header=column_header_idx)

            # Split rows from - to
            if split_row_from and split_row_to:
                # To zero-based index.
                split_row_from_idx = split_row_from - 1
                split_row_to_idx = split_row_to
                df = df.iloc[split_row_from_idx:split_row_to_idx, :]

            # TODO fill with mean, median
            # Delete NaN rows
            if choice_cleanup == "delete":
                df = DataFrameUtil.drop_na_row(df)

            # Drop columns and store to a new df.
            if exclude_columns:
                df = dataframe_exclude_columns(df, exclude_columns)

            # Drop all columns except those specified by the user.
            if remain_columns:
                df = dataframe_remain_columns(df, remain_columns)

            file_json_data = df.to_json(orient='values')
            columns_value = df.columns.tolist()
            analyze_results = DataFrameUtil.analyze_dataframe(df)
            resp_data = {'table_data': file_json_data,
                         'table_columns': columns_value,
                         'analysis': analyze_results}
        else:
            resp_data = {msg.ERROR: '[ERROR] Invalid request parameters.'}
    else:
        # Form validation error
        resp_data = {msg.ERROR: escape(form._errors)}

    return JsonResponse(resp_data)

def process_pipeline(arr_pipeline, X, y, parameters):
    result = dict()
    clf = None  # Model

    for p in arr_pipeline:
        if p == "sfs":
            # Sequential feature selection: keep only the selected columns
            clf = feature_selection_sfs(X, y, parameters)
            if isinstance(X, pd.DataFrame):
                X = DataFrameUtil.get_columns_by_indexes(
                    X, list(clf.k_feature_idx_))
            elif isinstance(X, np.ndarray):
                X = X[:, list(clf.k_feature_idx_)]
            result["scores"] = clf.k_score_
            result['table_columns'] = ['Feature Indexes', 'Feature Names']
            # Convert the selection to arrays for the result table
            arr_feature_indexes = list(clf.k_feature_idx_)
            arr_feature_names = list(clf.k_feature_names_)
            result['table_data'] = [arr_feature_indexes, arr_feature_names]
        elif p == "select_k_best":
            # !! Input X must be non-negative.
            n_k = parameters['select_k_best_n_k']
            X = SelectKBest(chi2, k=n_k).fit_transform(X, y)
        elif p == "scale":
            # Standardize data
            X = PreProcessingUtil.fit_transform(X)
        elif p == "pca":
            # Reduce dimensions and return as X.
            n_components = parameters['pca_n_components']
            pca_helper = PcaUtil()
            X = pca_helper.reduce_dimension(X, n_components)
        elif p == "kernel_pca":
            # Reduce dimensions and return as X.
            n_components = parameters['kernel_pca_n_components']
            kpca = KernelPCA(n_components=n_components, kernel='rbf', gamma=15)
            X = kpca.fit_transform(X, y)
        elif p == "lda":
            n_components = parameters['lda_n_components']
            clf = LinearDiscriminantAnalysis(n_components=n_components)
            X = clf.fit_transform(X, y)
        elif p == "tsne":
            n_components = parameters['tsne_n_components']
            clf = TSNE(n_components=n_components)
            X = clf.fit_transform(X, y)
        elif p == "svmovo":
            # SVM one-vs-one; SVC has no fit_transform, fitting happens later
            clf = svm.SVC(gamma='scale', decision_function_shape='ovo')
        elif p == "svmovr":
            # SVM one-vs-rest
            clf = svm.LinearSVC(max_iter=5000)
        elif p == "kfold":
            n_folds = parameters['n_folds']
            scores = cross_val_score(clf, X, y, cv=n_folds)
            result["scores"] = scores.tolist()
            result["accuracy_mean"] = scores.mean()
        elif p == "stratified_kfold":
            stratified_kfold_n_split = parameters['stratified_kfold_n_split']
            stratified_kfold_shuffle = parameters['stratified_kfold_shuffle']
            # NOTE: the splitter is created but not used anywhere yet
            StratifiedKFold(n_splits=stratified_kfold_n_split,
                            shuffle=stratified_kfold_shuffle,
                            random_state=42)
        elif p == "handout":
            # Hold-out test: split train/test data based on the specified
            # ratio; set random_state to get the same split across runs.
            test_size = parameters['test_size']
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=test_size, random_state=42)
            if isinstance(clf, svm.SVC) or isinstance(clf, LinearSVC):
                clf.fit(X_train, y_train)
                # Predict over the full X so every point gets a plot label
                y = clf.predict(X)
            else:
                # t-SNE, not SVM
                X = clf.fit_transform(X_train, y_train)
            if not isinstance(clf, TSNE):
                result["scores"] = clf.score(X_test, y_test).tolist()

    return result, X, y, clf

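# A toy invocation of process_pipeline, assuming the module's helper classes
# behave as the branches above suggest: standardize, project to 2 principal
# components, build a one-vs-one SVM, then 5-fold cross-validate. The data is
# synthetic; parameter keys mirror the ones read inside process_pipeline.
def _pipeline_example():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(60, 6)
    y = rng.randint(0, 2, size=60)
    parameters = {"pca_n_components": 2, "n_folds": 5}
    return process_pipeline(["scale", "pca", "svmovo", "kfold"],
                            X, y, parameters)
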
def process_data_handler(request):
    """ Get data for analysis and general information.

    Result format:
        plot: {original_data: {x: .., y: .., label: ...},
               new_data: {x: .., y: .., label: ...}}
        msg_info|msg_error|msg_success|msg_warning: ...
        data_tables: {table1: {table_columns: [.., ..], table_data: [[..]],
                               point_id: [...]},
                      table2: {...}}
    """
    form = VisInputForm(request.POST, request.FILES)
    resp_data = dict()
    plot = dict()
    data_tables = dict()
    if form.is_valid():
        data_file = form.cleaned_data["data_file"]
        label_file = form.cleaned_data["label_file"]
        add_data_file = form.cleaned_data["add_data_file"]
        predict_data_file = form.cleaned_data["new_data_file"]
        general_data_file = form.cleaned_data["general_data_file"]

        data_column_header = form.cleaned_data['data_column_header']
        add_data_column_header = form.cleaned_data['add_data_column_header']
        label_column_header = form.cleaned_data['label_column_header']
        new_data_column_header = form.cleaned_data['new_data_column_header']
        general_data_column_header = form.cleaned_data['general_data_column_header']

        df_data = pd.DataFrame()          # Original data space
        df_label = pd.DataFrame()         # Labels of the original data
        df_add_data = pd.DataFrame()      # Additional data for the base space
        df_new_data = pd.DataFrame()      # New data to predict
        df_general_info = pd.DataFrame()  # General info

        # Check whether the data contains a table header, then build the
        # dataframe with/without it.
        data_column_header_idx = None
        if data_file:
            if data_column_header == "on":
                data_column_header_idx = 0
            df_data = DataFrameUtil.file_to_dataframe(
                data_file, header=data_column_header_idx)

        # Reduce dimensions for visualization
        X_scaled = PreProcessingUtil.fit_transform(df_data)
        X_ori2d, pca = PcaUtil.reduce_dimension(X_scaled, n_components=2)
        # Convert the result to a dataframe for plotting
        df_plot_original = pd.DataFrame(data=X_ori2d, columns=['x', 'y'])

        if label_file:
            label_column_header_idx = None
            if label_column_header == "on":
                label_column_header_idx = 0
            df_label = DataFrameUtil.file_to_dataframe(
                label_file, header=label_column_header_idx)

        # Process additional data for the data table
        df_add_data_id = pd.DataFrame()  # Unique IDs to attach to data points
        if add_data_file:
            add_data_column_header_idx = None
            if add_data_column_header == "on":
                add_data_column_header_idx = 0
            df_add_data = DataFrameUtil.file_to_dataframe(
                add_data_file, header=add_data_column_header_idx)
            df_add_data_id = df_add_data.iloc[:, 0]

        # Join base space X, y ==> label, x coordinate, y coordinate
        df_plot_original['label'] = df_label

        # Optional: add a unique key to each data point
        if not df_add_data_id.empty:
            # Join id as the first column: point_id, label, x, y
            df_plot_original['point_id'] = df_add_data_id.values

        plot["original_data"] = df_plot_original.to_json()
        # For SlickGrid format
        plot["original_data_split"] = df_plot_original.to_json(orient='columns')
        # ====== End of processing original data for data points ======

        # Convert the new data to a dataframe --> JSON response
        df_plot_predict = pd.DataFrame()
        # If a new data file is uploaded, predict it and add it to the plot
        if predict_data_file:
            new_column_header_idx = None
            if new_data_column_header == "on":
                new_column_header_idx = 0
            df_new_data = DataFrameUtil.file_to_dataframe(
                predict_data_file, header=new_column_header_idx)
            # Process the data with the pipeline of the selected algorithm
            X_new_scaled, y_predict = predict_new_data(df_new_data)
            X_new2d, new_pca = PcaUtil.reduce_dimension(X_new_scaled,
                                                        n_components=2)
            df_plot_predict = pd.DataFrame(data=X_new2d, columns=['x', 'y'])
            df_plot_predict['label'] = y_predict
            plot['new_data'] = df_plot_predict.to_json()

        # If additional info for the predicted data is uploaded, update
        # new_data with point_id to get data in the format:
        # point_id, label, x, y
        df_predict_data_info = pd.DataFrame()
        if general_data_file:
            general_data_column_header_idx = None
            if general_data_column_header == "on":
                general_data_column_header_idx = 0
            df_predict_data_info = DataFrameUtil.file_to_dataframe(
                general_data_file, header=general_data_column_header_idx)
            # Optional: add a unique key (first column) to each data point
            df_plot_predict['point_id'] = df_predict_data_info.iloc[:, 0].values
            plot['new_data'] = df_plot_predict.to_json()
        # =========== End of processing predicted data =========

        if not df_predict_data_info.empty:
            # Append general info of the new data to the base space
            df_add_data = df_add_data.append(df_predict_data_info)

        # Prepare data for visualization
        resp_data['plot'] = plot

        # id column for SlickGrid (required)
        if not df_add_data_id.empty:
            df_data.insert(loc=0, column='id', value=df_add_data_id.values)
        else:
            df_data.insert(loc=0, column='id',
                           value=np.arange(0, df_data.shape[0]))
        data_tables['table1'] = {
            'table_data': df_data.to_json(orient='records'),
            'point_id': str(list(df_data['id'].values))}

        if not df_add_data.empty:
            # For SlickGrid use orient='records': [{..}, {..}]
            df_add_data['id'] = df_add_data.iloc[:, 0].values
            # SlickGrid does not support column names with a dot like "f.eid"
            df_add_data.rename(columns={'f.eid': 'f:eid'}, inplace=True)
            data_tables['table2'] = {
                'table_data': df_add_data.to_json(orient='records'),
                'point_id': df_add_data.iloc[:, 0].to_json(orient='values')}

            # int64 is not JSON serializable, so cast to str
            resp_data['height_min'] = str(df_add_data['height'].min())
            resp_data['height_max'] = str(df_add_data['height'].max())
            resp_data['weight_min'] = str(df_add_data['weight'].min())
            resp_data['weight_max'] = str(df_add_data['weight'].max())
            resp_data['age_min'] = str(df_add_data['age'].min())
            resp_data['age_max'] = str(df_add_data['age'].max())

        resp_data['data_tables'] = data_tables
    else:
        resp_data[msg.ERROR] = escape(form._errors)

    return JsonResponse(resp_data)

def process_data_handler(request):
    """ Process uploaded data to find the 3 features most relevant to the
    clinical outcomes.

    Result returned in JSON format as follows:
        plot: {data: {x: .., y: .., z: .., label: .., column_names: []}}
        msg_info|msg_error|msg_success|msg_warning: ...
        data_tables: {table1: {table_columns: [.., ..], table_data: [[..]],
                               point_id: [...]},
                      table2: {...}}
    """
    form = DataFileInputForm(request.POST, request.FILES)
    resp_data = dict()
    # 3D plot of the most important features
    plot = dict()
    # Feature-ranking plot
    plot_feature_ranking = dict()
    data_tables = dict()
    if form.is_valid():
        # Get input files
        data_file = form.cleaned_data["data_file"]
        output_file = form.cleaned_data["output_file"]
        data_column_header = form.cleaned_data['data_column_header']
        output_column_header = form.cleaned_data['output_column_header']

        # Declare empty dataframes to store the uploaded data.
        df_data = pd.DataFrame()
        df_output = pd.DataFrame()

        # Convert files to dataframes. Check whether the data contains a
        # table header, then build the dataframe with/without it.
        # Check that both required input files are valid.
        if data_file and output_file:
            # Convert radiomic data to a dataframe
            data_column_header_idx = None
            if data_column_header == "on":
                data_column_header_idx = 0
            df_data = DataFrameUtil.file_to_dataframe(
                data_file, header=data_column_header_idx)
            if data_column_header_idx is None:
                # Generate column names from 0 to len
                gen_cols = np.arange(0, df_data.shape[1]).astype(str)
                df_data.columns = gen_cols

            # Convert clinical-outcomes data to a dataframe
            output_column_header_idx = None
            if output_column_header == "on":
                output_column_header_idx = 0
            df_output = DataFrameUtil.file_to_dataframe(
                output_file, header=output_column_header_idx)
            if output_column_header_idx is None:
                # Generate column names from 0 to len (after reading the file)
                gen_cols_output = np.arange(0, df_output.shape[1]).astype(str)
                df_output.columns = gen_cols_output

            # Apply the feature selection model to select the 2 or 3 features
            # most relevant to the clinical outcomes
            X_selected, arr_sorted_columns, arr_sorted_importance, arr_cate_columns = \
                feature_selection_random_forest_regressor(df_data, df_output)

            # Prepare the result for the 3D plot and the grid tables of the
            # uploaded data, e.g. plot - selected features, grids - radiomic,
            # outcomes. Generate a unique id per row (required by SlickGrid).
            # TODO change unique_ids to patient ID or similar (confirm with Carlos)
            unique_ids = np.arange(0, df_data.shape[0])
            if df_data.shape[1] > 2:
                space_col_names = ['x', 'y', 'z']
            else:
                space_col_names = ['x', 'y']
            plot_data = pd.DataFrame(data=X_selected.values,
                                     columns=space_col_names)
            plot_data['label'] = unique_ids
            plot['column_names'] = list(X_selected.columns.values)

            # Feature ranking
            plot_feature_ranking['column_names'] = arr_sorted_columns
            plot_feature_ranking['importances'] = arr_sorted_importance

            # Data table
            plot["data"] = plot_data.to_json()

            # Add column 'id' for SlickGrid
            df_data.insert(loc=0, column='id', value=unique_ids)
            data_tables['table1'] = {
                'table_data': df_data.to_json(orient='records'),
                'column_names': list(df_data.columns.values),
                'point_id': str(unique_ids)}

            # The original outcome column names are used for generating the
            # group of colorscale buttons in the UI.
            df_output.insert(loc=0, column='id', value=unique_ids)
            data_tables['table2'] = {
                'table_data': df_output.to_json(orient='records'),
                'column_names': list(df_output.columns.values),
                'point_id': str(unique_ids),  # not used in the frontend
                'cate_columns': arr_cate_columns}

            # Prepare the response data
            resp_data['plot'] = plot
            resp_data['plot_feature_ranking'] = plot_feature_ranking
            resp_data['data_tables'] = data_tables
    else:
        resp_data[msg.ERROR] = escape(form._errors)

    return JsonResponse(resp_data)

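# feature_selection_random_forest_regressor is defined elsewhere; a hedged
# sketch of the idea with scikit-learn: rank features by random-forest
# importances against the outcomes and keep the top three. The return shape
# mirrors the call site above; the categorical-column detection is a rough
# assumption.
def rf_feature_selection_sketch(df_data, df_output, top_n=3):
    import numpy as np
    from sklearn.ensemble import RandomForestRegressor
    y = df_output.values
    if y.ndim == 2 and y.shape[1] == 1:
        y = y.ravel()
    rf = RandomForestRegressor(n_estimators=100, random_state=42)
    rf.fit(df_data.values, y)
    order = np.argsort(rf.feature_importances_)[::-1]
    sorted_columns = [df_data.columns[i] for i in order]
    sorted_importance = rf.feature_importances_[order].tolist()
    X_selected = df_data[sorted_columns[:top_n]]
    # Treat low-cardinality outcome columns as categorical (assumption)
    cate_columns = [c for c in df_output.columns if df_output[c].nunique() < 10]
    return X_selected, sorted_columns, sorted_importance, cate_columns
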