Example #1
def elbow_plot_handler_old(request):
    resp_data = dict()
    file_name = request.GET.get("file_name")
    column_header = request.GET.get("column_header")
    exclude_columns = request.GET.get("exclude_columns")
    # print(column_header)  # debug
    if file_name:
        fs = FileStorage()
        file_full_path = fs.get_base_location() + file_name
        
        # If the file exists, read the data with pandas and drop columns (if any)
        if fs.is_file(file_full_path):
            # Get data from file
            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0

            df = DataFrameUtil.convert_file_to_dataframe(file_full_path, header=column_header_idx)
            # Drop columns specified by the user (indexes are 1-based from the UI)
            if exclude_columns:
                str_column_indexes = exclude_columns.split(",")
                column_indexes = [int(i) - 1 for i in str_column_indexes]
                df = DataFrameUtil.drop_column_by_index(df, column_indexes)
                # Sanity checks on the cleaned data; results are currently unused
                is_nan = np.any(np.isnan(df))
                is_finite = np.all(np.isfinite(df))
            
            # Standardize data
            X_scaled = PreProcessingUtil.standardize(df)
            
            # Get explained variance ratio
            pca_helper = PcaUtil()
            pca = pca_helper.get_fit_transfrom_pca(X_scaled)
            arr_variance_ratio = pca.explained_variance_ratio_
            
            # Prepare tabs to display the plot and table with Bokeh
            # Add the ratios to a Bokeh line graph
            elbow_plot = draw_elbow_plot(arr_variance_ratio)
            
            # Describe data
            # df_describe = df.describe().to_json()
            # df_describe_table = draw_df_describe_table(df)
            
            # Add line to a panel
            tab1 = Panel(child=elbow_plot, title="Elbow Curve Plot")
            # tab2 = Panel(child=df_describe_table, title="Data Description")
            # Add a panel to tab
            tabs = Tabs(tabs=[ tab1 ])

            script, div = components(tabs)
            plots = { 'script': script, 'div': div}
            resp_data["bokeh_plot"] = plots
            # resp_data["data_describe"] = bokeh_df_describe_table
        else:
            resp_data["msg"] = "[ERROR] File is not found."
        
    else:
        resp_data['msg'] = "[ERROR] File name is invalid."
    
    return JsonResponse(resp_data) 
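
Both handlers rely on a draw_elbow_plot helper that is not shown in these examples. A minimal sketch, assuming Bokeh 2.x (the version where Panel and Tabs still exist) and plotting the cumulative explained variance; the axis labels are illustrative:

import numpy as np
from bokeh.plotting import figure

def draw_elbow_plot(arr_variance_ratio):
    """Line plot of cumulative explained variance per principal component."""
    cumulative = np.cumsum(arr_variance_ratio)
    p = figure(title="Elbow Curve",
               x_axis_label="Number of components",
               y_axis_label="Cumulative explained variance")
    p.line(list(range(1, len(cumulative) + 1)), cumulative, line_width=2)
    return p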
Example #2
def elbow_plot_handler(request):
    form = PcaPlotForm(request.POST, request.FILES)
    resp_data = dict()
    if form.is_valid():
        # Get input files
        data_file = form.cleaned_data["data_file"]
        df_input = DataFrameUtil.file_to_dataframe(data_file, header=None)
        
        X_scaled = PreProcessingUtil.standardize(df_input)
            
        # Get explained variance ratio
        pca_helper = PcaUtil()
        pca = pca_helper.get_fit_transfrom_pca(X_scaled)
        arr_variance_ratio = pca.explained_variance_ratio_
        
        # Prepare tabs to display the plot and table with Bokeh
        # Add the ratios to a Bokeh line graph
        elbow_plot = draw_elbow_plot(arr_variance_ratio)

        # Add line to a panel
        tab1 = Panel(child=elbow_plot, title="Elbow Curve Plot")
        # tab2 = Panel(child=df_describe_table, title="Data Description")
        # Add a panel to tab
        tabs = Tabs(tabs=[ tab1 ])

        script, div = components(tabs)
        plots = { 'script': script, 'div': div}
        resp_data["bokeh_plot"] = plots
        
    else:
        resp_data[msg.ERROR] = escape(form._errors)
    
    return JsonResponse(resp_data)
Example #3
def predict_new_data(df_new_data, model_id):
    """
    Predict new data based on selected model
    """
    model = ModelUtils.load_model(model_file_name=model_id)
    # Process data with the selected algorithm pipeline
    # print(df_new_data)
    # Note: fit_transform re-fits the scaler on the new data; ideally the
    # scaler fitted at training time would be reused (see the sketch after Example #6)
    X_new_scaled = PreProcessingUtil.fit_transform(df_new_data)
    y_predict = model.predict(X_new_scaled)

    return X_new_scaled, y_predict
Example #4
def load_model(model_name):
    # TODO change to load settings from DB
    # model_file_name = "radiomic482_svm_ovo_model.joblib"
    # model = ModelUtils.load_model(model_file_name)

    # TODO the data below must be the training data
    df_train = DataFrameUtil.convert_file_to_dataframe(
        fs.get_full_path("radiomic482_no_key.csv"), header=0)
    X_scaled = PreProcessingUtil.standardize(df_train)
    X_reduced = PcaUtil.reduce_dimension(X_scaled, n_components=50)
    model = KMeanUtil.get_kmean_model(X_reduced, n_clusters=5, random_state=42)
    return model
Example #5
def get_scaled_dataframe(form):
    data_file_name = form.cleaned_data['data_file_name']
    column_header = form.cleaned_data['column_header']
    # Get the file from storage
    data_file_full_path = fs.get_full_path(data_file_name)
    # Default to no header row; use row 0 when the checkbox is on
    # (previously column_header_idx was unbound when the checkbox was off)
    column_header_idx = None
    if column_header == "on":
        column_header_idx = 0

    df = DataFrameUtil.convert_file_to_dataframe(data_file_full_path,
                                                 header=column_header_idx)
    df_scaled = PreProcessingUtil.standardize(df)
    return df_scaled
Example #6
def predict_new_data(df_new_data):
    """
    Test algorithm
    """
    # TODO change this algorithm to final algo
    #     array_pipeline = [PIPELINE_SCALE, PIPELINE_SVM_OVO, PIPELINE_K_FOLD, PIPELINE_PCA]
    #     parameters = dict()
    #     parameters['n_folds'] = 5
    #     parameters['pca_n_components'] = 2
    model = ModelUtils.load_model(
        model_file_name="uci_breast_cancer_svmovo3.joblib")
    # Process data with the selected algorithm pipeline
    # print(df_new_data)  # debug
    X_new_scaled = PreProcessingUtil.fit_transform(df_new_data)
    y_predict = model.predict(X_new_scaled)

    return X_new_scaled, y_predict
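
Note that both predict_new_data variants call PreProcessingUtil.fit_transform, which re-fits the scaler on the incoming data. A minimal sketch of the usual alternative, persisting the training-time scaler with joblib; the file name and placeholder data below are assumptions, not part of the original code:

import numpy as np
from joblib import dump, load
from sklearn.preprocessing import StandardScaler

X_train = np.random.rand(100, 5)     # placeholder training features
df_new_data = np.random.rand(10, 5)  # placeholder new data

# At training time: fit the scaler on the training data only, then persist it
scaler = StandardScaler().fit(X_train)
dump(scaler, "uci_breast_cancer_scaler.joblib")  # hypothetical file name

# At prediction time: load the persisted scaler and transform without re-fitting
scaler = load("uci_breast_cancer_scaler.joblib")
X_new_scaled = scaler.transform(df_new_data)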
Example #7
def process_model_data(model_file_name, data_file_name, data_detail_file_name):
    # Convert files to dataframes
    fs = FileStorage()
    # TODO change
    column_header_idx = None
    # Dataframe of the data to process; this is new data, separate from training
    df_data = DataFrameUtil.convert_file_to_dataframe(
        fs.get_full_path(data_file_name), header=column_header_idx)

    # Dataframe for matching indexes with processed data and showing details
    column_header_idx = 0
    df_data_detail = DataFrameUtil.convert_file_to_dataframe(
        fs.get_full_path(data_detail_file_name), header=column_header_idx)

    # Load model
    model = ModelUtils.load_model(model_file_name)

    # TODO!!!!!! change to DB and dynamic
    # Do PCA
    logger.debug("Dimensionality Reduction by PCA...")
    pca_helper = PcaUtil()
    # Standardize data, reduce dimensions and return as X.
    X_scaled = PreProcessingUtil.fit_transform(df_data)

    # TODO make n_components=100 dynamic
    X_reduced = pca_helper.get_pc(X_scaled, n_components=100)
    pred_y = model.predict(X_reduced)
    df_label = pd.DataFrame(pred_y, columns=["Label"])

    # TODO Keep predicted result as label

    # https://www.geeksforgeeks.org/different-ways-to-create-pandas-dataframe/
    X_graph = pca_helper.get_pc(X_scaled, n_components=2)
    df_data = pd.DataFrame(X_graph, columns=['PC1', 'PC2'])

    df_graph = df_label.join(df_data)
    script, div = draw_2d(df_graph, df_data_detail)

    plot = dict()
    plot['script'] = script
    plot['div'] = div
    # Detail of each data point is matched by row/index

    return plot
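
The draw_2d helper used above is not included either. A minimal stand-in sketch, again assuming Bokeh 2.x; df_data_detail is accepted but not wired into hover tooltips in this sketch:

from bokeh.embed import components
from bokeh.models import ColumnDataSource
from bokeh.palettes import Category10
from bokeh.plotting import figure
from bokeh.transform import factor_cmap

def draw_2d(df_graph, df_data_detail):
    """Scatter plot of the 2-component projection, colored by predicted label."""
    df_graph = df_graph.copy()
    df_graph["Label"] = df_graph["Label"].astype(str)
    factors = sorted(df_graph["Label"].unique())
    source = ColumnDataSource(df_graph)
    p = figure(title="PCA projection",
               x_axis_label="PC1", y_axis_label="PC2")
    p.scatter("PC1", "PC2", source=source, legend_field="Label",
              color=factor_cmap("Label", Category10[10], factors))
    return components(p)  # (script, div)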
Example #8
def process():
    # TODO need to pass model name from DB
    model = load_model(model_name="xx")
    df_base_space = read_based_space_to_dataframe()

    X_scaled = PreProcessingUtil.standardize(df_base_space)
    X_reduced = PcaUtil.reduce_dimension(X_scaled, n_components=50)
    label = model.predict(X_reduced)

    X_2d = PcaUtil.reduce_dimension(X_scaled, n_components=2)
    # TODO add file name from DB
    df_data_detail = read_data_detail_to_dataframe(data_file_name="")

    # Join all data to one dataframe: x, y, label, data_detail
    df_result = pd.DataFrame(data=X_2d, columns=['x', 'y'])
    df_label = pd.DataFrame(data=label, columns=['label'])
    df_result = df_result.join(df_label)
    # Add Medical History Result
    df_result = df_result.join(df_data_detail)
    # Add Radiomic Result
    df_result = df_result.join(df_base_space)
    return df_result
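
The two readers referenced by process() are not shown. Hedged stand-ins, assuming they are thin wrappers around the file-storage utilities; the base-space file name is borrowed from load_model above and may not be the real one:

def read_based_space_to_dataframe():
    return DataFrameUtil.convert_file_to_dataframe(
        fs.get_full_path("radiomic482_no_key.csv"), header=0)

def read_data_detail_to_dataframe(data_file_name):
    return DataFrameUtil.convert_file_to_dataframe(
        fs.get_full_path(data_file_name), header=0)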
Example #9
def process_pipeline(arr_pipeline, X, y, parameters):
    result = dict()
    clf = None  # Model
    score = None

    for p in arr_pipeline:
        if p == "sfs":
            # Select data
            clf = feature_selection_sfs(X, y, parameters)
            if isinstance(X, pd.DataFrame):
                X = DataFrameUtil.get_columns_by_indexes(
                    X, list(clf.k_feature_idx_))
            elif isinstance(X, np.ndarray):
                X = X[:, list(clf.k_feature_idx_)]

            result["scores"] = clf.k_score_
            result['table_columns'] = ['Feature Indexes', 'Feature Names']
            # Convert data to array
            arr_feature_indexes = list(clf.k_feature_idx_)
            arr_feature_names = list(clf.k_feature_names_)
            result['table_data'] = [arr_feature_indexes, arr_feature_names]
        elif p == "select_k_best":
            # !! Input X must be non-negative.
            n_k = parameters['select_k_best_n_k']
            X = SelectKBest(chi2, k=n_k).fit_transform(X, y)
        elif p == "scale":
            # Standardize data
            X = PreProcessingUtil.fit_transform(X)
        elif p == "pca":
            # reduce dimensions and return as X.
            # logger.debug("Dimensionality Reduction by PCA...")
            n_components = parameters['pca_n_components']
            pca_helper = PcaUtil()
            X = pca_helper.reduce_dimension(X, n_components)

        elif p == "kernel_pca":
            # reduce dimensions and return as X.
            n_components = parameters['kernel_pca_n_components']
            kpca = KernelPCA(n_components=n_components, kernel='rbf', gamma=15)
            X = kpca.fit_transform(X, y)
        elif p == "lda":
            n_components = parameters['lda_n_components']
            clf = LinearDiscriminantAnalysis(n_components=n_components)
            X = clf.fit_transform(X, y)

        elif p == "tsne":
            n_components = parameters['tsne_n_components']
            clf = TSNE(n_components=n_components)
            X = clf.fit_transform(X, y)

        elif p == "svmovo":
            # Split train, test data based on specified ratio.
            # Select to create SVM as one vs one or one vs all
            clf = svm.SVC(gamma='scale', decision_function_shape='ovo')
            # no fit_transform function for SVC
            # clf.fit(X, y)

        elif p == "svmovr":
            clf = svm.LinearSVC(max_iter=5000)

        elif p == "kfold":
            n_folds = parameters['n_folds']
            scores = cross_val_score(clf, X, y, cv=n_folds)
            txt_accuracy = "%0.2f (+/- %0.2f)" % (scores.mean(),
                                                  scores.std() * 2)
            result["scores"] = scores.tolist()
            result["accuracy_mean"] = scores.mean()

        elif p == "stratified_kfold":
            stratified_kfold_n_split = parameters['stratified_kfold_n_split']
            stratified_kfold_shuffle = parameters['stratified_kfold_shuffle']
            StratifiedKFold(n_splits=stratified_kfold_n_split,
                            shuffle=stratified_kfold_shuffle,
                            random_state=42)

        elif p == "handout":
            # Set random_state here to get the same split for different run.
            test_size = parameters['test_size']
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=test_size, random_state=42)

            # X = X_test
            if isinstance(clf, svm.SVC) or isinstance(clf, LinearSVC):
                clf.fit(X_train, y_train)
                y = clf.predict(X)
            else:
                # t-SNE, not SVM
                X = clf.fit_transform(X_train, y_train)

            if not isinstance(clf, TSNE):
                result["scores"] = clf.score(X_test, y_test).tolist()

        # result['X'] = X.tolist();
        # result['y'] = y;
        # if p != "sfs" and clf:
        # result["params"] = clf.get_params(deep=True)
    # print(clf)  # debug
    return result, X, y, clf
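
A minimal usage sketch of process_pipeline, assuming the utility classes above are importable; the pipeline keys and parameter names are taken directly from the branches of the function:

from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
parameters = {"pca_n_components": 2, "n_folds": 5}
result, X_out, y_out, clf = process_pipeline(
    ["scale", "pca", "svmovo", "kfold"], X, y, parameters)
print(result["accuracy_mean"])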
Example #10
    def get_reduced_dim_data(df_source, df_target, feature_indexes,
                             target_label_index, arr_target_filter_col,
                             arr_numtypes, arr_criterion, reduce_dim_algorithm,
                             n_components):
        """
            Filter data by criterion and do PCA for 3d
            reduce_dim_algorithm: Only PCA is implemented for this phase
        """

        # Select only the selected source columns in radiomics
        df_selected_source = Helper.get_selected_columns_data(
            df_source, feature_indexes)
        df_selected_target = Helper.get_selected_columns_data(
            df_target, arr_target_filter_col)
        # Use length to split result between source and label later
        len_selected_source = len(df_selected_source.columns)

        df_data = df_selected_source.join(df_selected_target)

        # df_data, arr_criterion_columns, arr_numtypes, arr_criterion
        arr_criterion_columns = list(df_selected_target.columns)
        # get_filtered_data(df_data, target_col_indexes, arr_numtypes, arr_criterion_column_names, arr_criterion_value):
        df_start_res = Helper.get_filtered_data(df_data, arr_numtypes,
                                                arr_criterion_columns,
                                                arr_criterion)

        X = df_start_res.iloc[:, 0:len_selected_source]
        y = df_start_res[[df_target.columns.values[int(target_label_index)]]]
        # Select target columns
        #         df_selected_source = df_source.iloc[:, arr_int_source_col_idx]
        #         arr_selected_source_col = list(df_selected_source.columns)

        # Standardize data
        X_scaled = PreProcessingUtil.standardize(X)

        # When exactly 3 features are selected, skip PCA and return the
        # filtered, standardized result directly.
        if len_selected_source == 3:
            return X_scaled, y
        else:

            dim_3d = []
            pca_helper = PcaUtil()
            if reduce_dim_algorithm == PCA:
                # Get X transformed by PCA
                dim_3d, pca = pca_helper.reduce_dimension(
                    X_scaled, n_components=n_components)

            elif reduce_dim_algorithm == LDA:
                new_y = None
                # LDA supports only one target, so only one target is encoded
                col_y = y.columns.values
                label_type = y.loc[:, col_y[0]].dtype
                if label_type == 'object':
                    encoder = EncodingCategoricalFeatures()
                    new_y = encoder.label_encoder(y.loc[:, col_y[0]].values)

                elif label_type in [np.float64]:
                    raise BizValidationExption(
                        "Target Label", "Data type cannot be float number.")
                else:
                    new_y = y

                if isinstance(new_y, pd.DataFrame):
                    new_y = new_y.values

                # LDA yields at most n_classes - 1 components, so 3 output
                # dimensions require more than 3 classes.
                n_labels = len(np.unique(new_y))
                if n_labels <= 3:
                    raise BizValidationExption(
                        "LDA",
                        "To reduce dimensions to 3 by LDA, the number of classes must be greater than 3."
                    )

                # Don't specify n_components in this PCA step because the result differs
                X_transformed, pca = pca_helper.reduce_dimension(X_scaled)
                dim_3d = LdaUtil.reduce_dimension(X_transformed,
                                                  new_y.ravel(),
                                                  n_components=n_components)

        return dim_3d, y
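
EncodingCategoricalFeatures.label_encoder above presumably wraps something like scikit-learn's LabelEncoder; a minimal stand-in sketch:

from sklearn.preprocessing import LabelEncoder

class EncodingCategoricalFeatures:
    def label_encoder(self, values):
        # Map string labels to integers, e.g. ['a', 'b', 'a'] -> [0, 1, 0]
        return LabelEncoder().fit_transform(values)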
Example #11
def process_data_handler(request):
    """
    Get data for analysis and general information
    Result format
        plot: {original_data: {x: .., y:.., label: ...},
              new_data: {x:..., y:..., label:...}
              data_table: {table_columns: ..., table_data: ...}}
        msg_info|msg_error|msg_success|msg_warning| : ....
        
        data_tables: {table1: { table_columns: [..,..] , table_data: [[..]], point_id: [...]}, table2: {...}}
    """
    form = VisInputForm(request.POST, request.FILES)
    resp_data = dict()
    plot = dict()
    data_tables = dict()
    if form.is_valid():
        data_file = form.cleaned_data["data_file"]
        label_file = form.cleaned_data["label_file"]
        add_data_file = form.cleaned_data["add_data_file"]
        predict_data_file = form.cleaned_data["new_data_file"]
        general_data_file = form.cleaned_data["general_data_file"]

        data_column_header = form.cleaned_data['data_column_header']
        add_data_column_header = form.cleaned_data['add_data_column_header']
        label_column_header = form.cleaned_data['label_column_header']
        new_data_column_header = form.cleaned_data['new_data_column_header']
        general_data_column_header = form.cleaned_data[
            'general_data_column_header']

        df_data = pd.DataFrame()  # Original data space
        df_label = pd.DataFrame()  # Label of original data
        df_add_data = pd.DataFrame()  # Additional data for base space
        df_new_data = pd.DataFrame()  # New data to predict
        df_general_info = pd.DataFrame()  # General info

        # Check whether the data contains a table header, then read it
        # with/without the header row into a dataframe.
        df_plot_original = pd.DataFrame()  # initialized so the label join below is safe
        data_column_header_idx = None
        if data_file:
            if data_column_header == "on":
                data_column_header_idx = 0
            df_data = DataFrameUtil.file_to_dataframe(
                data_file, header=data_column_header_idx)
            # Reduce dimension for visualization
            X_scaled = PreProcessingUtil.fit_transform(df_data)
            X_ori2d, pca = PcaUtil.reduce_dimension(X_scaled, n_components=2)
            # print(X_ori2d)

            # Convert result to resulting dataframe
            df_plot_original = pd.DataFrame(data=X_ori2d, columns=['x', 'y'])

        df_y_ori = None
        if label_file:
            label_column_header_idx = None
            if label_column_header == "on":
                label_column_header_idx = 0
            df_label = DataFrameUtil.file_to_dataframe(
                label_file, header=label_column_header_idx)
            # df_y_ori = pd.DataFrame(data=df_label.values, columns=['label'])

        # Process additional data for data table

        df_add_data_id = pd.DataFrame()  # For unique ID to add to data point
        if add_data_file:
            add_data_column_header_idx = None
            if add_data_column_header == "on":
                add_data_column_header_idx = 0
            df_add_data = DataFrameUtil.file_to_dataframe(
                add_data_file, header=add_data_column_header_idx)
            df_add_data_id = df_add_data.iloc[:, 0]

        # Join base space X, y ==> label, x coordinate, y coordinate
        df_plot_original['label'] = df_label

        # Optional: Add unique key to data point
        if not df_add_data_id.empty:
            # Join id at the first column to format of: point_id, label, x, y
            # df_add_data_id = pd.DataFrame(data=df_add_data_id.values, columns=['point_id'])
            df_plot_original['point_id'] = df_add_data_id.values
            # df_plot_original = df_add_data_id.join(df_plot_original)

        # point_id, label, x, y
        plot["original_data"] = df_plot_original.to_json()
        # For SlickGrid format
        plot["original_data_split"] = df_plot_original.to_json(
            orient='columns')

        # ========== End of processing original data for data point ======

        # Convert additional data to dataframe --> json response
        df_plot_predict = pd.DataFrame()
        # If new data file is uploaded, predict the data and add to plot
        if predict_data_file:
            new_column_header_idx = None
            if new_data_column_header == "on":
                new_column_header_idx = 0
            df_new_data = DataFrameUtil.file_to_dataframe(
                predict_data_file, header=new_column_header_idx)
            # Process data with pipeline of selected algorithm
            X_new_scaled, y_predict = predict_new_data(df_new_data)
            X_new2d, new_pca = PcaUtil.reduce_dimension(X_new_scaled,
                                                        n_components=2)
            df_plot_predict = pd.DataFrame(data=X_new2d, columns=['x', 'y'])
            df_plot_predict['label'] = y_predict
            # If additional info for predict data is uploaded, get ID from the file
            plot['new_data'] = df_plot_predict.to_json()

        # If additional info for predicting data is uploaded
        # Update new_data with point_id to get data in format of
        # point_id, label, x, y
        df_predict_data_info = pd.DataFrame()
        df_predict_data_id = pd.DataFrame()
        if general_data_file:
            general_data_column_header_idx = None
            if general_data_column_header == "on":
                general_data_column_header_idx = 0

            df_predict_data_info = DataFrameUtil.file_to_dataframe(
                general_data_file, header=general_data_column_header_idx)
            # Optional: Add unique key to data point
            # Join id at the first column to point_id, label, x, y
            # df_predict_data_id = pd.DataFrame(data=df_predict_data_info.iloc[:, 0].values, columns=['point_id'])
            # df_plot_predict = df_predict_data_id.join(df_plot_predict)
            df_plot_predict['point_id'] = df_predict_data_info.iloc[:, 0].values

            plot['new_data'] = df_plot_predict.to_json()

        # =========== End of Processing Predict Data =========

        if not df_predict_data_info.empty:
            # Append the general info of the new data to the base space
            # (pd.concat replaces the deprecated DataFrame.append)
            df_add_data = pd.concat([df_add_data, df_predict_data_info])

        # Prepare data for visualize
        resp_data['plot'] = plot
        # id for slickgrid (required)
        if not df_add_data_id.empty:
            df_data.insert(loc=0, column='id', value=df_add_data_id.values)
        else:
            df_data.insert(loc=0,
                           column='id',
                           value=np.arange(0, df_data.shape[0]))

        data_tables['table1'] = {
            'table_data': df_data.to_json(orient='records'),
            'point_id': str(list(df_data['id'].values))}

        if not df_add_data.empty:
            # For SlickGrid use orient='records'
            # Format point_id: [{..}, {..}]
            df_add_data['id'] = df_add_data.iloc[:, 0].values
            # Slickgrid does not support column with dot like "f.eid"
            df_add_data.rename(columns={'f.eid': 'f:eid'}, inplace=True)
            data_tables['table2'] = {
                'table_data': df_add_data.to_json(orient='records'),
                'point_id': df_add_data.iloc[:, 0].to_json(orient='values')}

            # TypeError: Object of type 'int64' is not JSON serializable
            # Then cast to str
            resp_data['height_min'] = str(df_add_data['height'].min())
            resp_data['height_max'] = str(df_add_data['height'].max())
            resp_data['weight_min'] = str(df_add_data['weight'].min())
            resp_data['weight_max'] = str(df_add_data['weight'].max())
            resp_data['age_min'] = str(df_add_data['age'].min())
            resp_data['age_max'] = str(df_add_data['age'].max())
        resp_data['data_tables'] = data_tables
    else:
        resp_data[msg.ERROR] = escape(form._errors)

    return JsonResponse(resp_data)
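
A hedged client-side sketch for exercising this handler with requests; the endpoint URL is an assumption, and the optional file fields are omitted on the premise that the form allows that:

import requests

files = {
    "data_file": open("data.csv", "rb"),
    "label_file": open("labels.csv", "rb"),
}
data = {"data_column_header": "on", "label_column_header": "on"}
resp = requests.post("http://localhost:8000/process_data/",  # hypothetical URL
                     files=files, data=data)
print(resp.json().get("plot", {}))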
Example #12
def unsupervised_learning_train_test_handler(request):
    resp_data = dict()
    process_log = []
    msg = []
    resp_data['process_log'] = process_log
    resp_data['msg'] = msg

    form = SupervisedLearningTrainTestForm(request.GET)
    # When the form is valid, the data from the screen is converted to
    # Python types and stored in cleaned_data
    if form.is_valid():
        sel_algorithm = form.cleaned_data['sel_algorithm']
        sel_dim_reduction = form.cleaned_data['sel_dim_reduction']
        n_components = form.cleaned_data['n_components']
        dataset_file_name = form.cleaned_data['dataset_file_name']
        column_header = form.cleaned_data['column_header']
        label_file_name = form.cleaned_data['label_file_name']
        label_column_header = form.cleaned_data['label_column_header']
        test_size = form.cleaned_data['test_size']
        sel_test_method = form.cleaned_data['sel_test_method']
        n_folds = form.cleaned_data['n_folds']
        is_saved = form.cleaned_data['is_saved']
        model_file_name = form.cleaned_data['model_file_name']

        # Dataframe for storing dataset from file.
        df = None

        if fs.is_file_in_base_location(dataset_file_name) \
            and fs.is_file_in_base_location(label_file_name):

            # Get data file and store in data frame.
            data_file_path = fs.get_base_location() + dataset_file_name
            # dataset column header checking
            column_header_idx = None
            if column_header == "on":
                column_header_idx = 0

            df = DataFrameUtil.convert_file_to_dataframe(
                data_file_path, header=column_header_idx)

            # PCA process
            # Features data
            X = None
            if sel_dim_reduction == "pca":
                logger.debug("Dimensionality Reduction by PCA...")
                pca_helper = PcaHelper()
                # Standardize data, reduce dimensions and return as X.
                X_scaled = PreProcessingUtil.fit_transform(df)
                X = pca_helper.get_pc(X_scaled, n_components)
                logger.debug("PCA Done")

            # Label data
            y = None
            label_file_path = fs.get_base_location() + label_file_name
            label_column_header_idx = None
            if label_column_header == "on":
                label_column_header_idx = 0

            # Use pandas to read data then change to 1D array
            y = pd.read_csv(label_file_path,
                            header=label_column_header_idx).values.ravel()

            clf = None  # Model
            if sel_algorithm:
                logger.debug("Creating model by SVM...")
                # Create the SVM model as one-vs-one or one-vs-rest
                clf = init_model_object(sel_algorithm)

            if sel_test_method:
                logger.debug("Starting Cross Validation...")
                if sel_test_method == "cv" and n_folds:
                    scores = cross_val_score(clf, X, y, cv=n_folds)
                    txt_accuracy = "%0.2f (+/- %0.2f)" % (scores.mean(),
                                                          scores.std() * 2)
                    logger.debug(txt_accuracy)
                    resp_data["scores"] = scores.tolist()
                    resp_data["accuracy_mean"] = scores.mean()
                    resp_data["params"] = clf.get_params(deep=True)
                else:
                    # Hold-out split; random_state fixed for the same split across runs.
                    # TODO the split below is currently not used for fitting or scoring.
                    X_train, X_test, y_train, y_test = train_test_split(
                        X, y, test_size=test_size, random_state=42)

            if is_saved == 1 and model_file_name:
                clf.fit(X, y)
                logger.debug("Save model as %s", model_file_name)
                saved_model_file_name = ModelUtils.save_model(
                    clf, model_file_name)
                resp_data["msg"] = ("Model has been saved successfully as "
                                    + saved_model_file_name)
        else:
            # The dataset or label file was not found
            msg.append("File not found in storage.")

    else:
        resp_data['msg'] = form._errors

    return JsonResponse(resp_data)
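
Likewise, a hedged sketch for the train/test handler, which reads its inputs from request.GET; the parameter names match the form fields, while the URL, file names, and algorithm value are assumptions:

import requests

params = {
    "sel_algorithm": "svmovo",           # passed through to init_model_object
    "sel_dim_reduction": "pca",
    "n_components": 50,
    "dataset_file_name": "dataset.csv",  # must exist in the file storage
    "column_header": "on",
    "label_file_name": "labels.csv",
    "label_column_header": "on",
    "test_size": 0.2,
    "sel_test_method": "cv",
    "n_folds": 5,
    "is_saved": 0,
}
resp = requests.get("http://localhost:8000/train_test/", params=params)  # hypothetical URL
print(resp.json().get("accuracy_mean"))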