Example no. 1
def ml_input(request):  # TODO: the record id and everything related to the db still have to be added
    if (request.method == 'POST'):

        template = "machine_learning/results.html"
        mat = [[0.12347442045527879, 0.8094406486883253],
               [0.13438271020294834, 0.9195616568032954],
               [0.7340808740690876, 0.501292876257899],
               [0.15205183424532076, 0.7723196374025724],
               [0.15305657903122016, 0.02967990232224793],
               [0.751312493253797, 0.15057926746395178],
               [0.3325655818985571, 0.8545431696554671],
               [0.388049400727121, 0.6359039900354648],
               [0.7656376483351357, 0.011118993319648052],
               [0.3030715521728802, 0.3478716425630006]]


        #print "culoculoculoculo"  # GET THE POST, ELABORATE AND GO TO THE DB OR THE PLOT
        #print request.POST
        mydict = dict(request.POST.iterlists())
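        # request.POST is a Django QueryDict: iterlists() yields (key, list_of_values)
        # pairs, which is why every field below is read as mydict[key][0]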
        # for key in request.POST.iterkeys():  # "for key in request.GET" works too.
        #     # Add filtering logic here.
        #
        #     print key, request.POST.getlist(key)

        #print mydict

        #print '-' * 60
        #localdir = 'PhysioWat/preproc/scripts/processing_scripts/output/'
        #input_data = pd.DataFrame.from_csv(path=localdir + 'feat_claire_labeled.csv')  # , index_col=None, sep=',')
        exprecid = mydict['choose_id']
        #exprecid = [18]
        input_data = pddbload.load_file_pd_db(exprecid[0])
        num_feat = -1  # default: no feature count selected yet

        percentage = mydict['test_percentage'][0]
        percentage = float(percentage) / 100.0
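        # e.g. a posted '30' becomes 0.30, presumably the fraction of data held out as the test set by ft.split below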
        list_of_feat = list(input_data.columns)
        num_iteration = mydict['number_of_iterations'][0]
        ft.iterations = int(num_iteration)
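        # ft.iterations is presumably a module-level setting read by the cross-validation helpers called below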
        algorithm = mydict['alg_choice'][0]
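        # algorithm codes handled below: KNN, DCT, SVM, RFC, ADA, LDA, or 'ALL' for automatic selection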
        flag = True
        if 'viewf' in mydict:
            if 'norm' in mydict['viewf']:
                input_data = ft.normalize(input_data)
                #print input_data
            train_data, test_data = ft.split(input_data, percentage)
            flag = False
            if 'sel' in mydict['viewf']:
                # print "i have selected the first stuff!"
                if 'k_selected' in mydict['FeatChoose']:
                    num_feat = int(mydict['feat_num'][0])
                    if (num_feat <= 0):
                        return render(request, "machine_learning/form_error.html")
                    train_data, test_data, list_of_feat = ft.getfeatnumber(train_data, test_data, num_feat)  # returns the train/test subsets restricted to the requested number of features

                if ('k_auto' in mydict['FeatChoose']):
                    train_data, test_data, best_feat_n_mat, list_of_feat = ft.bestfeatn(train_data, test_data)
        if flag:
            train_data, test_data = ft.split(input_data, percentage)
        print "after the viewf branch"

        if (algorithm == 'ALL') and ('auto' not in mydict['parameter_choice']):
            return render(request, "machine_learning/form_error.html")

        if 'def' in mydict['parameter_choice']:
            clf, score, error = ft.quick_crossvalidate(train_data, alg=algorithm)



        if 'pers' in mydict['parameter_choice']:
            if (algorithm == 'KNN'):
                k_neighbour = mydict['k_neighbour'][0]
                print(k_neighbour)
                clf, score, error = ft.pers_crossvalidation1(train_data, algorithm, k_neighbour)
            if (algorithm == 'DCT'):
                max_features = mydict['max_features'][0]
                #print(type(max_features)) #IT'S A STRING!!!!
                clf, score, error = ft.pers_crossvalidation1(train_data, algorithm, max_features)
            if (algorithm == 'SVM'):
                kernel = mydict['kernel']
                C = mydict['C']
                clf, score, error = ft.pers_crossvalidation2(train_data, algorithm, kernel, C)
            if (algorithm == 'RFC'):
                max_features = mydict['max_features']
                number_estimators = mydict['number_estimators']
                clf, score, error = ft.pers_crossvalidation2(train_data, algorithm, max_features, number_estimators)
            if (algorithm == 'ADA'):
                number_estimators = mydict['number_estimators']
                learning_rate = mydict['learning_rate']
                clf, score, error = ft.pers_crossvalidation2(train_data, algorithm, number_estimators, learning_rate)
            if (algorithm == 'LDA'):
                solver = mydict['solver']
                clf, score, error = ft.pers_crossvalidation1(train_data, algorithm, solver)
        if 'auto' in mydict['parameter_choice']:
            metrics = mydict['maximize'][0]
            #print  metrics
            clf, result_mat = ft.bestAlg(train_data, metrics)

        dic_metric, conf_mat = ft.test_learning(clf, test_data)
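        # test_learning evaluates the fitted classifier on the held-out test split and returns
        # a dict of metric scores plus the confusion matrix that feeds the heatmap below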

        print dic_metric, conf_mat

        h = heatmap()
        data = h.get_data(conf_mat)
        context = {'datac': json.dumps(data)}
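        # the heatmap payload is JSON-encoded so that results.html can draw the confusion matrix client-side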
        # TODO check the 'matrix for'
        return render(request, template, context)


        #CALL OTHER FUNCTIONS / GET OTHER DATAS/
        #final_ml_page(request, result_dict=dic_metric, conf_mat=conf_mat)
# ---------------------------------------------------------------------------------------
# TODO HERE STARTS THE FINAL PART OF THE MACHINE LEARNING, WHICH IS NO MORE PROCESSING BUT JUST RENDERING THE FORM (and getting the json)
# -------------------------------------------------------------------------------


    else:
        template = "machine_learning/ml_input.html"
        form_viewf = viewFeatures()
        form_f_par = FeatPar()
        form_test_par = TestParam()
        form_alg_choose = AlgChoose()
        form_alg_param = AlgParam()

        # form_knn = KnnParam()
        form_svm = SvmParam()
        form_knear = KNearParam()
        form_dectree = DecTreeParam()
        form_rndfor = RndForParam()
        form_adaboost = AdaBoostParam()
        form_lda = LatDirAssParam()
        form_autoParam = autoFitParam()
        form_list = [form_svm, form_knear, form_dectree, form_rndfor, form_adaboost, form_lda]

        id_list = getprocessedrecordid()
        print id_list
        id_list = [(i, str(i)) for i in id_list]
        print id_list
        form_list_id = id_choose(choices=id_list)
        print form_list_id
        #print(form_viewf)
        #print form_f_par

        context = {'viewf': form_viewf,
                   'FPar': FeatPar,
                   'TPar': form_test_par,
                   'AlgChoose': form_alg_choose,
                   'AlgParamChoose': form_alg_param,
                   'forms': {'form_SVM': form_svm,
                             'form_KNN': form_knear,
                             'form_DCT': form_dectree,
                             'form_RFC': form_rndfor,
                             'form_ADA': form_adaboost,
                             'form_LDA': form_lda, },

                   'autoParam': form_autoParam,
                   'formListId':form_list_id
                   }
        print '-' * 60
        #print context['forms']
        #print '-' * 60
        return render(request, template, context)
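
# ---------------------------------------------------------------------------------------
# For reference: a minimal, hypothetical sketch of the POST payload the view above expects,
# reconstructed from the keys it reads (values are illustrative only, not from the original source).
# With 'pers' as parameter_choice the form also posts the per-algorithm fields
# (k_neighbour, max_features, kernel, C, number_estimators, learning_rate, solver).
example_post = {
    'choose_id': ['18'],             # processed record id, loaded via pddbload.load_file_pd_db
    'test_percentage': ['30'],       # held-out share of the data, divided by 100 in the view
    'number_of_iterations': ['10'],  # assigned to ft.iterations
    'alg_choice': ['SVM'],           # one of KNN, DCT, SVM, RFC, ADA, LDA, or ALL
    'viewf': ['norm', 'sel'],        # optional: normalise the data and/or run feature selection
    'FeatChoose': ['k_selected'],    # 'k_selected' together with 'feat_num', or 'k_auto'
    'feat_num': ['5'],
    'parameter_choice': ['def'],     # 'def' (defaults), 'pers' (custom), or 'auto'
    'maximize': ['ACC'],             # metric code used when parameter_choice is 'auto'
}
# ---------------------------------------------------------------------------------------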
Example no. 2
def ml_input(request, id_record):  # TODO: finish wiring the record id and everything related to the db
    if (request.method == 'POST'):

        # print "culoculoculoculo"  # GET THE POST, ELABORATE AND GO TO THE DB OR THE PLOT
        # print request.POST
        mydict = dict(request.POST.iterlists())
        # for key in request.POST.iterkeys():  # "for key in request.GET" works too.
        #     # Add filtering logic here.
        #
        #     print key, request.POST.getlist(key)

        print mydict
        # print '-' * 60
        # localdir = '/home/emanuele/wv_physio/PhysioWat/PhysioWat/preproc/scripts/processing_scripts/output/'
        # input_data = pd.DataFrame.from_csv(path=localdir + 'feat_claire_labeled.csv')  # , index_col=None, sep=','
        exprecid = mydict['choose_id']
        print exprecid
        input_data = pddbload.load_file_pd_db(int(exprecid[0]))
        num_feat = -1  # default: no feature count selected yet
        print 'input data shape:', input_data.shape
        percentage = mydict['test_percentage'][0]
        percentage = float(percentage) / 100.0

        num_iteration = mydict['number_of_iterations'][0]
        ft.iterations = int(num_iteration)
        algorithm = mydict['alg_choice'][0]
        flag = True

        flag_has_selected_auto_feat = False
        list_of_feat = None
        best_feat_n_mat = []
        num_feat = -1
        auto_alg_result_mat = None
        best_feat_json = None
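        # defaults for the optional outputs; the feature-selection and auto-fit branches below overwrite them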

        if 'viewf' in mydict:
            print "hellp"
            if 'norm' in mydict['viewf']:
                input_data = ft.normalize(input_data)
                print input_data.shape
            train_data, test_data = ft.split(input_data, percentage)
            print train_data.shape, test_data.shape
            flag = False

            if 'sel' in mydict['viewf']:
                # print "i have selected the first stuff!"
                if 'k_selected' in mydict['FeatChoose']:
                    num_feat = int(mydict['feat_num'][0])
                    if (num_feat <= 0):
                        return render(request,
                                      "machine_learning/form_error.html")
                    train_data, test_data, list_of_feat = ft.getfeatnumber(
                        train_data, test_data, num_feat
                    )  # RETURNS 2 SUBSET DF GIVEN IN INPUT THE TRAIN DATA, THE TEST DATA, AND THE NUMBER OF FEATS

                if ('k_auto' in mydict['FeatChoose']):
                    print "hi"
                    train_data, test_data, best_feat_n_mat, list_of_feat = ft.bestfeatn(
                        train_data, test_data)
                    flag_has_selected_auto_feat = True
                    list_of_feat = list_of_feat[4]
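                    # assumption: index 4 picks the feature-name list for the best-scoring subset size returned by bestfeatn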

        if flag:
            train_data, test_data = ft.split(input_data, percentage)
            flag = False

        if (algorithm == 'ALL') and ('auto' not in mydict['parameter_choice']):
            return render(request, "machine_learning/form_error.html")

        if 'def' in mydict['parameter_choice']:
            clf, score, error = ft.quick_crossvalidate(train_data,
                                                       alg=algorithm)

        if 'pers' in mydict['parameter_choice']:

            if (algorithm == 'KNN'):
                k_neighbour = mydict['k_neighbour'][0]
                print(k_neighbour)
                clf, score, error = ft.pers_crossvalidation1(
                    train_data, algorithm, k_neighbour)
            if (algorithm == 'DCT'):
                max_features = mydict['max_features'][0]
                clf, score, error = ft.pers_crossvalidation1(
                    train_data, algorithm, max_features)
            if (algorithm == 'SVM'):
                kernel = mydict['kernel']
                C = mydict['C']
                clf, score, error = ft.pers_crossvalidation2(
                    train_data, algorithm, kernel, C)
            if (algorithm == 'RFC'):
                max_features = mydict['max_features']
                number_estimators = mydict['number_estimators']
                clf, score, error = ft.pers_crossvalidation2(
                    train_data, algorithm, max_features, number_estimators)
            if (algorithm == 'ADA'):
                number_estimators = mydict['number_estimators']
                learning_rate = mydict['learning_rate']
                clf, score, error = ft.pers_crossvalidation2(
                    train_data, algorithm, number_estimators, learning_rate)
            if (algorithm == 'LDA'):
                solver = mydict['solver']
                clf, score, error = ft.pers_crossvalidation1(
                    train_data, algorithm, solver)

        if 'auto' in mydict['parameter_choice']:
            metrics = mydict['maximize'][0]
            # print  metrics
            if (algorithm == 'ALL'):
                clf, auto_alg_result_mat = ft.bestAlg(train_data, metrics)
            else:
                clf, loc_metric, loc_error, loc_mat = ft.bestfit(
                    train_data, algorithm, metrics)

        dic_metric, conf_mat = ft.test_learning(clf, test_data)

        # print dic_metric, conf_mat
        # print  best_feat_n_mat

        # print "BEST FEAT NUMBER: PRECISION IN FUNFCION OF THE UMBER", type(best_feat_n_mat), best_feat_n_mat
        # print "CONFUSION MATRIX" ,conf_mat
        # print "DICTIONARY OF THE METRICS", dic_metric

        best_feat_json = best_feat_n_mat  # see if it's needed
        # categories = #PICK ALGORITHM CATEGORIES
        if len(best_feat_n_mat) > 0:
            s = linegraph3()
            best_feat_json = s.get_data(
                data_tmp=best_feat_n_mat.tolist())  # xcategories = categories)
            best_feat_json = json.dumps(best_feat_json)
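            # JSON series (score vs. number of features) intended for the template (see the commented-out context below)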
            print "LA MATRICE DELLE FEATURES, CHE SUL TEMPLATE FUNZIONA  ", best_feat_json

        # PART OF THE BEST ALGORITHM
        if auto_alg_result_mat is not None:
            s = linegraph3()
            auto_alg_result_mat = list(np.array(auto_alg_result_mat[:, 1:]))
            algorithm_categories = [
                'KNN', 'SVM', 'DCT', 'RND', 'ADA', 'QDA', 'LDA'
            ]  # TODO PEDOT FUNCTION!!!!
            auto_alg_result_mat = s.get_data(
                data_tmp=auto_alg_result_mat,
                xcategories=algorithm_categories,
                tipo="errorbar",
                title="precision of the various algorithms")
            auto_alg_result_mat = json.dumps(auto_alg_result_mat)
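            # one error-bar entry per algorithm, JSON-encoded for the comparison chart on the results page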

        # PART OF THE METRICS VALUE!!!

        dict_sigla_parola = {
            'ACC': 'accuracy %',
            'F1M': 'F-Test macro',
            'F1m': 'F-Test micro',
            'F1W': 'F-Test weighted',
            'WHM': 'Weighted Harmonic Mean of precision and recall',
            'PRM': 'Precision Score Macro',
            'PRm': 'Precision Score Micro',
            'PRW': 'Precision Score Weighted',
            'REM': 'Recall Score Macro',
            'REm': 'Recall Score Micro',
            'REW': 'Recall Score Weighted'
        }
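        # maps the short metric codes returned by test_learning to readable labels, used as x-axis categories below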

        print "dizionario -------", dic_metric
        s = linegraph3()
        xcate = []
        for i in dic_metric.keys():
            xcate.append(dict_sigla_parola[i])
        metrics = s.get_data(data_tmp=dic_metric.values(),
                             title="metrics accuracy",
                             tipo="scatter",
                             xcategories=xcate)
        metrics = json.dumps(metrics)
        print "metriche --->", metrics  # todo delete line
        # PART OF THE CONFUSION MATRIX
        h = heatmap()
        # conf_mat = conf_mat.tolist() #!!!important
        conf_data = h.get_data(conf_mat)
        conf_data = json.dumps(conf_data)

        context = {
            'auto_alg_result_mat':
            auto_alg_result_mat,  # boxplot with the algorithms' scores
            'conf_mat': conf_data,
            'metrics': metrics,
            'list_of_feat': list_of_feat,
            # second part, with the list of features and the function score vs nfeat
            'best_feat_scores': best_feat_n_mat,
        }

        if len(best_feat_n_mat) > 0:
            best_feat_n_mat[:, 1] *= 100.0
        # best_feat_n_mat = best_feat_n_mat.tolist()  # LIST: CONTAINS THE PRECISION AS A FUNCTION OF THE NUMBER OF FEATURES

        # TODO check the 'matrix for'
        # todo best n feat NAMES?
        if list_of_feat:
            list_of_feat = [i.replace('_', ' ') for i in list_of_feat]
        template = "machine_learning/results.html"
        # context = {'conf_mat': conf_data,
        #            'dic_result':dic_metric,  # essential part, the last one (conf.matrix)
        #            'list_of_feat': list_of_feat,     #second part, with the list of features and the function score vs nfeat
        #            'best_feat_scores': best_feat_n_mat,
        #            'dict_sigla_parola': dict_sigla_parola,
        #            'best_feat_scores_json': best_feat_json,
        #
        #            'auto_alg_result_mat':auto_alg_result_mat, #boxplot with the algorithms cosres
        #            }
        return render(request, template, context)

        # CALL OTHER FUNCTIONS / GET OTHER DATAS/
        # final_ml_page(request, result_dict=dic_metric, conf_mat=conf_mat)
    # ---------------------------------------------------------------------------------------
    # TODO HERE STARTS THE FINAL PART OF THE MACHINE LEARNING, WHICH IS NO MORE PROCESSING BUT JUST RENDERING THE FORM (and getting the json)
    # -------------------------------------------------------------------------------

    else:
        template = "machine_learning/ml_input.html"
        form_viewf = viewFeatures()
        form_f_par = FeatPar()
        form_test_par = TestParam()
        form_alg_choose = AlgChoose()
        form_alg_param = AlgParam()

        # form_knn = KnnParam()
        form_svm = SvmParam()
        form_knear = KNearParam()
        form_dectree = DecTreeParam()
        form_rndfor = RndForParam()
        form_adaboost = AdaBoostParam()
        form_lda = LatDirAssParam()
        form_autoParam = autoFitParam()
        form_list = [
            form_svm, form_knear, form_dectree, form_rndfor, form_adaboost,
            form_lda
        ]

        id_list = getprocessedrecordid(id_record)
        # print  id_list
        # id_list=[(i, str(i)) for i in id_list ]
        # print id_list
        form_list_id = id_choose(choices=id_list)
        print '###############'
        print form_list_id
        print '###############'
        # print(form_viewf)
        # print form_f_par

        context = {
            'viewf': form_viewf,
            'FPar': FeatPar,
            'TPar': form_test_par,
            'AlgChoose': form_alg_choose,
            'AlgParamChoose': form_alg_param,
            'forms': {
                'form_SVM': form_svm,
                'form_KNN': form_knear,
                'form_DCT': form_dectree,
                'form_RFC': form_rndfor,
                'form_ADA': form_adaboost,
                'form_LDA': form_lda,
            },
            'autoParam': form_autoParam,
            'formListId': form_list_id
        }
        print '-' * 60
        # print context['forms']
        # print '-' * 60
        return render(request, template, context)
Example no. 3
def ml_input(request, id_record):  # TODO: finish wiring the record id and everything related to the db
    if (request.method == 'POST'):

        # print "culoculoculoculo"  # GET THE POST, ELABORATE AND GO TO THE DB OR THE PLOT
        # print request.POST
        mydict = dict(request.POST.iterlists())
        # for key in request.POST.iterkeys():  # "for key in request.GET" works too.
        #     # Add filtering logic here.
        #
        #     print key, request.POST.getlist(key)

        print mydict
        # print '-' * 60
        # localdir = '/home/emanuele/wv_physio/PhysioWat/PhysioWat/preproc/scripts/processing_scripts/output/'
        # input_data = pd.DataFrame.from_csv(path=localdir + 'feat_claire_labeled.csv')  # , index_col=None, sep=','
        exprecid = mydict['choose_id']
        print exprecid
        input_data = pddbload.load_file_pd_db(int(exprecid[0]))
        num_feat = -1  # default: no feature count selected yet
        print 'input data shape:', input_data.shape
        percentage = mydict['test_percentage'][0]
        percentage = float(percentage) / 100.0

        num_iteration = mydict['number_of_iterations'][0]
        ft.iterations = int(num_iteration)
        algorithm = mydict['alg_choice'][0]
        flag = True

        flag_has_selected_auto_feat = False
        list_of_feat = None
        best_feat_n_mat = []
        num_feat = -1
        auto_alg_result_mat = None
        best_feat_json = None
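        # defaults for the optional outputs, filled in by the feature-selection / auto-fit branches below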

        if 'viewf' in mydict:
            print "hellp"
            if 'norm' in mydict['viewf']:
                input_data = ft.normalize(input_data)
                print input_data.shape
            train_data, test_data = ft.split(input_data, percentage)
            print train_data.shape, test_data.shape
            flag = False

            if 'sel' in mydict['viewf']:
                # print "i have selected the first stuff!"
                if 'k_selected' in mydict['FeatChoose']:
                    num_feat = int(mydict['feat_num'][0])
                    if (num_feat <= 0):
                        return render(request, "machine_learning/form_error.html")
                    train_data, test_data, list_of_feat = ft.getfeatnumber(train_data, test_data,
                                                                           num_feat)  # RETURNS 2 SUBSET DF GIVEN IN INPUT THE TRAIN DATA, THE TEST DATA, AND THE NUMBER OF FEATS

                if ('k_auto' in mydict['FeatChoose']):
                    print "hi"
                    train_data, test_data, best_feat_n_mat, list_of_feat = ft.bestfeatn(train_data, test_data)
                    flag_has_selected_auto_feat = True
                    list_of_feat = list_of_feat[4]

        if flag:
            train_data, test_data = ft.split(input_data, percentage)
            flag = False

        if (algorithm == 'ALL') and ('auto' not in mydict['parameter_choice']):
            return render(request, "machine_learning/form_error.html")

        if 'def' in mydict['parameter_choice']:
            clf, score, error = ft.quick_crossvalidate(train_data, alg=algorithm)

        if 'pers' in mydict['parameter_choice']:

            if (algorithm == 'KNN'):
                k_neighbour = mydict['k_neighbour'][0]
                print(k_neighbour)
                clf, score, error = ft.pers_crossvalidation1(train_data, algorithm, k_neighbour)
            if (algorithm == 'DCT'):
                max_features = mydict['max_features'][0]
                clf, score, error = ft.pers_crossvalidation1(train_data, algorithm, max_features)
            if (algorithm == 'SVM'):
                kernel = mydict['kernel']
                C = mydict['C']
                clf, score, error = ft.pers_crossvalidation2(train_data, algorithm, kernel, C)
            if (algorithm == 'RFC'):
                max_features = mydict['max_features']
                number_estimators = mydict['number_estimators']
                clf, score, error = ft.pers_crossvalidation2(train_data, algorithm, max_features, number_estimators)
            if (algorithm == 'ADA'):
                number_estimators = mydict['number_estimators']
                learning_rate = mydict['learning_rate']
                clf, score, error = ft.pers_crossvalidation2(train_data, algorithm, number_estimators, learning_rate)
            if (algorithm == 'LDA'):
                solver = mydict['solver']
                clf, score, error = ft.pers_crossvalidation1(train_data, algorithm, solver)

        if 'auto' in mydict['parameter_choice']:
            metrics = mydict['maximize'][0]
            # print  metrics
            if (algorithm == 'ALL'):
                clf, auto_alg_result_mat = ft.bestAlg(train_data, metrics)
            else:
                clf, loc_metric, loc_error, loc_mat = ft.bestfit(train_data, algorithm, metrics)

        dic_metric, conf_mat = ft.test_learning(clf, test_data)
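        # evaluate the fitted classifier on the held-out test split: dict of metric scores plus confusion matrix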

        # print dic_metric, conf_mat
        # print  best_feat_n_mat

        # print "BEST FEAT NUMBER: PRECISION IN FUNFCION OF THE UMBER", type(best_feat_n_mat), best_feat_n_mat
        # print "CONFUSION MATRIX" ,conf_mat
        # print "DICTIONARY OF THE METRICS", dic_metric


        best_feat_json = best_feat_n_mat  # see if it's needed
        # categories = #PICK ALGORITHM CATEGORIES
        if len(best_feat_n_mat) > 0:
            s = linegraph3()
            best_feat_json = s.get_data(data_tmp=best_feat_n_mat.tolist())  # xcategories = categories)
            best_feat_json = json.dumps(best_feat_json)
            print "LA MATRICE DELLE FEATURES, CHE SUL TEMPLATE FUNZIONA  ", best_feat_json

        # PART OF THE BEST ALGORITHM
        if auto_alg_result_mat is not None:
            s = linegraph3()
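            # assumption: column 0 of auto_alg_result_mat holds the algorithm identifiers, so only the score columns are kept for plotting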
            auto_alg_result_mat = list(np.array(auto_alg_result_mat[:, 1:]))
            algorithm_categories = ['KNN', 'SVM', 'DCT', 'RND', 'ADA', 'QDA', 'LDA']  # TODO PEDOT FUNCTION!!!!
            auto_alg_result_mat = s.get_data(data_tmp=auto_alg_result_mat, xcategories=algorithm_categories,
                                             tipo="errorbar", title="precision of the various algorithms")
            auto_alg_result_mat = json.dumps(auto_alg_result_mat)

        # PART OF THE METRICS VALUE!!!

        dict_sigla_parola = {'ACC': 'accuracy %',
                             'F1M': 'F-Test macro',
                             'F1m': 'F-Test micro',
                             'F1W': 'F-Test weighted',
                             'WHM': 'Weighted Harmonic Mean of precision and recall',
                             'PRM': 'Precision Score Macro',
                             'PRm': 'Precision Score Micro',
                             'PRW': 'Precision Score Weighted',
                             'REM': 'Recall Score Macro',
                             'REm': 'Recall Score Micro',
                             'REW': 'Recall Score Weighted'}

        print "dizionario -------", dic_metric
        s = linegraph3()
        xcate = []
        for i in dic_metric.keys():
            xcate.append(dict_sigla_parola[i])
        metrics = s.get_data(data_tmp=dic_metric.values(), title="metrics accuracy", tipo="scatter", xcategories=xcate)
        metrics = json.dumps(metrics)
        print "metriche --->", metrics  # todo delete line
        # PART OF THE CONFUSION MATRIX
        h = heatmap()
        # conf_mat = conf_mat.tolist() #!!!important
        conf_data = h.get_data(conf_mat)
        conf_data = json.dumps(conf_data)

        context = {'auto_alg_result_mat': auto_alg_result_mat,  # boxplot with the algorithms' scores
                   'conf_mat': conf_data,
                   'metrics': metrics,
                   'list_of_feat': list_of_feat,
                   # second part, with the list of features and the function score vs nfeat
                   'best_feat_scores': best_feat_n_mat,
                   }

        if len(best_feat_n_mat) > 0:
            # note: this rescales the scores to percentages in place, after best_feat_n_mat was already put into the context above
            best_feat_n_mat[:, 1] *= 100.0
        # best_feat_n_mat = best_feat_n_mat.tolist()  # LIST: CONTAINS THE PRECISION AS A FUNCTION OF THE NUMBER OF FEATURES


        # TODO check the 'matrix for'
        # todo best n feat NAMES?
        if list_of_feat:
            list_of_feat = [i.replace('_', ' ') for i in list_of_feat]
        template = "machine_learning/results.html"
        # context = {'conf_mat': conf_data,
        #            'dic_result':dic_metric,  # essential part, the last one (conf.matrix)
        #            'list_of_feat': list_of_feat,     #second part, with the list of features and the function score vs nfeat
        #            'best_feat_scores': best_feat_n_mat,
        #            'dict_sigla_parola': dict_sigla_parola,
        #            'best_feat_scores_json': best_feat_json,
        #
        #            'auto_alg_result_mat':auto_alg_result_mat, #boxplot with the algorithms cosres
        #            }
        return render(request, template, context)


        # CALL OTHER FUNCTIONS / GET OTHER DATAS/
        # final_ml_page(request, result_dict=dic_metric, conf_mat=conf_mat)
    # ---------------------------------------------------------------------------------------
    # TODO HERE STARTS THE FINAL PART OF THE MACHINE LEARNING, WHICH IS NO MORE PROCESSING BUT JUST RENDERING THE FORM (and getting the json)
    # -------------------------------------------------------------------------------


    else:
        template = "machine_learning/ml_input.html"
        form_viewf = viewFeatures()
        form_f_par = FeatPar()
        form_test_par = TestParam()
        form_alg_choose = AlgChoose()
        form_alg_param = AlgParam()

        # form_knn = KnnParam()
        form_svm = SvmParam()
        form_knear = KNearParam()
        form_dectree = DecTreeParam()
        form_rndfor = RndForParam()
        form_adaboost = AdaBoostParam()
        form_lda = LatDirAssParam()
        form_autoParam = autoFitParam()
        form_list = [form_svm, form_knear, form_dectree, form_rndfor, form_adaboost, form_lda]

        id_list = getprocessedrecordid(id_record)
        # print  id_list
        # id_list=[(i, str(i)) for i in id_list ]
        # print id_list
        form_list_id = id_choose(choices=id_list)
        print '###############'
        print form_list_id
        print '###############'
        # print(form_viewf)
        # print form_f_par

        context = {'viewf': form_viewf,
                   'FPar': FeatPar,
                   'TPar': form_test_par,
                   'AlgChoose': form_alg_choose,
                   'AlgParamChoose': form_alg_param,
                   'forms': {'form_SVM': form_svm,
                             'form_KNN': form_knear,
                             'form_DCT': form_dectree,
                             'form_RFC': form_rndfor,
                             'form_ADA': form_adaboost,
                             'form_LDA': form_lda, },

                   'autoParam': form_autoParam,
                   'formListId': form_list_id
                   }
        print '-' * 60
        # print context['forms']
        # print '-' * 60
        return render(request, template, context)