def download_cic_result(request, *args, **kwargs):
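    """
    GET: render the result summary stored in the session on search_by_CIC_id.html.
    POST: stream the generated workbook from the download/ folder and delete it.
    """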
    try:
        context = {}
        logger = logger_hander.set_logger()
        mapped_df = request.session['Mapped_records']
        result_df = request.session['Total_Records']
        mapped_table = request.session['tables']
        xlsx_Result_sheet_names = request.session['file']
        if request.method == 'GET':
            context.update({
                "success": "Prediction Done . Result is Ready to Download.",
                "file": xlsx_Result_sheet_names,
                "tables": mapped_table,
                "Mapped_records": mapped_df,
                "Total_Records": result_df
            })
            return render(request, 'search_by_CIC_id.html', context)

        if request.method == 'POST':
            path = "download/{}".format(xlsx_Result_sheet_names)
            if os.path.exists(path):
                response = HttpResponse(
                    content_type='application/vnd.ms-excel')
                response[
                    'Content-Disposition'] = 'attachment; filename={}'.format(
                        xlsx_Result_sheet_names)
                wb = openpyxl.load_workbook(path)
                wb.save(response)
                os.remove(path)
                logger.info("Response sent Successfully !!!")
                return response
            else:
                logger.error('File Already Downloaded')
                messages.error(request, 'File Already Downloaded')
                return redirect('/')

    except Exception as e:
        logger.exception(
            'File already downloaded or something went wrong, please try again: {}'
            .format(e))
        messages.error(
            request,
            'File already downloaded or something went wrong, please try again.')
        return redirect('/')
Example #2
            :param sheet: google sheet instance
            :param List_cell_name: list of google sheet cell names
            :return: True or False
        """
        flag = True
        try:
            for cell_name in List_cell_name:
                # find() raises CellNotFound when the cell is missing
                sheet.find(cell_name)
            return flag
        except gspread.exceptions.CellNotFound:
            flag = False
            logger.error("CellNotFound Exception")
            return flag

    def save_output_into_sheet(self, worksheet, df_list):
        """
            Append the output rows to the Google Sheet.
            :param worksheet: worksheet instance
            :param df_list: list of data rows
            :return: True on success, otherwise the exception class name
        """
        try:
            for row in df_list:
                worksheet.append_row(row)
            logger.info("Output response of Rasa has been appended Successfully..!")
            return True
        except Exception as e:
            excepName = type(e).__name__
            logger.error(" Updating google sheet " + excepName)
            return excepName

logger = logger_hander.set_logger()
Example #3
def recommendation(request):
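    """
    Accept an uploaded .xlsx file, run the pickled model on the configured
    worksheet, write the predictions to a result workbook in the download/
    folder and redirect to the download view with a summary kept in the session.
    """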
    logger = logger_hander.set_logger()

    with open('config.json') as config_file:
        config_data = json.load(config_file)

    model_file_path = config_data['model_file_path']
    xlsx_Test_worksheet_name = config_data['xlsx_Test_worksheet_name']
    xlsx_column_name_list = config_data['xlsx_column_name_list']
    xlsx_primary_header_name = config_data['xlsx_primary_header_name']

    context = {}

    if request.method == 'POST':
        myFile = request.FILES.get('excel_file', None)
        if not myFile:
            logger.error(" File Not Found. Please Upload a Xlsx file.")
            messages.error(request,
                           'File Not Found. Please Upload a Xlsx file.')
            return redirect('/')
        if str(myFile).split('.')[-1] != "xlsx":
            logger.error(" Wrong File Format. Please Upload a Xlsx file")
            messages.error(request,
                           'Wrong File Format. Please Upload a Xlsx file')
            return redirect('/')
        try:
            test_dataframe = pd.read_excel(myFile,
                                           sheet_name=xlsx_Test_worksheet_name,
                                           engine='openpyxl')
        except Exception as e:
            logger.exception(
                "XLSX file does not have a worksheet named 'Data': {}".format(e))
            messages.error(request,
                           'XLSX file does not have a worksheet named "Data"')
            return redirect('/')

        if not test_dataframe.empty:

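            # Run predictions only when every required column from config is present.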
            if (all(x in test_dataframe.columns
                    for x in xlsx_column_name_list)):
                test_dataframe.dropna(subset=[xlsx_primary_header_name],
                                      inplace=True)
                pickle_model = load_model(model_file_path, logger)
                final_test_dataframe = fetch_required_features(
                    test_dataframe, xlsx_column_name_list, logger)
                try:
                    final_df = predict_result(pickle_model,
                                              final_test_dataframe,
                                              test_dataframe, logger)
                    for x in xlsx_column_name_list:
                        if final_df[x].isna().sum() > 0:
                            final_df.loc[final_df[x].isna(),
                                         'Mapped to Company'] = "NA"
                            # print(final_df[final_df[x].isna()])

                    mapped_df = show_tables(final_df)
                    logger.info("mapped records :{}".format(mapped_df))

                    with pd.ExcelWriter(myFile) as writer:
                        xlsx_Result_worksheet_name = str(myFile).split(
                            ".")[0] + "-MRE-OP-" + str(datetime.datetime.now(
                            ).strftime("%Y-%m-%d-%H-%M-%S"))
                        final_df.to_excel(
                            writer,
                            sheet_name=str(xlsx_Result_worksheet_name),
                            index=False)
                        # The context manager saves and closes the workbook on
                        # exit; explicit save()/close() calls are not needed.
                    wb = openpyxl.load_workbook(myFile)
                    xlsx_Result_sheet_names = "MRE_Result_" + str(
                        myFile).split(".")[0] + "_" + str(
                            datetime.datetime.now().strftime(
                                "%Y-%m-%d-%H-%M-%S")) + ".xlsx"
                    wb.save("download/{}".format(xlsx_Result_sheet_names))

                    request.session['final_df_cols'] = final_df.columns.tolist(
                    )
                    request.session[
                        'final_df_values'] = final_df.values.tolist()
                    request.session['tables'] = [
                        mapped_df.to_html(index=False,
                                          classes='students',
                                          header="true")
                    ]
                    request.session['Mapped_records'] = len(mapped_df)
                    request.session['Total_Records'] = len(final_df)
                    request.session['file'] = xlsx_Result_sheet_names

                    logger.info(
                        "Redirect to download CSV and view Mapped tables - Response sent Successfully !!!"
                    )
                    # return render(request, 'recommendation.html', context)
                    return redirect('download_result')
                except Exception as e:
                    logger.exception(
                        'Something went wrong while predicting or writing the result: {}'
                        .format(e))
                    messages.error(
                        request,
                        'Something went wrong while predicting or writing the result.'
                    )
                    return redirect('/')

                wb.save(xlsx_Result_sheet_names)

                logger.info(" Response sent Successfully !!!")
                context.update({
                    "success": "Prediction Done. Result is Ready to Download.",
                    "file": xlsx_Result_sheet_names
                })
                # return render(request, 'recommendation.html', context)
                return redirect(
                    '/download_result/?type=GET&sheetName={}'.format(
                        xlsx_Result_sheet_names))

            else:
                logger.error(
                    'Required column list not present in the "Data" worksheet.')
                messages.error(
                    request,
                    'Required column list not present in the "Data" worksheet.')
                return redirect('/')

        else:
            logger.error('The "Data" worksheet of the xlsx file is empty.')
            messages.error(request,
                           'The "Data" worksheet of the xlsx file is empty.')
            return redirect('/')

    return render(request, 'recommendation.html', context)
Example #4
def save_df_db(request):
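    """
    Persist the prediction results kept in the session (final_df_values /
    final_df_cols) into MREDB, skipping Admit IDs that already exist and rows
    with missing required columns.
    """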
    context = {}
    logger = logger_hander.set_logger()
    if request.method == 'GET':
        try:
            with open('config.json') as config_file:
                config_data = json.load(config_file)

            xlsx_Store_DB_primary_header_name = config_data[
                'xlsx_primary_header_name']
            xlsx_column_name_list = config_data['xlsx_column_name_list']
        except Exception as e:
            logger.exception(
                "Unable to read config.json file: {}".format(e))
            return HttpResponse('Config file Issue or Internal Issue')

        final_df_val = request.session['final_df_values']
        final_df_cols = request.session['final_df_cols']

        load_dataframe = pd.DataFrame(final_df_val, columns=final_df_cols)
        load_dataframe.dropna(subset=[xlsx_Store_DB_primary_header_name],
                              inplace=True)

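        # Split rows by completeness: rows missing any required column are set
        # aside; only complete rows are written to the database below.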
        incorrect_dataframe = pd.DataFrame(
            load_dataframe[load_dataframe[xlsx_column_name_list].isna().any(
                axis=1)])

        correct_dataframe = pd.DataFrame(
            load_dataframe[~load_dataframe[xlsx_column_name_list].isna().any(
                axis=1)])

        try:
            for i, row in correct_dataframe.iterrows():
                if not MREDB.objects.filter(Admit_ID=load_dataframe[
                        xlsx_Store_DB_primary_header_name][i]).exists():
                    db_cursor = MREDB.objects.create(
                        Admit_ID=load_dataframe['Admit Id'][i],
                        Name=load_dataframe['Name'][i],
                        T=load_dataframe['T'][i],
                        R=load_dataframe['R'][i],
                        A=load_dataframe['A'][i],
                        C=load_dataframe['C'][i],
                        K=load_dataframe['K'][i],
                        Last_Status=load_dataframe['Last Status'][i],
                        Last_Internal_Rating=load_dataframe[
                            'Last Internal Rating'][i],
                        Techability_Score=load_dataframe['Techability Score']
                        [i],
                        Learnability_Score=load_dataframe['Learnability Score']
                        [i],
                        Communicability_Score=load_dataframe[
                            'Communicability Score'][i],
                        Avg_TRACK=load_dataframe['Avg TRACK Score'][i],
                        Present_Percent=load_dataframe['Present Percent'][i],
                        Overall_Remarks=load_dataframe['Overall Remarks'][i],
                        Mapped_to_Company=load_dataframe['Mapped to Company']
                        [i])
                    db_cursor.save()
            context.update({
                "db_df_save":
                "Successfully stored the XLSX data into the database."
            })
            logger.info("Successfully stored the XLSX data into the database.")
            return render(request, 'saveDB.html', context)
        except Exception as e:
            logger.exception(
                "Something went wrong while saving df into db : {}".format(e))
            messages.error(
                request,
                'Something went wrong while saving data into database.')
            return redirect('/')
    # Fall back to rendering the page for non-GET requests.
    return render(request, 'saveDB.html', context)
def searchCIC(request):
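    """
    Look up each Admit ID from the uploaded .xlsx file in MREDB, predicting and
    persisting a company mapping where none is stored yet, and write the
    combined records to a result workbook in the download/ folder.
    """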
    logger = logger_hander.set_logger()
    try:
        with open('config.json') as config_file:
            config_data = json.load(config_file)

        xlsx_Search_CIC_worksheet_name = config_data[
            'xlsx_Test_worksheet_name']
        xlsx_Search_CIC_column_name_list = config_data[
            'xlsx_Search_CIC_column_name_list']
        xlsx_Store_DB_column_name_list = config_data[
            'xlsx_Store_DB_column_name_list']
        xlsx_Store_DB_primary_header_name = config_data[
            'xlsx_primary_header_name']
        model_file_path = config_data['model_file_path']
    except Exception as e:
        logger.exception(
            "Unable to read config.json file: {}".format(e))
        messages.error(request, 'Config file Issue or Internal Issue')
        return redirect('search_cic_id')

    context = {}
    result_df = pd.DataFrame(columns=xlsx_Store_DB_column_name_list)

    if request.method == 'POST':
        myFile = request.FILES.get('excel_file', None)
        if not myFile:
            logger.error(" File Not Found. Please Upload a Xlsx file.")
            messages.error(request,
                           'File Not Found. Please Upload a Xlsx file.')
            return redirect('search_cic_id')
        if str(myFile).split('.')[-1] != "xlsx":
            logger.error(" Wrong File Format. Please Upload a Xlsx file")
            messages.error(request,
                           'Wrong File Format. Please Upload a Xlsx file')
            return redirect('search_cic_id')
        try:
            search_CIC_dataframe = pd.read_excel(
                myFile,
                sheet_name=xlsx_Search_CIC_worksheet_name,
                engine='openpyxl')
        except Exception as e:
            logger.exception(
                "XLSX file does not have a worksheet named 'Data': {}".format(e))
            messages.error(request,
                           'XLSX file does not have a worksheet named "Data"')
            return redirect('search_cic_id')

        if not search_CIC_dataframe.empty:
            try:
                if (all(x in search_CIC_dataframe.columns
                        for x in xlsx_Search_CIC_column_name_list)):
                    search_CIC_dataframe.dropna(
                        subset=[xlsx_Store_DB_primary_header_name],
                        inplace=True)
                    # Re-index so the positional lookups below remain valid
                    # after rows have been dropped.
                    search_CIC_dataframe.reset_index(drop=True, inplace=True)
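                    # Reuse the stored company mapping when one exists for the
                    # Admit ID; otherwise run the model and persist the result.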
                    for i in range(len(search_CIC_dataframe)):
                        if MREDB.objects.filter(Admit_ID=search_CIC_dataframe[
                                xlsx_Store_DB_primary_header_name]
                                                [i]).exists():
                            serach_record = MREDB.objects.filter(
                                Admit_ID=search_CIC_dataframe[
                                    xlsx_Store_DB_primary_header_name][i])[0]

                            Admit_ID = serach_record.Admit_ID
                            Name = serach_record.Name
                            T = serach_record.T
                            R = serach_record.R
                            A = serach_record.A
                            C = serach_record.C
                            K = serach_record.K
                            Last_Status = serach_record.Last_Status
                            Last_Internal_Rating = serach_record.Last_Internal_Rating
                            Techability_Score = serach_record.Techability_Score
                            Learnability_Score = serach_record.Learnability_Score
                            Communicability_Score = serach_record.Communicability_Score
                            Avg_TRACK = serach_record.Avg_TRACK
                            Present_Percent = serach_record.Present_Percent
                            Overall_Remarks = serach_record.Overall_Remarks

                            if serach_record.Mapped_to_Company != "nan":
                                Mapped_to_Company = serach_record.Mapped_to_Company
                                result_df = result_df.append(
                                    {
                                        "Admit Id": Admit_ID,
                                        "Name": Name,
                                        "T": T,
                                        "R": R,
                                        "A": A,
                                        "C": C,
                                        "K": K,
                                        "Last Status": Last_Status,
                                        "Last Internal Rating":
                                        Last_Internal_Rating,
                                        "Techability Score": Techability_Score,
                                        "Learnability Score":
                                        Learnability_Score,
                                        "Communicability Score":
                                        Communicability_Score,
                                        "Avg TRACK Score": Avg_TRACK,
                                        "Present Percent": Present_Percent,
                                        "Overall Remarks": Overall_Remarks,
                                        "Mapped to Company": Mapped_to_Company
                                    },
                                    ignore_index=True)

                            else:

                                pickle_model = load_model(
                                    model_file_path, logger)
                                Last_Status, Last_Internal_Rating = convert_column(
                                    Last_Status, Last_Internal_Rating, logger)
                                final_record = [
                                    T, R, A, C, K, Last_Status,
                                    Last_Internal_Rating
                                ]
                                logger.info(
                                    " Record Parameters for Testing :{}".
                                    format(final_record))
                                final_result = predict_result(
                                    pickle_model, [final_record], logger)
                                serach_record.Mapped_to_Company = final_result
                                serach_record.save()

                                result_df = result_df.append(
                                    {
                                        "Admit Id": Admit_ID,
                                        "Name": Name,
                                        "T": T,
                                        "R": R,
                                        "A": A,
                                        "C": C,
                                        "K": K,
                                        "Last Status": Last_Status,
                                        "Last Internal Rating":
                                        Last_Internal_Rating,
                                        "Techability Score": Techability_Score,
                                        "Learnability Score":
                                        Learnability_Score,
                                        "Communicability Score":
                                        Communicability_Score,
                                        "Avg TRACK Score": Avg_TRACK,
                                        "Present Percent": Present_Percent,
                                        "Overall Remarks": Overall_Remarks,
                                        "Mapped to Company":
                                        serach_record.Mapped_to_Company
                                    },
                                    ignore_index=True)

                    mapped_df = show_tables(result_df)
                    logger.info("mapped Records: {}".format(len(mapped_df)))

                    with pd.ExcelWriter(myFile) as writer:
                        xlsx_Result_worksheet_name = str(myFile).split(
                            ".")[0] + "-MRE-OP-" + str(datetime.datetime.now(
                            ).strftime("%Y-%m-%d-%H-%M-%S"))
                        result_df.to_excel(
                            writer,
                            sheet_name=str(xlsx_Result_worksheet_name),
                            index=False)
                        # The context manager saves and closes the workbook on
                        # exit; explicit save()/close() calls are not needed.
                    wb = openpyxl.load_workbook(myFile)

                    xlsx_Result_sheet_names = "MRE_Result_" + str(
                        myFile).split(".")[0] + "_" + str(
                            datetime.datetime.now().strftime(
                                "%Y-%m-%d-%H-%M-%S")) + ".xlsx"
                    wb.save("download/{}".format(xlsx_Result_sheet_names))

                    request.session['tables'] = [
                        mapped_df.to_html(index=False,
                                          classes='students',
                                          header="true")
                    ]
                    request.session['Mapped_records'] = len(mapped_df)
                    request.session['Total_Records'] = len(result_df)
                    request.session['file'] = xlsx_Result_sheet_names

                    logger.info(
                        "Redirect to download CSV and view Mapped tables - Response sent Successfully !!!"
                    )

                    # return render(request, 'search_by_CIC_id.html', context )
                    return redirect('download_cic_result')

            except Exception as e:
                logger.exception("Something went wrong : {}".format(e))
                messages.error(request, 'Something went wrong')
                return redirect('search_cic_id')

    return render(request, 'search_by_CIC_id.html', context)
def saveDB(request):
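    """
    Load the configured worksheet from an uploaded .xlsx file and store its
    rows in MREDB, skipping Admit IDs that are already present.
    """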
    logger = logger_hander.set_logger()

    try:
        with open('config.json') as config_file:
            config_data = json.load(config_file)

        xlsx_Store_DB_worksheet_name = config_data['xlsx_Test_worksheet_name']
        xlsx_Store_DB_column_name_list = config_data[
            'xlsx_Store_DB_column_name_list']
        xlsx_Store_DB_primary_header_name = config_data[
            'xlsx_primary_header_name']
    except Exception as e:
        logger.exception(
            "Unable to read config.json file: {}".format(e))
        messages.error(request, 'Config file Issue or Internal Issue')
        return redirect('saveDB')

    context = {}
    if request.method == 'POST':
        myFile = request.FILES.get('excel_file', None)
        if not myFile:
            logger.error(" File Not Found. Please Upload a Xlsx file.")
            messages.error(request,
                           'File Not Found. Please Upload a Xlsx file.')
            return redirect('saveDB')
        if str(myFile).split('.')[-1] != "xlsx":
            logger.error(" Wrong File Format. Please Upload a Xlsx file")
            messages.error(request,
                           'Wrong File Format. Please Upload a Xlsx file')
            return redirect('saveDB')
        try:
            load_dataframe = pd.read_excel(
                myFile,
                sheet_name=xlsx_Store_DB_worksheet_name,
                engine='openpyxl')
        except Exception as e:
            logger.exception(
                "XLSX file does not have a worksheet named 'Store-Data': {}".
                format(e))
            messages.error(
                request,
                'XLSX file does not have a worksheet named "Store-Data"')
            return redirect('saveDB')
        if not load_dataframe.empty:
            try:
                if (all(x in load_dataframe.columns
                        for x in xlsx_Store_DB_column_name_list)):
                    load_dataframe.dropna(
                        subset=[xlsx_Store_DB_primary_header_name],
                        inplace=True)
                    # Re-index so the positional lookups below remain valid
                    # after rows have been dropped.
                    load_dataframe.reset_index(drop=True, inplace=True)
                    for i in range(len(load_dataframe)):
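                        # Insert only rows whose Admit ID is not already stored.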
                        if not MREDB.objects.filter(Admit_ID=load_dataframe[
                                xlsx_Store_DB_primary_header_name]
                                                    [i]).exists():
                            db_cursor = MREDB.objects.create(
                                Admit_ID=load_dataframe['Admit Id'][i],
                                Name=load_dataframe['Name'][i],
                                T=load_dataframe['T'][i],
                                R=load_dataframe['R'][i],
                                A=load_dataframe['A'][i],
                                C=load_dataframe['C'][i],
                                K=load_dataframe['K'][i],
                                Last_Status=load_dataframe['Last Status'][i],
                                Last_Internal_Rating=load_dataframe[
                                    'Last Internal Rating'][i],
                                Techability_Score=load_dataframe[
                                    'Techability Score'][i],
                                Learnability_Score=load_dataframe[
                                    'Learnability Score'][i],
                                Communicability_Score=load_dataframe[
                                    'Communicability Score'][i],
                                Avg_TRACK=load_dataframe['Avg TRACK Score'][i],
                                Present_Percent=load_dataframe[
                                    'Present Percent'][i],
                                Overall_Remarks=load_dataframe[
                                    'Overall Remarks'][i],
                                Mapped_to_Company=load_dataframe[
                                    'Mapped to Company'][i])
                            db_cursor.save()
                    context.update({
                        "success":
                        "Successfully stored the XLSX data into the database."
                    })
                    logger.info(
                        "Successfully stored the XLSX data into the database.")
                    return render(request, 'saveDB.html', context)
            except Exception as e:
                logger.exception(
                    "XLSX file column names are wrong or there was an issue "
                    "saving to the database: {}".format(e))
                messages.error(
                    request,
                    'XLSX file column names are wrong or there was an internal issue.')
                return redirect('saveDB')
    return render(request, 'saveDB.html', context)