コード例 #1
0
def get_product_compliance_details(spec_list, founded_category):
    """Classify regulatory notifications by compliance status and report
    AG-registration regions.

    Args:
        spec_list: Spec ids used to query the notification-status core.
        founded_category: Categories found for the product; checked against
            ``config.ag_registration_country`` keys.

    Returns:
        Tuple ``(positive_country, negative_country, others, active_str)``
        where the first three are lists of region values and ``active_str``
        is a comma-joined list of active AG regions (or the default text).
    """
    # Initialise all results before the try so the final return can never
    # raise NameError when an early helper call fails (the original
    # initialised them inside the try).
    negative_country = []
    positive_country = []
    others = []
    active_region = []
    inactive_region = []
    active_str = "No active region found"
    try:
        spec_query = (config.or_delimiter).join(spec_list)
        query = f'SUBID:({spec_query})'
        params = {"fl": config.notification_column_str}
        pcomp, pcomp_df = helper.get_data_from_core(
            config.solr_notification_status, query, params)
        if ("NOTIF" in list(pcomp_df.columns)) and len(pcomp) > 0:
            # NOTIF cells may hold several ';'-separated phrase keys;
            # flatten them into one list before querying the phrase core.
            phrase_key = list(pcomp_df["NOTIF"].unique())
            phrase_key = (";".join(phrase_key)).split(";")
            phrase_key_query = helper.replace_character_for_querying(
                phrase_key)
            query = f'PHRKY:({phrase_key_query})'
            params = {"fl": config.phrase_column_str}
            key_value, key_value_df = helper.get_data_from_core(
                config.solr_phrase_translation, query, params)
            for item in pcomp:
                try:
                    place = item.get("RLIST", config.hypen_delimiter)
                    key = str(item.get("NOTIF", "")).strip()
                    key_text = helper.finding_phrase_text(key_value_df, key)
                    status = key_text.lower().strip()
                    if status in config.in_compliance_notification_status:
                        positive_country.append(place)
                    elif status in config.not_in_compliance_notification_status:
                        negative_country.append(place)
                    else:
                        others.append(place)
                except Exception:
                    # Best-effort: one malformed record must not drop the rest.
                    pass

        # AG registration: split configured regions by whether they appear
        # in the categories found for this product.
        for region in config.ag_registration_country:
            if region in founded_category:
                active_region.append(
                    config.ag_registration_country.get(region))
            else:
                inactive_region.append(
                    config.ag_registration_country.get(region))
        if active_region:
            active_str = (config.comma_delimiter).join(active_region)

    except Exception:
        # Best-effort aggregation: fall through with whatever was collected.
        pass
    return positive_country, negative_country, others, active_str
コード例 #2
0
def get_bio_compatibility_details(spec_json, spec_list):
    """Collect biocompatibility-testing rows for the given spec ids.

    Args:
        spec_json: Mapping of spec id -> product name, used to label rows.
        spec_list: Spec ids to query in the biocompatibility core.

    Returns:
        List of dicts with keys ``spec_Id``, ``usage``, ``bstat``, ``btest``;
        empty list on failure (best-effort, matches the rest of this module).
    """
    try:
        details = []
        spec_query = (config.or_delimiter).join(spec_list)
        query = f'SUBID:({spec_query})'
        check_column = ["BSTAT", "BTEST"]
        values, df = helper.get_data_from_core(
            config.solr_biocompatibility_testing_pri, query)
        # Resolve the phrase keys referenced by BSTAT/BTEST into text.
        key_value, key_value_df = get_related_phrase_text(check_column, df)
        for data in values:
            try:
                specid = data.get("SUBID", "")
                json_make = {
                    "spec_Id": specid + config.hypen_delimiter
                               + spec_json.get(specid, ""),
                    # str() BEFORE .strip(): the original called .strip() on
                    # the raw Solr value, so non-string values raised and the
                    # record was silently dropped.
                    "usage": str(data.get("ZUSAGE", config.hypen_delimiter)).strip(),
                    "bstat": helper.finding_phrase_text(
                        key_value_df, str(data.get("BSTAT", "")).strip()),
                    "btest": helper.finding_phrase_text(
                        key_value_df, str(data.get("BTEST", "")).strip()),
                }
                details.append(json_make)
            except Exception:
                # Skip malformed rows, keep the rest.
                pass
        return details
    except Exception:
        return []
コード例 #3
0
def get_ontology_details():
    """Return every ontology record from the Solr ontology core.

    Returns:
        ``[{"ontology_Details": [record, ...]}]``; missing fields default
        to ``"-"``. On failure, returns whatever was assembled so far
        (at minimum an empty list).
    """
    # Initialise before the try: the except path returns `result`, which
    # in the original was only bound inside the try.
    result = []
    try:
        query = 'KEY_TYPE:*'
        ontology_list = []
        ontolgy_result, ontolgy_df = helper.get_data_from_core(
            config.solr_ontology, query)
        for item in ontolgy_result:
            # The original assigned "synonyms" twice (identical value);
            # a single assignment is kept here.
            ontology_list.append({
                "key": item.get("ONTOLOGY_KEY", "-"),
                "key_Category": item.get("KEY_TYPE", "-"),
                "synonyms": item.get("ONTOLOGY_VALUE", "-"),
                "created_By": item.get("CREATED_BY", "-"),
                "created_Date": item.get("CREATED_DATE", "-"),
                "updated_Date": item.get("UPDATED_DATE", "-"),
                "id": item.get("ID", "-"),
                "solr_Id": item.get("solr_id", "-"),
            })
        result = [{"ontology_Details": ontology_list}]
        return result
    except Exception:
        return result
コード例 #4
0
def add_ontology_value(add_data):
    """Insert a new ontology synonym row into SQL and index it in Solr.

    Args:
        add_data: Request payload with keys ``synonymsProductName``,
            ``synonymsProductType``, ``ontologySynonyms``,
            ``synonymsCreatedBy``.

    Returns:
        ``[{"status": code, "message": text}]`` — 200 on success (or when
        only the post-commit Solr/audit step lags), 400 when the SQL insert
        itself fails.
    """
    # Defaults cover the success path. The original left `status_code`
    # unbound (NameError at return) when the post-commit block failed
    # before reaching its status assignment.
    status_code = 200
    message = "added successfully"
    conn = None
    try:
        current_date = str(datetime.now(est))[:-9]
        conn = helper.SQL_connection()
        cursor = conn.cursor()
        ontology_value = add_data.get('ontologySynonyms',
                                      "").replace("'", "''")
        # NOTE(review): only `ontology_value` is quote-escaped; the other
        # fields are interpolated raw into the SQL string (injection risk).
        # Prefer a parameterized query if the driver supports it — confirm
        # what helper.SQL_connection() returns before changing.
        inser_value = f"'{add_data.get('synonymsProductName','')}','{add_data.get('synonymsProductType','')}','{ontology_value}','{add_data.get('synonymsCreatedBy','')}','{current_date}','{current_date}','NULL',1"
        insert_query = f"insert into [momentive].[ontology] values ({inser_value})"
        cursor.execute(insert_query)
    except Exception as e:
        try:
            conn.rollback()
        except Exception:
            pass
        logging.info(f'error in updating {e}')
        status_code = 400
        message = f"cannot be added because of {e}"
    else:
        try:
            conn.commit()
            # Find the next free numeric ID from the Solr ontology core.
            query = f'-ID:\-'
            list_of_id = [0]  # fallback when the "ID" column is absent
            ontolgy_result, ontolgy_df = helper.get_data_from_core(
                config.solr_ontology, query)
            if "ID" in ontolgy_df.columns:
                ontolgy_df = ontolgy_df.replace({"-": "0"})
                ontolgy_df["ID"] = ontolgy_df["ID"].apply(pd.to_numeric)
                list_of_id = list(ontolgy_df["ID"].unique())
            sql_id = max(list_of_id)
            found_id = str(sql_id + 1)
            product_synonyms = add_data.get("ontologySynonyms", "")
            product = add_data.get("synonymsProductName", "")
            product_type = add_data.get("synonymsProductType", "")
            doc = {
                "ONTOLOGY_KEY": add_data.get("synonymsProductName", ""),
                "ID": found_id,
                "KEY_TYPE": add_data.get("synonymsProductType", ""),
                "ONTOLOGY_VALUE": add_data.get("ontologySynonyms", ""),
                "CREATED_BY": add_data.get('synonymsCreatedBy', ''),
                "CREATED_DATE": current_date,
                "UPDATED_DATE": current_date,
                "PROCESSED_FLAG": "",
                "IS_RELEVANT": "1"
            }
            # Record the insert in the change-audit-log table; only index
            # into Solr once the audit entry is confirmed.
            audit_status = helper.update_in_change_audit_log(
                found_id, "Ontology Management",
                add_data.get('synonymsCreatedBy', ''), "insert", current_date,
                product_type, product, product_synonyms, "N")
            if audit_status == "updated in change audit log successfully":
                config.solr_ontology.add([doc])
        except Exception:
            # Row is committed in SQL; Solr indexing will catch up later.
            message = "will be added soon"
    finally:
        # Release the DB connection on every path (the original leaked it).
        try:
            if conn is not None:
                conn.close()
        except Exception:
            pass
    return [{"status": status_code, "message": message}]
コード例 #5
0
def find_zusage(all_details_json, cas_query, spec_query):
    """Collect distinct ZUSAGE values from the std/100% and legal
    composition cores.

    Args:
        all_details_json: Unused here; kept for interface compatibility.
        cas_query: Solr query fragment of CAS substance ids (may be '').
        spec_query: Solr query fragment of spec ids (may be '').

    Returns:
        4-tuple ``(std_hund_list, edit_std_hund, legal_list,
        legal_usage_edit)`` — name-dicts and plain lists for std/100% and
        legal usages. Empty lists when either query is empty or on error.
    """
    # Initialise up front: the original only bound these inside the `if`,
    # so empty queries hit a NameError at return; its except clause also
    # returned a 2-tuple, breaking 4-way unpacking at the call site.
    std_hund_list = []
    edit_std_hund = []
    legal_list = []
    legal_usage_edit = []
    try:
        if cas_query != '' and spec_query != '':
            query = f'CSUBI:({cas_query}) && SUBID:({spec_query})'
            std_usage = []
            hundrd_usage = []
            std_values, std_df = helper.get_data_from_core(
                config.solr_std_composition, query)
            if "ZUSAGE" in list(std_df.columns):
                std_usage = list(std_df["ZUSAGE"].unique())
            hund_values, hund_df = helper.get_data_from_core(
                config.solr_hundrd_composition, query)
            if "ZUSAGE" in list(hund_df.columns):
                hundrd_usage = list(hund_df["ZUSAGE"].unique())
            for usage in set(std_usage + hundrd_usage):
                std_hund_list.append({"name": usage})
                edit_std_hund.append(usage)
            legal_values, legal_df = helper.get_data_from_core(
                config.solr_legal_composition, query)
            legal_usage = []
            if "ZUSAGE" in list(legal_df.columns):
                legal_usage = list(legal_df["ZUSAGE"].unique())
            for usage in set(legal_usage):
                # EU public-registration usages are excluded from the
                # legal list.
                if 'PUBLIC: REG_EU' not in usage:
                    legal_usage_edit.append(usage)
                    legal_list.append({"name": usage})
        return std_hund_list, edit_std_hund, legal_list, legal_usage_edit
    except Exception:
        return [], [], [], []
コード例 #6
0
def get_registrations_company_details(spec_json, spec_list):
    """Collect company-specific registration rows for the given spec ids.

    Args:
        spec_json: Unused here; kept for interface compatibility with the
            sibling detail functions.
        spec_list: Spec ids to query in the registration core.

    Returns:
        List of dicts (one per Solr record) with lower-cased field keys;
        missing fields default to ``config.hypen_delimiter``. Empty list
        on failure.
    """
    # Output key -> Solr field, replacing 17 copy-pasted extractions.
    field_map = (
        ("usage", "ZUSAGE"), ("rebas", "REBAS"), ("revsn", "REVSN"),
        ("relst", "RELST"), ("bukrs", "BUKRS"), ("werks", "WERKS"),
        ("zprior", "ZPRIOR"), ("rdate", "RDATE"), ("expdt", "EXPDT"),
        ("regqy", "REGQY"), ("regnm", "REGNM"), ("regns", "REGNS"),
        ("conso", "CONSO"), ("stats", "STATS"), ("regph", "REGPH"),
        ("addin", "ADDIN"), ("remar", "REMAR"),
    )
    try:
        details = []
        spec_query = (config.or_delimiter).join(spec_list)
        query = f'SUBID:({spec_query})'
        values, df = helper.get_data_from_core(
            config.solr_registration_company_specific, query)
        for data in values:
            try:
                json_make = {"spec_Id": data.get("SUBID", "")}
                for out_key, solr_key in field_map:
                    # str() BEFORE .strip(): the original stripped the raw
                    # Solr value, so a non-string silently dropped the row.
                    json_make[out_key] = str(
                        data.get(solr_key, config.hypen_delimiter)).strip()
                details.append(json_make)
            except Exception:
                pass
        return details
    except Exception:
        return []
コード例 #7
0
def get_spec_id_for_updated_value(product_name, product_type):
    """Find spec ids (TEXT2) for a product name, by product type.

    Args:
        product_name: Name to search in the product core.
        product_type: "NAMPROD" or "BDT"; any other value yields ''.

    Returns:
        ';'-joined spec id string, or '' when nothing matches or on error.
    """
    try:
        spec_str = ''
        product_query = helper.replace_character_for_querying([product_name])
        if product_type == "NAMPROD":
            query = f'TYPE:NAMPROD && SUBCT:REAL_SUB && TEXT1:({product_query}) && -TEXT6:X'
        elif product_type == "BDT":
            query = f'TYPE:MATNBR && TEXT3:({product_query}) && -TEXT6:X'
        else:
            # Unknown type: the original fell through with `query` unbound
            # and relied on the broad except to return ''.
            return ''
        values, df = helper.get_data_from_core(config.solr_product, query)
        if "TEXT2" in df.columns:
            spec_str = ";".join(list(df["TEXT2"].unique()))
        return spec_str
    except Exception:
        return ''
コード例 #8
0
def get_report_data_info(spec_list):
    """Return 'Yes' when any document-variant record exists for the specs,
    otherwise 'No' (also 'No' on any lookup failure)."""
    report_flag = 'No'
    try:
        joined_specs = (config.or_delimiter).join(spec_list)
        found, _df = helper.get_data_from_core(
            config.solr_document_variant,
            f'SUBID:({joined_specs})',
            {"fl": config.report_column_str})
        if len(found) > 0:
            report_flag = 'Yes'
    except Exception:
        # Best-effort: missing data simply reads as "No".
        pass
    return report_flag
コード例 #9
0
def get_log_details(req_body):
    """Build creation-log details for an unstructured-data record.

    NOTE(review): a second function with this same name is defined later in
    this file and shadows this one at import time — confirm which is meant
    to be live.
    """
    json_list = []
    try:
        record_id = req_body.get("id", "")
        hits, _df = helper.get_data_from_core(config.solr_unstructure_data,
                                              f"solr_id:{record_id}")
        if hits:
            first = hits[0]
            # Creator is fixed to the admin account for unstructured data.
            json_list = helper.make_log_details(first.get("ID", "-"),
                                                "PIH-Admin",
                                                first.get("CREATED", "-"))
    except Exception:
        pass
    return json_list
コード例 #10
0
def get_log_details(req_body):
    """Build change-log details for an ontology record looked up by ID."""
    json_list = []
    try:
        id_key = req_body.get("id", "")
        hits, _df = helper.get_data_from_core(config.solr_ontology,
                                              f"ID:{id_key}")
        if hits:
            rec = hits[0]
            json_list = helper.make_log_details(
                id_key,
                rec.get("CREATED_BY", "-"),
                rec.get("CREATED_DATE", "-"),
                rec.get("KEY_TYPE", "-"),
                rec.get("ONTOLOGY_KEY", "-"),
                rec.get("ONTOLOGY_VALUE", "-"))
    except Exception:
        pass
    return json_list
コード例 #11
0
def get_phrase_translation():
    """Return every phrase-translation record from Solr.

    Returns:
        List of dicts with keys ``phrase_Key``, ``phrase_Code``,
        ``phrase_Graph``, ``phrase_Text``; empty list on failure.
    """
    try:
        details = []
        query = '*:*'
        values, df = helper.get_data_from_core(config.solr_phrase_translation,
                                               query)
        for data in values:
            # str() BEFORE .strip(): the original stripped the raw Solr
            # value, so a single non-string field aborted the whole
            # function (no per-row try) and returned [].
            details.append({
                "phrase_Key": str(
                    data.get("PHRKY", config.hypen_delimiter)).strip(),
                "phrase_Code": str(
                    data.get("PHRCD", config.hypen_delimiter)).strip(),
                "phrase_Graph": str(
                    data.get("GRAPH", config.hypen_delimiter)).strip(),
                "phrase_Text": str(
                    data.get("PTEXT", config.hypen_delimiter)).strip(),
            })
        return details
    except Exception:
        return []
コード例 #12
0
def get_related_phrase_text(column_check, check_df):
    """Fetch phrase translations for every key referenced by the given
    dataframe columns.

    Returns the ``(values, dataframe)`` pair from the phrase-translation
    core; on failure, ``([], empty DataFrame)``.
    """
    empty_df = pd.DataFrame()
    try:
        collected_keys = []
        for col in column_check:
            try:
                if col in list(check_df.columns):
                    # Normalise to stripped strings, then split the
                    # ';'-packed keys into individual entries.
                    check_df[col] = check_df[col].astype(str).str.strip()
                    packed = ";".join(list(check_df[col].unique()))
                    collected_keys += packed.split(";")
            except Exception:
                pass
        # Resolve the collected keys to phrase text.
        key_query = helper.replace_character_for_querying(collected_keys)
        return helper.get_data_from_core(
            config.solr_phrase_translation,
            f'PHRKY:({key_query})',
            {"fl": config.phrase_column_str})
    except Exception:
        return [], empty_df
コード例 #13
0
def home_page_details(all_details_json, spec_list, arranged_level_json):
    """Assemble the home-page summary sections for a product.

    Each section is a list of small dicts (icon, metric rows, tab modal id)
    keyed by section name in the returned dict.

    Args:
        all_details_json: Product detail payload used to build queries.
        spec_list: Spec ids for per-spec lookups.
        arranged_level_json: Hierarchy info for product/material details.

    Returns:
        Dict of section name -> section rows; empty dict when assembly
        fails early (best-effort, matches the rest of this module).
    """
    # Initialise the result before the try so the final return can never
    # raise NameError if an early config/helper access fails.
    home_page_details = {}
    try:
        category = config.home_page_category
        product_attributes = []
        product_compliance = []
        customer_comm = []
        toxicology = []
        restricted_sub = []
        sales_information = []
        report_data = []
        # Unstructured-data details for the home-page categories.
        home_page_query = helper.unstructure_template(all_details_json,
                                                      category)
        params = {"fl": config.unstructure_column_str}
        unstructure_values, unstructure_df = helper.get_data_from_core(
            config.solr_unstructure_data, home_page_query, params)
        std, std_df, legal, legal_df = helper.make_common_query_for_std_legal_composition(
            all_details_json)
        if "CATEGORY" in list(unstructure_df.columns):
            founded_category = list(unstructure_df["CATEGORY"].unique())
        else:
            founded_category = []

        # Product and material info.
        mat_str = ''
        product_list = []
        product_list, mat_str = get_product_material_details(
            arranged_level_json, spec_list, all_details_json)
        product_attributes.append(
            {"image": config.home_icon_product_attributes})
        product_attributes.append({
            "Product Identification":
            (config.comma_delimiter).join(product_list)
        })
        product_attributes.append({"Material Information": mat_str})
        product_attributes.append({"tab_modal": "compositionModal"})
        home_page_details["Product Attributes"] = product_attributes

        # Product compliance: notification counts + AG registration status.
        positive_country, negative_country, others, active_str = get_product_compliance_details(
            spec_list, founded_category)
        product_compliance.append(
            {"image": config.home_icon_product_compliance})
        product_compliance.append({
            "In Compliance Regulatory notification count":
            len(positive_country)
        })
        product_compliance.append({
            "Not in Compliance Regulatory notification count":
            len(negative_country)
        })
        product_compliance.append(
            {"Other Regulatory notification count": len(others)})
        product_compliance.append({"tab_modal": "complianceModal"})
        product_compliance.append(
            {"AG Registration active region status ": active_str})
        home_page_details["Product compliance"] = product_compliance

        # Customer communication flags.
        usflag, euflag = get_customer_communication_info(founded_category)
        customer_comm.append(
            {"image": config.home_icon_customer_communication})
        customer_comm.append({"US FDA Compliance": usflag})
        customer_comm.append({"EU Food Contact ": euflag})
        customer_comm.append({"tab_modal": "communicationModal"})
        home_page_details["Customer Communication"] = customer_comm

        # Toxicology.
        summary_flag, study_str = get_toxicology_info(founded_category,
                                                      unstructure_df,
                                                      spec_list, std_df,
                                                      unstructure_values)
        toxicology.append({"image": config.home_icon_toxicology})
        toxicology.append({"Study Titles": study_str})
        toxicology.append(
            {"Toxicology Summary Report Available": summary_flag})
        toxicology.append({"tab_modal": "toxicologyModal"})
        home_page_details["Toxicology"] = toxicology

        # Restricted substances (GADSL / Cal Prop 65).
        gadsl_fg, cal_fg = find_restricted_data(unstructure_df,
                                                all_details_json, spec_list,
                                                std_df)
        restricted_sub.append({"image": config.home_icon_restricted_substance})
        restricted_sub.append({"Components Present in GADSL": gadsl_fg})
        restricted_sub.append({"Components Present in Cal Prop 65": cal_fg})
        restricted_sub.append({"tab_modal": "restrictedSubstanceModal"})
        home_page_details["Restricted Substance"] = restricted_sub

        # Sales information.
        sales_kg, sold_country = get_sales_volume_info(founded_category,
                                                       unstructure_values)
        sales_information.append({"image": config.home_icon_sales_info})
        sales_information.append(
            {"Total sales volume in 2019 (in Kg)": sales_kg})
        sales_information.append({"Regions where sold": sold_country})
        sales_information.append({"tab_modal": "salesModal"})
        home_page_details["Sales Information"] = sales_information

        # Report data availability.
        report_flag = get_report_data_info(spec_list)
        report_data.append({"image": config.home_icon_report_data})
        report_data.append({"Report Status": report_flag})
        report_data.append({"tab_modal": "reportModal"})
        home_page_details["Report Data"] = report_data
    except Exception:
        # Best-effort: return whatever sections were completed.
        pass
    return home_page_details
コード例 #14
0
def find_legal_composition_details(validity,
                                   cas_query,
                                   spec_query,
                                   req_body,
                                   all_details_json,
                                   cas_list,
                                   spec_with_namprod=""):
    """Build the legal-composition rows for one real spec and, for REACH
    usage, the substance-volume-tracking (SVT) rows.

    Args:
        validity: ZUSAGE validity value (e.g. 'REACH: REG_REACH').
        cas_query / spec_query: Solr query fragments for CAS and spec ids.
        req_body: Request payload; ``CAS_Level`` entries are matched
            against the legal-composition records.
        all_details_json: Used to derive ``spec_with_namprod`` when empty.
        cas_list: CAS ids used to filter the legal dataframe.
        spec_with_namprod: Optional precomputed "spec - name" label.

    Returns:
        ``{"legal_composition": [...], "svt": [...]}``; partially filled
        on error (best-effort, matching the rest of this module).
    """
    # Initialise the result before any helper call: the original bound
    # `legal_comp` only after get_specid_namprod_details(), so a failure
    # there raised NameError inside the except clause.
    legal_comp = {"legal_composition": [], "svt": []}
    try:
        if spec_with_namprod == "":
            spec_with_namprod = get_specid_namprod_details(all_details_json)
        zusage_value = helper.replace_character_for_querying([validity])
        query = f'CSUBI:({cas_query}) && ZUSAGE:({zusage_value}) && SUBID:({spec_query})'
        json_list = []
        legal_values, legal_df = helper.get_data_from_core(
            config.solr_legal_composition, query)
        legal_df = legal_df[legal_df["CSUBI"].isin(cas_list)]
        legal_svt_spec = []
        total_legal_value = 0
        for item in req_body.get("CAS_Level"):
            real_spec_list = item.get("real_Spec_Id")
            for real in real_spec_list:
                if spec_query in real:
                    for data in legal_values:
                        json_make = {}
                        if data.get("CSUBI") == item.get("pure_Spec_Id"):
                            legal_svt_spec.append(item.get("pure_Spec_Id"))
                            json_make["real_Spec_Id"] = spec_with_namprod
                            json_make["pure_spec_Id"] = str(
                                item.get("pure_Spec_Id"))
                            json_make["cas_Number"] = item.get("cas_Number")
                            # Fall back to the Solr CAS/IUPAC names when the
                            # request carries no usable chemical name.
                            if (str(item.get("chemical_Name")).strip() == "-"
                                    or str(item.get("chemical_Name")).strip()
                                    == ""):
                                if data.get("NAM_CAS_EN", "-") != '-':
                                    chemical_name = data.get("NAM_CAS_EN", "-")
                                else:
                                    chemical_name = data.get(
                                        "NAM_IUPAC_EN", "-")
                            else:
                                chemical_name = item.get("chemical_Name")
                            json_make["ingredient_Name"] = chemical_name
                            json_make["legal_Componant_Type"] = data.get(
                                "COMPT", "-")
                            json_make["legal_value"] = helper.set_decimal_points(
                                data.get("CVALU", 0))
                            json_make["legal_cal_value"] = helper.calculate_ppm_ppb(
                                data.get("CVALU", 0), data.get("CUNIT", "-"))
                            if data.get("CVALU", "-") != "-":
                                total_legal_value += float(
                                    json_make.get("legal_cal_value", 0))
                            json_make["legal_unit"] = data.get("CUNIT", "-")
                            json_list.append(json_make)
                    break
        if len(json_list) > 0:
            # Order rows by calculated value, then append a "Total" row.
            sorted_dict = sort_cvalue(json_list, "legal_cal_value")
            json_list = [sorted_dict.get(item) for item in sorted_dict]
            json_list.append({
                "pure_spec_Id": "Total",
                "cas_Number": "",
                "ingredient_Name": "",
                "legal_Componant_Type": "",
                "legal_cal_value": "",
                "legal_value": helper.set_decimal_points(total_legal_value),
                "legal_unit": "",
            })
        legal_comp["legal_composition"] = json_list
        if validity == 'REACH: REG_REACH':
            json_list = []
            svt_result = []
            # Fix: the original left `svt_df` unbound when no SVT specs
            # matched, so the column check below raised and the outer
            # except returned a result without the "svt" key.
            svt_df = pd.DataFrame()
            if len(legal_svt_spec) > 0:
                subid_list = (config.or_delimiter).join(legal_svt_spec)
                query = f'SUBID:({subid_list})'
                svt_result, svt_df = helper.get_data_from_core(
                    config.solr_substance_volume_tracking, query)
            presence_id = []
            if "SUBID" in svt_df.columns:
                presence_id = list(svt_df["SUBID"].unique())
            for sub in presence_id:
                json_make = {
                    "real_Spec_Id": spec_with_namprod,
                    "pure_spec_Id": sub,
                }
                svt_total_2018 = 0
                svt_total_2019 = 0
                svt_total_2020 = 0
                for data in svt_result:
                    if sub == data.get("SUBID", "-"):
                        reg_value = data.get("REGLT", "-")
                        reg_year = data.get("QYEAR", "-").strip()
                        if reg_value == "SVT_TE":
                            if reg_year == "2018":
                                json_make["SVT_TE_eight"] = helper.set_decimal_points(
                                    data.get("CUMQT", 0))
                            if reg_year == "2019":
                                json_make["SVT_TE_nine"] = helper.set_decimal_points(
                                    data.get("CUMQT", 0))
                            if reg_year == "2020":
                                json_make["SVT_TE_twenty"] = helper.set_decimal_points(
                                    data.get("CUMQT", 0))
                            json_make["amount_limit_SVT_TE"] = helper.set_decimal_points(
                                data.get("AMTLT", 0))
                        if reg_value == "SVT_AN":
                            if reg_year == "2018":
                                json_make["SVT_AN_eight"] = helper.set_decimal_points(
                                    data.get("CUMQT", 0))
                            if reg_year == "2019":
                                json_make["SVT_AN_nine"] = helper.set_decimal_points(
                                    data.get("CUMQT", 0))
                            if reg_year == "2020":
                                json_make["SVT_AN_twenty"] = helper.set_decimal_points(
                                    data.get("CUMQT", 0))
                            json_make["amount_limit_SVT_AN"] = helper.set_decimal_points(
                                data.get("AMTLT", 0))
                        if reg_value == "SVT_LV":
                            # NOTE(review): float() raises on the "-" default,
                            # aborting the whole function via the outer
                            # except — presumably CUMQT is always numeric for
                            # SVT_LV records; confirm against the core schema.
                            if reg_year == "2018":
                                svt_total_2018 += float(data.get("CUMQT", "-"))
                                json_make["SVT_LV_eight"] = helper.set_decimal_points(
                                    svt_total_2018)
                            if reg_year == "2019":
                                svt_total_2019 += float(data.get("CUMQT", "-"))
                                json_make["SVT_LV_nine"] = helper.set_decimal_points(
                                    svt_total_2019)
                            if reg_year == "2020":
                                svt_total_2020 += float(data.get("CUMQT", "-"))
                                json_make["SVT_LV_twenty"] = helper.set_decimal_points(
                                    svt_total_2020)
                            json_make["amount_limit_SVT_LV"] = helper.set_decimal_points(
                                data.get("AMTLT", 0))
                json_list.append(json_make)
            # Backfill every SVT column that never received a value
            # (replaces twelve copy-pasted None checks).
            svt_keys = ("SVT_TE_eight", "SVT_TE_nine", "SVT_TE_twenty",
                        "amount_limit_SVT_TE", "SVT_AN_eight", "SVT_AN_nine",
                        "SVT_AN_twenty", "amount_limit_SVT_AN",
                        "SVT_LV_eight", "SVT_LV_nine", "SVT_LV_twenty",
                        "amount_limit_SVT_LV")
            for row in json_list:
                for key in svt_keys:
                    if row.get(key) is None:
                        row[key] = "-"
            legal_comp["svt"] = json_list
        else:
            legal_comp["svt"] = []
        return legal_comp
    except Exception:
        # Best-effort: return whatever was assembled so far.
        return legal_comp
コード例 #15
0
def find_std_hundrd_composition_details(validity,
                                        cas_query,
                                        spec_query,
                                        req_body,
                                        all_details_json,
                                        spec_with_namprod=""):
    """Build combined standard / 100% / INCI composition rows for one spec.

    Queries the standard-composition and hundred-percent-composition Solr
    cores (filtered by usage ``validity``, component ids ``cas_query`` and
    spec id ``spec_query``) plus the unstructured CIDP core, merges the
    three result sets per pure substance from ``req_body["CAS_Level"]``,
    and appends a trailing "Total" summary row.

    Args:
        validity: ZUSAGE usage key used to filter composition records.
        cas_query: Solr query fragment listing pure-substance ids (CSUBI).
        spec_query: Real specification id being reported on.
        req_body: Request payload; ``CAS_Level`` drives the iteration.
        all_details_json: Common-level json used for the CIDP lookup.
        spec_with_namprod: Optional pre-computed "spec id + product name"
            label; computed on demand when empty.

    Returns:
        list[dict]: One row per substance matched in at least one source,
        plus a final total row; empty list when nothing matched.
    """
    zusage_value = helper.replace_character_for_querying([validity])
    # finding product details
    if spec_with_namprod == "":
        spec_with_namprod = get_specid_namprod_details(all_details_json)
    query = f'CSUBI:({cas_query}) && ZUSAGE:({zusage_value}) && SUBID:({spec_query})'
    json_list = []
    std_values, std_df = helper.get_data_from_core(config.solr_std_composition,
                                                   query)
    hund_values, hund_df = helper.get_data_from_core(
        config.solr_hundrd_composition, query)
    cidp_query = helper.unstructure_template(all_details_json, ["CIDP"])
    params = {"fl": config.unstructure_column_str}
    cidp_values, cidp_df = helper.get_data_from_core(
        config.solr_unstructure_data, cidp_query, params)
    for item in req_body.get("CAS_Level"):
        real_spec_list = item.get("real_Spec_Id")
        for real in real_spec_list:
            if spec_query in real:
                std_flag = ''
                hundrd_flag = ''
                inci_flag = ''
                json_make = {}
                # Default every name up front so the chemical-name fallback
                # below can never hit an unbound local when no
                # standard-composition row matched this substance.
                std_iupac_name = config.hypen_delimiter
                std_cas_name = config.hypen_delimiter
                hundrd_iupac_name = config.hypen_delimiter
                for std in std_values:
                    if (std.get("CSUBI").strip() == item.get("pure_Spec_Id")):
                        std_flag = 's'
                        json_make["std_Componant_Type"] = std.get("COMPT", "-")
                        json_make["std_value"] = helper.set_decimal_points(
                            std.get("CVALU", 0))
                        json_make["std_unit"] = std.get("CUNIT", "-")
                        std_iupac_name = std.get("NAM_IUPAC_EN", "-")
                        std_cas_name = std.get("NAM_CAS_EN", "-")
                        json_make["std_cal_value"] = helper.calculate_ppm_ppb(
                            std.get("CVALU", 0), std.get("CUNIT", "-"))
                for hundrd in hund_values:
                    if hundrd.get("CSUBI").strip() == item.get("pure_Spec_Id"):
                        hundrd_flag = 's'
                        json_make["hundrd_Componant_Type"] = hundrd.get(
                            "COMPT", "-")
                        json_make["hundrd_value"] = helper.set_decimal_points(
                            hundrd.get("CVALU", 0))
                        json_make["hundrd_unit"] = hundrd.get("CUNIT", "-")
                        # BUGFIX: these two previously read from ``std`` (the
                        # leftover variable of the loop above), which returned
                        # the wrong record's names and raised NameError when
                        # std_values was empty.
                        hundrd_iupac_name = hundrd.get("NAM_IUPAC_EN", "-")
                        hundrd_cas_name = hundrd.get("NAM_CAS_EN", "-")
                        json_make[
                            "hundrd_cal_value"] = helper.calculate_ppm_ppb(
                                hundrd.get("CVALU", 0),
                                hundrd.get("CUNIT", "-"))
                for inci in cidp_values:
                    data = json.loads(inci.get("DATA_EXTRACT"))
                    # NOTE: the trailing space in "CAS Number " matches the
                    # key emitted by the extraction pipeline — do not "fix".
                    inci_cas_number = data.get("CAS Number ").strip()
                    if inci_cas_number == item.get("cas_Number"):
                        inci_flag = 's'
                        json_make["inci_Componant_Type"] = "Active"
                        json_make["inci_value_unit"] = data.get(
                            "Target Composition", "-")
                # Fill neutral placeholders for the sources that had no match
                # so every emitted row has a uniform shape.
                if std_flag == '':
                    json_make["std_Componant_Type"] = '-'
                    json_make["std_value"] = 0
                    json_make["std_unit"] = "-"
                    json_make["std_cal_value"] = 0
                if hundrd_flag == '':
                    json_make["hundrd_Componant_Type"] = "-"
                    json_make["hundrd_value"] = 0
                    json_make["hundrd_unit"] = "-"
                    json_make["hundrd_cal_value"] = 0
                if inci_flag == '':
                    json_make["inci_Componant_Type"] = "-"
                    json_make["inci_value_unit"] = "-"
                # Emit the row only when at least one source matched.
                if std_flag == 's' or hundrd_flag == 's' or inci_flag == 's':
                    json_make["real_Spec_Id"] = spec_with_namprod
                    json_make["pure_spec_Id"] = str(item.get("pure_Spec_Id"))
                    json_make["cas_Number"] = item.get("cas_Number")
                    if (str(item.get("chemical_Name")).strip() == "-"
                            or str(item.get("chemical_Name")).strip() == ""):
                        # Fall back to the standard-composition CAS name,
                        # then to the IUPAC name (both default to "-").
                        if std_cas_name != "-":
                            chemical_name = std_cas_name
                        else:
                            chemical_name = std_iupac_name
                    else:
                        chemical_name = item.get("chemical_Name")
                    json_make["ingredient_Name"] = chemical_name
                    json_list.append(json_make)
                break
    if len(json_list) > 0:
        total_std_value = 0
        total_hundrd_value = 0
        total_inci_value = 0
        for item in json_list:
            try:
                if float(item.get("std_cal_value")) > 0:
                    total_std_value += float(item.get("std_cal_value"))
                if float(item.get("hundrd_cal_value")) > 0:
                    total_hundrd_value += float(item.get("hundrd_cal_value"))
                if item.get("inci_value_unit") != "-":
                    # Keep only digits and the decimal point, e.g. ">= 1.5%"
                    # becomes "1.5" before summation.
                    inci_list = [
                        incv for incv in str(item.get("inci_value_unit"))
                        if (incv.isdigit() or incv == ".")
                    ]
                    inci_str = "".join(inci_list)
                    total_inci_value += float(inci_str)
            except Exception as e:
                # Best-effort totals: rows with unparsable values are skipped.
                pass
        # sort descending by std_cal_value
        if len(json_list) > 0:
            sorted_dict = sort_cvalue(json_list, "std_cal_value")
            json_list = []
            for item in sorted_dict:
                json_list.append(sorted_dict.get(item))
        # Trailing "Total" summary row.
        json_make = {}
        json_make["pure_spec_Id"] = "Total"
        json_make["cas_Number"] = ""
        json_make["ingredient_Name"] = ""
        json_make["std_Componant_Type"] = ""
        json_make["std_value"] = helper.set_decimal_points(total_std_value)
        json_make["std_cal_value"] = ""
        json_make["std_unit"] = ""
        json_make["hundrd_Componant_Type"] = ""
        json_make["hundrd_value"] = helper.set_decimal_points(
            total_hundrd_value)
        json_make["hundrd_cal_value"] = ""
        json_make["hundrd_unit"] = ""
        json_make["inci_Componant_Type"] = ""
        if str(total_inci_value) == '0':
            inci_total_decimal_value = "-"
        else:
            inci_total_decimal_value = helper.set_decimal_points(
                total_inci_value)
        json_make["inci_value_unit"] = inci_total_decimal_value
        json_list.append(json_make)
    return json_list
コード例 #16
0
def get_assigned_ontology_document(req_body):
    """Return ontology PDF documents grouped by regulatory category.

    For subcategory "assigned" the unstructured Solr core is queried via the
    common-level template; for "unassigned" it is queried for configured
    categories flagged ``IS_RELEVANT:0``. Only hits whose ``file_path`` ends
    in "pdf" are kept. Results are bucketed per category using
    ``out_template`` and only non-empty buckets are returned.

    Args:
        req_body: Request payload; ``Category_details.Subcategory`` selects
            the assigned/unassigned mode.

    Returns:
        list[dict]: One entry per non-empty category bucket; empty list on
        unknown subcategory or any top-level failure.
    """
    try:
        output_json = []
        count = 0
        sub_category = req_body.get("Category_details").get("Subcategory")
        # Per-category result buckets; "category" holds the display label.
        out_template = {
            "US-FDA": {
                "US-FDA": [],
                "category": "US-FDA"
            }, "EU-FDA": {
                "EU-FDA": [], "category": "EU-FDA"
            }, "Toxicology-summary": {
                "Toxicology-summary": [], "category": "Toxicology Summary"
            }, "CIDP": {
                "CIDP": [], "category": "CIDP"
            }, "Toxicology": {
                "Toxicology": [], "category": "Toxicology"
            }
        }
        category = config.ontology_assigned_category
        if sub_category == "assigned":
            all_details_json, spec_list, material_list = helper.construct_common_level_json(req_body)
            ontology_query = helper.unstructure_template(all_details_json, category)
        elif sub_category == "unassigned":
            spec_list = []
            category_list = " || ".join(category)
            ontology_query = f'CATEGORY:({category_list}) && IS_RELEVANT:0'
        else:
            # BUGFIX: unknown subcategory previously left ``ontology_query``
            # unbound and relied on the resulting NameError being swallowed
            # by the outer except; return the empty result explicitly.
            return output_json
        params = {"fl": config.unstructure_column_str}
        unstructure_values, unstructure_df = helper.get_data_from_core(config.solr_unstructure_data, ontology_query, params)
        if len(unstructure_values) > 0:
            for item in unstructure_values:
                try:
                    unstructure_category = item.get("CATEGORY", "")
                    datastr = json.loads(item.get("DATA_EXTRACT", {}))
                    path = str(datastr.get("file_path", config.hypen_delimiter)).strip()
                    # Only PDF documents are surfaced to the UI.
                    if path.lower().endswith("pdf"):
                        result_spec = item.get("SPEC_ID")
                        product = item.get("PRODUCT", config.hypen_delimiter)
                        product_type = item.get("PRODUCT_TYPE", config.hypen_delimiter)
                        spec_id = helper.finding_spec_details(spec_list, result_spec)
                        filename = datastr.get("file_name", config.hypen_delimiter)
                        file_split = path.split("/")
                        # Identify which configured mount/source the file
                        # came from by matching a path segment.
                        file_source = ''
                        for source in config.file_sources:
                            if source in file_split:
                                file_source = source
                                break
                        json_make = {}
                        count += 1
                        date = datastr.get("Date", config.hypen_delimiter)
                        json_make["fileName"] = filename
                        json_make["file_source"] = file_source
                        json_make["category"] = unstructure_category
                        json_make["spec_Id"] = spec_id
                        if sub_category == "unassigned":
                            product = "No-key-value"
                        json_make["productName"] = product
                        json_make["product_Key"] = product_type
                        json_make["id"] = count
                        json_make["createdDate"] = date
                        json_make["data_extract"] = datastr
                        json_make["sql_Id"] = item.get("ID", '0')
                        json_make["solr_Id"] = item.get("solr_id", config.hypen_delimiter)
                        path = helper.replace_char_in_url(path)
                        # Build the browsable blob URL (strip the dbfs mount
                        # prefix, append the SAS token).
                        json_make["url"] = (config.blob_file_path)+path.replace("/dbfs/mnt/", "")+(config.sas_token)
                        extract_field = {}
                        extract_field["ontologyKey"] = product
                        # Copy through every extracted field except the
                        # bookkeeping ones listed in config.otherfields.
                        for efield in datastr:
                            if efield not in config.otherfields:
                                extract_field[efield] = datastr.get(efield, config.hypen_delimiter)
                        json_make["Extract_Field"] = extract_field
                        out_template[unstructure_category][unstructure_category].append(json_make)
                except Exception as e:
                    # Best-effort: a malformed document must not abort the
                    # whole listing.
                    pass
        for item in category:
            if len(out_template.get(item).get(item)) > 0:
                output_json.append(out_template.get(item))
        return output_json
    except Exception as e:
        return output_json
コード例 #17
0
def get_toxicology_details(req_body):
    """Assemble toxicology rows for the requested subcategory.

    Handles "Study Title and Date", "Monthly Toxicology Study List" and
    "Toxicology Summary" (all driven by unstructured Solr documents,
    cross-checked against standard/legal composition data), plus
    "Toxicology Registration Tracker" (driven by SQL tracker and
    group-test views).

    NOTE(review): the outer ``try`` opened below has no matching
    ``except`` and the function has no final ``return`` in this source —
    the tail of the original definition appears to be truncated by the
    extraction that produced this file.
    """
    try:
        sub_category = req_body.get("Category_details").get("Subcategory")
        json_list = []
        if sub_category in config.toxicology_category:
            all_details_json, spec_list, material_list = helper.construct_common_level_json(
                req_body)
            # Standard and legal composition frames used to verify that a
            # CAS-typed document actually belongs to the requested specs.
            std, std_df, legal, legal_df = helper.make_common_query_for_std_legal_composition(
                all_details_json)
            category = config.toxicology_dict.get(sub_category)
            toxicology_query = helper.unstructure_template(
                all_details_json, category)
            params = {"fl": config.unstructure_column_str}
            unstructure_values, unstructure_df = helper.get_data_from_core(
                config.solr_unstructure_data, toxicology_query, params)
            # NOTE(review): these three accumulators are never read in the
            # visible code.
            selant = []
            silanes = []
            tox_study = []
            if len(unstructure_values) > 0:
                for item in unstructure_values:
                    try:
                        json_make = {}
                        result_spec = item.get("SPEC_ID")
                        ontology_value = item.get("ONTOLOGY_VALUE", "")
                        spec_id = helper.finding_spec_details(
                            spec_list, result_spec)
                        product = item.get("PRODUCT", config.hypen_delimiter)
                        product_type = item.get("PRODUCT_TYPE",
                                                config.hypen_delimiter)
                        extract_data = item.get("DATA_EXTRACT", "")
                        # NOTE(review): redundant in-loop import; json is
                        # used at module scope by sibling functions.
                        import json
                        datastr = json.loads(extract_data)
                        category = item.get("CATEGORY", "")
                        file_path = datastr.get("file_path",
                                                config.hypen_delimiter)
                        file_path = helper.replace_char_in_url(file_path)
                        file_split = file_path.split("/")
                        # Identify the configured source mount by matching a
                        # path segment.
                        file_source = ''
                        for source in config.file_sources:
                            if source in file_split:
                                file_source = source
                                break
                        std_find = []
                        legal_find = []
                        std_flag = "No"
                        legal_flag = "No"
                        json_make["product_Name"] = product
                        json_make["product_Type"] = product_type
                        json_make["file_Source"] = file_source
                        json_make["ontology_value"] = ontology_value
                        json_make["spec_Id"] = spec_id
                        if sub_category == "Study Title and Date":
                            #checking std and legal compositon condition
                            if product_type in ["NUMCAS"]:
                                specid_list = spec_id.split(
                                    config.pipe_delimitter)
                                # Keep the document only when its CAS number
                                # appears in the standard or legal
                                # composition for one of the request's specs.
                                if "CAS" in list(
                                        std_df.columns) and "SUBID" in list(
                                            std_df.columns):
                                    std_find = std_df[
                                        (std_df["CAS"] == product)
                                        & (std_df["SUBID"].isin(specid_list))]
                                elif "CAS" in list(
                                        legal_df.columns) and "SUBID" in list(
                                            legal_df.columns):
                                    legal_find = legal_df[
                                        (legal_df["CAS"] == product) &
                                        (legal_df["SUBID"].isin(specid_list))]
                                if len(std_find) == 0 and len(legal_find) == 0:
                                    continue
                                else:
                                    if len(std_find) > 0:
                                        std_flag = "Yes"
                                    if len(legal_find) > 0:
                                        legal_flag = "Yes"
                                    json_make["standardComposition"] = std_flag
                                    json_make["legalComposition"] = legal_flag
                            json_make["test_Description"] = ""
                            json_make["filename"] = datastr.get(
                                "file_name", config.hypen_delimiter)
                            # Build the browsable blob URL (strip dbfs mount
                            # prefix, append SAS token).
                            if file_path != '':
                                path = config.blob_file_path + file_path.replace(
                                    "/dbfs/mnt/", "") + config.sas_token
                            else:
                                path = ''
                            json_make["file_Path"] = path
                            extract_field = {}
                            extract_field["study_Title"] = datastr.get(
                                "Study Title", config.hypen_delimiter)
                            extract_field["final_Report"] = datastr.get(
                                "Issue Date", config.hypen_delimiter)
                            json_make["extract_Field"] = extract_field
                            json_list.append(json_make)
                        elif sub_category == "Monthly Toxicology Study List":
                            json_make["product_Commercial_Name"] = product
                            json_make["date"] = datastr.get(
                                "date", config.hypen_delimiter)
                            # Silanes and sealant documents carry different
                            # field sets; each fills the other's fields with
                            # the hyphen placeholder.
                            if category == "tox_study_silanes":
                                studies = datastr.get("Studies",
                                                      config.hypen_delimiter)
                                status = datastr.get("Status",
                                                     config.hypen_delimiter)
                                comments = datastr.get("Comments",
                                                       config.hypen_delimiter)
                                if studies != None or status != None or comments != None:
                                    json_make["studies"] = studies
                                    json_make["status"] = status
                                    json_make["comments"] = comments
                                    json_make["test"] = config.hypen_delimiter
                                    json_make[
                                        "actions"] = config.hypen_delimiter
                                    json_make["segment"] = "Silanes"
                                else:
                                    continue
                            elif category == "tox_study_selant":
                                json_make["test"] = datastr.get(
                                    "Test", config.hypen_delimiter)
                                json_make["actions"] = datastr.get(
                                    "Actions", config.hypen_delimiter)
                                json_make["studies"] = config.hypen_delimiter
                                json_make["status"] = config.hypen_delimiter
                                json_make["comments"] = config.hypen_delimiter
                                json_make["segment"] = "Sealant"
                            json_list.append(json_make)
                        elif sub_category == "Toxicology Summary":
                            if product_type in ["NUMCAS"]:
                                specid_list = spec_id.split(
                                    config.pipe_delimitter)
                                if "CAS" in list(
                                        std_df.columns) and "SUBID" in list(
                                            std_df.columns):
                                    std_find = std_df[
                                        (std_df["CAS"] == product)
                                        & (std_df["SUBID"].isin(specid_list))]
                                if len(std_find) == 0:
                                    continue
                                else:
                                    if len(std_find) > 0 and "CVALU" in list(
                                            std_find.columns):
                                        std_cvalue = std_find[[
                                            "CVALU", "CUNIT"
                                        ]]
                                        std_cvalu_list = std_cvalue.values.tolist(
                                        )
                                        # Include the summary only when a
                                        # composition value normalised via
                                        # calculate_ppm_ppb exceeds 30.
                                        for value, unit in std_cvalu_list:
                                            cal_value = helper.calculate_ppm_ppb(
                                                value, unit)
                                            if cal_value > 30:
                                                std_flag = "Yes"
                                                json_make[
                                                    "standardComposition"] = std_flag
                                                json_make[
                                                    "compositionValue"] = value
                                                json_make[
                                                    "compositionUnit"] = unit
                                        if std_flag != 'Yes':
                                            continue
                            json_make["date_Of_Issue"] = datastr.get(
                                "Date", config.hypen_delimiter)
                            path = config.blob_file_path + file_path.replace(
                                "/dbfs/mnt/", "") + config.sas_token
                            json_make["filename"] = datastr.get(
                                "file_name", config.hypen_delimiter)
                            json_make["file_Path"] = path
                            json_list.append(json_make)
                    except Exception as e:
                        # Best-effort: one malformed document must not abort
                        # the whole listing.
                        pass
            if sub_category == "Monthly Toxicology Study List":
                # NOTE(review): monthly_studies is assigned but never used.
                monthly_studies = {}
                if len(json_list) > 0:
                    json_list = sort_date(json_list)
        elif sub_category == "Toxicology Registration Tracker":
            # First call without filters returns the available filter values.
            if ("tonnage_band") not in req_body and (
                    "product" not in req_body) and ("country" not in req_body):
                product_list = []
                tonnage_band_list = []
                country_list = []
                product_list = get_product_list()
                country_list = get_country_list()
                tonnage_band_list = get_tonnage_band_list()
                # NOTE(review): this local name shadows the json module for
                # the rest of this function.
                json = {
                    "product": product_list,
                    "country": country_list,
                    "tonnage_band": tonnage_band_list
                }
                json_list.append(json)
            else:
                tonnage_band_limit = req_body.get("tonnage_band", "ALL")
                product_name = req_body.get("product").get("name", "")
                country = req_body.get("country", [])
                tracker_query = config.registration_tracker_query.format(
                    tonnage_band_limit, product_name)
                group_test_query = config.select_query.format(
                    config.view_connector, config.group_test_view_name)
                #get data from sql
                tracker_sql_df, tracker_sql_list = helper.get_data_from_sql_table(
                    tracker_query, "yes")
                group_test_df, group_test_list = helper.get_data_from_sql_table(
                    group_test_query, "yes")
                if len(tracker_sql_df) > 0:
                    # country_cost=get_country_subtotal(tracker_sql_df)
                    #find total cost
                    if len(country) > 0 and country[0] != "ALL":
                        tracker_sql_df["CountryName"] = tracker_sql_df[
                            "CountryName"].str.strip()
                        tracker_sql_df = tracker_sql_df[
                            tracker_sql_df["CountryName"].isin(country)]

                    # Total cost counts only studies not yet completed.
                    if "EstimatedCost" in tracker_sql_df.columns and "Completed" in tracker_sql_df.columns:
                        cost_df = tracker_sql_df[
                            tracker_sql_df["Completed"].str.contains(
                                "no", case=False)]
                        total_cost = cost_df["EstimatedCost"].sum()
                    else:
                        total_cost = 0
                    study_type_cost = get_study_type_subtotal(tracker_sql_df)
                    total_estimated_time = get_estimated_time(
                        tracker_sql_df, group_test_df)
                    registartion_tracker_list = get_json_format_data(
                        tracker_sql_list, country)
                    json_make = {
                        "total_Cost":
                        str(helper.set_two_decimal_points(total_cost)),
                        "study_Type_Cost":
                        study_type_cost,
                        "total_Estimated_Time":
                        str(helper.set_two_decimal_points(
                            total_estimated_time)),
                        "registartion_Data":
                        registartion_tracker_list
                    }
                    json_list.append(json_make)
コード例 #18
0
def get_restricted_data_details(req_body):
    """Return restricted-substance rows (GADSL or CALPROP) for a request.

    Looks up unstructured Solr documents for the configured restricted
    category, keeps only substances that have a standard-composition weight
    for the request's specs, and (for CALPROP) also merges CAS details from
    the generic json.

    Args:
        req_body: Request payload; ``Category_details.Subcategory`` selects
            "GADSL" or "CALPROP".

    Returns:
        list[dict]: One row per matched substance; empty list on failure.
    """
    try:
        # Lazy %-style args: the payload is only formatted when the record
        # is actually emitted.
        logging.info("restricted_substance request %s", req_body)
        all_details_json, spec_list, material_list = helper.construct_common_level_json(
            req_body)
        std, std_df, legal, legal_df = helper.make_common_query_for_std_legal_composition(
            all_details_json)
        sub_category = req_body.get("Category_details").get("Subcategory")
        category = config.restricted_dict.get(sub_category)
        restricted_substance_query = helper.unstructure_template(
            all_details_json, [category])
        params = {"fl": config.unstructure_column_str}
        unstructure_values, unstructure_df = helper.get_data_from_core(
            config.solr_unstructure_data, restricted_substance_query, params)
        restricted_details = []
        if len(unstructure_values) > 0:
            if sub_category == "GADSL":
                for item in unstructure_values:
                    # Per-item try (consistent with the CALPROP loop below):
                    # one malformed document must not abort the whole result.
                    try:
                        gadsl_cas = item.get("PRODUCT", "")
                        result_spec = item.get("SPEC_ID")
                        spec_id = helper.finding_spec_details(
                            spec_list, result_spec)
                        product_type = item.get("PRODUCT_TYPE",
                                                config.hypen_delimiter)
                        std_wg, componant_type = helper.find_std_weight(
                            gadsl_cas, product_type, spec_id, std_df)
                        # No standard-composition weight -> substance is not
                        # part of these specs; skip it.
                        if std_wg == '':
                            continue
                        data = json.loads(item.get("DATA_EXTRACT"))
                        gadsl_json = {
                            "substance":
                            str(data.get("Substance", config.hypen_delimiter)),
                            "cas_NO":
                            gadsl_cas,
                            "class_action":
                            "",
                            "reason_Code":
                            str(data.get("Reason Code", config.hypen_delimiter)),
                            "source":
                            str(
                                data.get(
                                    "Source (Legal requirements, regulations)",
                                    config.hypen_delimiter)),
                            "reporting_threshold":
                            str(
                                data.get(
                                    "Reporting threshold (0.1% unless otherwise stated)",
                                    config.hypen_delimiter)),
                            "weight_Composition":
                            std_wg,
                            "spec_Id":
                            spec_id
                        }
                        restricted_details.append(gadsl_json)
                    except Exception as e:
                        continue
        if sub_category == "CALPROP":
            for item in unstructure_values:
                try:
                    calprop_cas = item.get("PRODUCT", "")
                    result_spec = item.get("SPEC_ID")
                    spec_id = helper.finding_spec_details(
                        spec_list, result_spec)
                    product_type = item.get("PRODUCT_TYPE",
                                            config.hypen_delimiter)
                    std_wg, componant_type = helper.find_std_weight(
                        calprop_cas, product_type, spec_id, std_df)
                    if std_wg == '':
                        continue
                    data = json.loads(item.get("DATA_EXTRACT"))
                    calprop_json = {
                        "chemical":
                        str(data.get("Chemical", config.hypen_delimiter)),
                        "type_Toxicity":
                        str(
                            data.get("Type of Toxicity",
                                     config.hypen_delimiter)),
                        "listing_Mechanism":
                        str(
                            data.get("Listing Mechanism",
                                     config.hypen_delimiter)),
                        "cas_NO":
                        calprop_cas,
                        "date_Listed":
                        str(data.get("Date Listed", config.hypen_delimiter)),
                        "NSRL_Data":
                        str(
                            data.get("NSRL or MADL (æg/day)a",
                                     config.hypen_delimiter)),
                        "weight_Composition":
                        std_wg,
                        "componant_Type":
                        componant_type,
                        "spec_Id":
                        spec_id
                    }
                    restricted_details.append(calprop_json)
                except Exception as e:
                    # BUGFIX: previously ``del calprop_json`` here raised
                    # NameError when the failure happened before the dict was
                    # bound, aborting the whole function via the outer except.
                    continue
            #find CAS details in generic json
            generic_cas_info_list = helper.get_generic_cas_details(
                all_details_json)
            for item in generic_cas_info_list:
                try:
                    cas_no = item.get("cas_no", config.hypen_delimiter)
                    # Every spec whose cas_number list contains this CAS.
                    generic_spec_list = [
                        element for element in all_details_json if cas_no in
                        all_details_json.get(element).get("cas_number")
                    ]
                    spec_str = (config.pipe_delimitter).join(generic_spec_list)
                    std_wg, componant_type = helper.find_std_weight(
                        cas_no, "NUMCAS", spec_str, std_df)
                    if std_wg == '':
                        continue
                    calprop_json = {
                        "chemical":
                        str(item.get("chemical_name", config.hypen_delimiter)),
                        "type_Toxicity":
                        str(item.get("toxicity_type", config.hypen_delimiter)),
                        "listing_Mechanism":
                        str(
                            item.get("listing_mechanism",
                                     config.hypen_delimiter)),
                        "cas_NO":
                        cas_no,
                        "date_Listed":
                        str(item.get("date_listed", config.hypen_delimiter)),
                        "NSRL_Data":
                        str(item.get("NSRL_MADL", config.hypen_delimiter)),
                        "weight_Composition":
                        std_wg,
                        "componant_Type":
                        componant_type,
                        "spec_Id":
                        spec_str
                    }
                    restricted_details.append(calprop_json)
                except Exception as e:
                    # Same fix as above: skip the bad item, keep the rest.
                    continue

        return restricted_details
    except Exception as e:
        return []
コード例 #19
0
def get_sales_data_details(req_body):
    """Build per-material sales summaries from the SAP-BW unstructured core.

    For every material in the request, matching "SAP-BW" records are
    aggregated: total "SALES KG", per-fiscal-year totals (2017-2020),
    sales organisations and sold-to countries.

    Args:
        req_body: request dict; "Mat_Level" carries material-level rows with
            "material_Number", "description" and "bdt" keys.

    Returns:
        [{"saleDataProducts": [<one summary dict per matched material>]}],
        or [] on any top-level failure.
    """
    try:
        logging.info("sales info request" + f'{req_body}')
        print(req_body)
        category = ["SAP-BW"]
        material_level_data = req_body.get("Mat_Level")
        all_details_json, spec_list, material_list = helper.construct_common_level_json(
            req_body)
        # Presumably called for its side effects on shared query state —
        # the return value is unused here. TODO confirm.
        ret = helper.make_common_query_for_std_legal_composition(
            all_details_json)
        sale_info_query = helper.unstructure_template(all_details_json,
                                                      category)
        params = {"fl": config.unstructure_column_str}
        # Fix: reference the core name via config, consistent with every
        # other Solr query in this module.
        result, result_df = helper.get_data_from_core(
            config.solr_unstructure_data, sale_info_query, params)
        sales_list = []
        sales_kg = 0
        sales_org = []
        region = []
        material_flag = ''
        # Fix: pre-bind spec_id so a failure inside the record loop cannot
        # leave it unbound when the summary dict is built.
        spec_id = ''
        for mat_id in material_list:
            # Per-fiscal-year running totals for this material.
            totals = {"2017": 0, "2018": 0, "2019": 0, "2020": 0}
            try:
                for data in result:
                    try:
                        material_number = data.get("PRODUCT", "")
                        if material_number != mat_id:
                            continue
                        material_flag = 's'
                        result_spec = data.get("SPEC_ID", "")
                        spec_id = helper.finding_spec_details(
                            spec_list, result_spec)
                        data_extract = json.loads(
                            data.get("DATA_EXTRACT", {}))
                        sales_org_str = data_extract.get(
                            "Sales Organization", "")
                        # Fix: the original tested the accumulator list
                        # (sales_org) instead of the freshly read value,
                        # letting explicit nulls into the org list.
                        if sales_org_str is not None:
                            sales_org.append(sales_org_str)
                        sales_kg_str = data_extract.get("SALES KG", 0)
                        if sales_kg_str is None:
                            # Explicit nulls count as zero so the yearly
                            # totals below never see float(None).
                            sales_kg_str = 0
                        sales_kg = sales_kg + float(sales_kg_str)
                        sold_str = data_extract.get(
                            "Sold-to Customer Country", "")
                        if sold_str is not None:
                            region.append(sold_str)
                        # "Fiscal year/period" looks like "<period>.<year>";
                        # only the tracked years are accumulated.
                        year_split = str(
                            data_extract.get("Fiscal year/period",
                                             "-")).split(".")
                        if len(year_split) > 1 and year_split[1] in totals:
                            totals[year_split[1]] += float(sales_kg_str)
                    except Exception as e:
                        # Best effort: one malformed record must not abort
                        # the whole material.
                        pass
                if material_flag == 's':
                    desc = []
                    bdt = []
                    for item in material_level_data:
                        try:
                            if item.get("material_Number") == mat_id:
                                desc.append(item.get("description", ""))
                                bdt.append(item.get("bdt", ""))
                        except Exception as e:
                            pass
                    # Zero totals are reported as the literal "0"; non-zero
                    # values are rounded to the configured precision.
                    if sales_kg != 0:
                        sales_kg = helper.set_decimal_points(sales_kg)
                    for year in totals:
                        if totals[year] != 0:
                            totals[year] = helper.set_decimal_points(
                                totals[year])
                    sales_json = {
                        "material_number": mat_id,
                        "material_description":
                        (config.comma_delimiter).join(list(set(desc))),
                        "basic_data":
                        (config.comma_delimiter).join(list(set(bdt))),
                        "sales_Org":
                        (config.comma_delimiter).join(list(set(sales_org))),
                        "past_Sales": str(sales_kg),
                        "spec_id": spec_id,
                        "region_sold":
                        (config.comma_delimiter).join(list(set(region))),
                        "total_sale_2017": str(totals["2017"]),
                        "total_sale_2018": str(totals["2018"]),
                        "total_sale_2019": str(totals["2019"]),
                        "total_sale_2020": str(totals["2020"])
                    }
                    sales_list.append(sales_json)
                    # Reset the per-material accumulators.
                    material_flag = ''
                    sales_kg = 0
                    sales_org = []
                    region = []
            except Exception as e:
                pass
        return [{"saleDataProducts": sales_list}]
    except Exception as e:
        return []
# Code example #20 (score: 0)
def get_product_compliance_details(req_body):
    """Return product-compliance data for the requested sub-category.

    "Notification Status": rows from the notification-status core with the
    ;-separated NOTIF/ADDIN phrase keys resolved to readable text via the
    phrase-translation core.
    "AG Registration Status": EU / US / LATAM registration records parsed
    from the unstructured-data core.

    Returns:
        A list of result dicts (shape depends on the sub-category); on
        failure, whatever was accumulated so far.
    """
    # Bound before anything that can raise: the outer handler returns it.
    result = []
    try:
        logging.info("product compliance request" + f'{req_body}')
        notification_details = []
        all_details_json, spec_list, material_list = helper.construct_common_level_json(
            req_body)
        sub_category = req_body.get("Category_details").get("Subcategory")
        if sub_category == "Notification Status":
            spec_query = (config.or_delimiter).join(spec_list)
            query = f'SUBID:({spec_query})'
            params = {"fl": config.notification_column_str}
            pcomp, pcomp_df = helper.get_data_from_core(
                config.solr_notification_status, query, params)
            # Fix: pre-initialise the phrase-translation frame so the loop
            # below cannot hit a NameError (and silently drop every row)
            # when the NOTIF column is absent from the result set.
            key_value, key_value_df = [], pd.DataFrame()
            if ("NOTIF" in list(pcomp_df.columns)) and len(pcomp) > 0:
                phrase_key = (list(pcomp_df["NOTIF"].unique()))
                if ("ADDIN" in list(pcomp_df.columns)):
                    phrase_key = phrase_key + (list(
                        pcomp_df["ADDIN"].unique()))
                # Keys arrive ;-joined per row; flatten to individual keys.
                phrase_split = ";".join(phrase_key)
                phrase_key = phrase_split.split(";")
                phrase_key_query = helper.replace_character_for_querying(
                    phrase_key)
                query = f'PHRKY:({phrase_key_query})'
                params = {"fl": config.phrase_column_str}
                key_value, key_value_df = helper.get_data_from_core(
                    config.solr_phrase_translation, query, params)
            # The translation frame is loop-invariant; check it once.
            has_phrases = ("PTEXT" in list(key_value_df.columns)) and (
                "PHRKY" in list(key_value_df.columns))
            for item in pcomp:
                try:
                    notify = {}
                    notify["regulatory_List"] = str(
                        item.get("RLIST", config.hypen_delimiter)).strip()
                    # Resolve the ;-separated NOTIF phrase keys to text.
                    ntfy_rg = str(item.get("NOTIF", "")).strip().split(";")
                    notify_value = [(config.hypen_delimiter)]
                    if has_phrases:
                        ptext_df = key_value_df[key_value_df["PHRKY"].isin(
                            ntfy_rg)]
                        notify_value = list(ptext_df["PTEXT"])
                    notify["regulatory_Basis"] = "-"
                    notify["notification"] = (
                        config.comma_delimiter).join(notify_value)
                    # Additional-info (ADDIN) keys resolved the same way.
                    add_rg = str(item.get("ADDIN", "")).strip().split(";")
                    add_list = [(config.hypen_delimiter)]
                    if has_phrases:
                        add_df = key_value_df[key_value_df["PHRKY"].isin(
                            add_rg)]
                        add_list = list(add_df["PTEXT"])
                    notify["additional_Info"] = (
                        config.comma_delimiter).join(add_list)
                    notify["usage"] = str(item.get("ZUSAGE", "-")).strip()
                    # spec_id is "<SUBID>-<known product names>".
                    subid = item.get("SUBID", "-")
                    if (all_details_json.get(subid)):
                        nam_list = all_details_json.get(subid).get(
                            "namprod", [])
                        nam_str = (config.comma_delimiter).join(nam_list)
                    else:
                        nam_str = config.hypen_delimiter
                    notify["spec_id"] = subid + (
                        config.hypen_delimiter) + nam_str
                    notification_details.append(notify)
                except Exception as e:
                    pass
            result = notification_details
        elif sub_category == "AG Registration Status":

            def pick(src, key):
                # Stringified, stripped field value with "-" fallback.
                return str(src.get(key, config.hypen_delimiter)).strip()

            eu_json_list = []
            us_json_list = []
            latin_list = []
            category = config.ag_registration_list
            ag_reg_query = helper.unstructure_template(all_details_json,
                                                       category)
            params = {"fl": config.unstructure_column_str}
            unstructure_values, unstructure_df = helper.get_data_from_core(
                config.solr_unstructure_data, ag_reg_query, params)
            for item in unstructure_values:
                try:
                    json_make = {}
                    result_spec = item.get("SPEC_ID")
                    spec_id = helper.finding_spec_details(
                        spec_list, result_spec)
                    region = item.get("CATEGORY").strip()
                    datastr = json.loads(item.get("DATA_EXTRACT"))
                    if region == "EU_REG_STATUS":
                        json_make["product"] = pick(item, "PRODUCT")
                        json_make["country"] = pick(datastr, "Country")
                        json_make["holder"] = pick(datastr, "Holder")
                        json_make["registration"] = pick(
                            datastr, "Registration")
                        json_make["expiry"] = pick(datastr, "Expiry")
                        json_make["status"] = pick(datastr, "Status")
                        json_make["certificate"] = pick(
                            datastr, "Certificate")
                        json_make["spec_id"] = spec_id
                        eu_json_list.append(json_make)
                    elif region == "US_REG_STATUS":
                        json_make["product"] = pick(item, "PRODUCT")
                        json_make["EPA_Inert_Product_Listing"] = pick(
                            datastr, "EPA Inert Product Listing")
                        json_make["CA_DPR"] = pick(datastr, "CA DPR")
                        json_make["CP_DA"] = pick(datastr, "CPDA")
                        json_make["WSDA"] = pick(datastr, "WSDA")
                        json_make["OMRI"] = pick(datastr, "OMRI")
                        json_make["OMRI_Reneval_Date"] = pick(
                            datastr, "OMRI Renewal Date")
                        json_make["Canada_OMRI"] = pick(
                            datastr, "Canada OMRI")
                        json_make["PMRA"] = pick(datastr, "PMRA")
                        json_make["spec_id"] = spec_id
                        us_json_list.append(json_make)
                    elif region == "LATAM_REG_STATUS":
                        json_make["product"] = pick(item, "PRODUCT")
                        json_make["country"] = pick(datastr, "Country")
                        json_make["registered_Name"] = pick(
                            datastr, "Registered Name")
                        json_make["date_Granted"] = pick(
                            datastr, "Date Granted")
                        json_make["date_Of_Expiry"] = pick(
                            datastr, "Date of Expiry")
                        json_make["registration_Holder"] = pick(
                            datastr, "Holder")
                        json_make["registration_Certificate"] = pick(
                            datastr, "Registration Certificate (Location)")
                        json_make["spec_id"] = spec_id
                        latin_list.append(json_make)
                except Exception as e:
                    pass
            result.append({
                "complianceRegistrationEUData": eu_json_list,
                "complianceRegistrationCanada_Data": us_json_list,
                "complianceRegistrationLatin_Data": latin_list
            })

        return result
    except Exception as e:
        return result
# Code example #21 (score: 0)
def get_customer_communication_details(req_body):
    """Gather customer-communication records for the requested sub-category.

    Document categories (e.g. "US FDA Letter", "EU Food Contact", "Heavy
    Metals content") are served from the unstructured-data core;
    "Communication History" is served from the SFDC core, either as a case
    list or, when "case_Number" is present in the request, as the e-mails
    of that single case.

    Returns:
        A list of dicts; on failure, whatever was accumulated so far.
    """
    # Fix: bound before anything that can raise, so the except handler
    # below always has a value to return (the original could NameError).
    json_list = []
    try:
        all_details_json, spec_list, material_list = helper.construct_common_level_json(req_body)
        sub_category = req_body.get("Category_details").get("Subcategory")
        if sub_category in config.customer_communication_category:
            category = config.customer_communication_category.get(sub_category)
            communication_query = helper.unstructure_template(all_details_json, category)
            params = {"fl": config.unstructure_column_str}
            unstructure_values, unstructure_df = helper.get_data_from_core(
                config.solr_unstructure_data, communication_query, params)
            if len(unstructure_values) > 0:
                count = 0
                for item in unstructure_values:
                    try:
                        json_make = {}
                        datastr = json.loads(item.get("DATA_EXTRACT", {}))
                        result_spec = item.get("SPEC_ID")
                        product = item.get("PRODUCT", config.hypen_delimiter)
                        product_type = item.get("PRODUCT_TYPE", config.hypen_delimiter)
                        spec_id = helper.finding_spec_details(spec_list, result_spec)
                        if (sub_category in ["US FDA Letter", "EU Food Contact"]):
                            path = str(datastr.get("file_path", config.hypen_delimiter)).strip()
                            # Only PDF documents are exposed for these
                            # categories.
                            if (path.lower().endswith("pdf")):
                                file_split = path.split("/")
                                file_source = ''
                                for source in config.file_sources:
                                    if source in file_split:
                                        file_source = source
                                        break
                                count += 1
                                # Everything that is not a known meta field
                                # is surfaced under Extract_Field.
                                extract_field = {}
                                for efield in datastr:
                                    if efield not in config.otherfields:
                                        extract_field[efield] = datastr.get(efield, config.hypen_delimiter)
                                json_make["Extract_Field"] = extract_field
                                filename = datastr.get("file_name", config.hypen_delimiter)
                                date = datastr.get("Date", config.hypen_delimiter)
                                json_make["spec_Id"] = spec_id
                                json_make["fileName"] = filename
                                json_make["file_Source"] = file_source
                                json_make["product_Type"] = product_type
                                json_make["productName"] = product
                                json_make["id"] = count
                                json_make["createdDate"] = date
                                path = helper.replace_char_in_url(path)
                                json_make["url"] = (config.blob_file_path) + path.replace("/dbfs/mnt/", "") + (config.sas_token)
                                json_list.append(json_make)
                        elif sub_category == "Heavy Metals content":
                            # NOTE(review): DATA_EXTRACT for this category
                            # appears to be double-encoded JSON (a JSON
                            # string inside JSON) — confirm with ingestion.
                            datastr = json.loads(datastr)
                            path = datastr.get("file_path", "")
                            file_split = path.split("/")
                            file_source = ''
                            for source in config.file_sources:
                                if source in file_split:
                                    file_source = source
                                    break
                            json_make["spec_Id"] = spec_id
                            json_make["file_Source"] = file_source
                            json_make["product"] = product
                            json_make["product_Type"] = product_type
                            json_make["aka"] = remove_nan(datastr.get("AKA", config.hypen_delimiter))
                            json_make["batch"] = remove_nan(datastr.get("Batch #", config.hypen_delimiter))
                            json_make["sample"] = remove_nan(datastr.get("Sample #", config.hypen_delimiter))
                            json_make["system"] = remove_nan(datastr.get("System", config.hypen_delimiter))
                            json_make["date"] = remove_nan(datastr.get("Date", config.hypen_delimiter))
                            json_make["aluminium_Al"] = remove_nan(datastr.get("Aluminum (Al)", config.hypen_delimiter))
                            json_make["antimony_Sb"] = remove_nan(datastr.get("Antimony (Sb)", config.hypen_delimiter))
                            json_make["arsenic_As"] = remove_nan(datastr.get("Arsenic (As)", config.hypen_delimiter))
                            json_make["barium_Ba"] = remove_nan(datastr.get("Barium (Ba)", config.hypen_delimiter))
                            json_make["beryllium_Be"] = remove_nan(datastr.get("Beryllium (Be)", config.hypen_delimiter))
                            json_make["boron_B"] = remove_nan(datastr.get("Boron (B)", config.hypen_delimiter))
                            json_make["cadmium_Cd"] = remove_nan(datastr.get("Cadmium (Cd)", config.hypen_delimiter))
                            json_make["calcium_Ca"] = remove_nan(datastr.get("Calcium (Ca)", config.hypen_delimiter))
                            json_make["carbon"] = remove_nan(datastr.get("Carbon", config.hypen_delimiter))
                            json_list.append(json_make)
                    except Exception as e:
                        pass
        elif sub_category == "Communication History" and ("case_Number" not in req_body):
            logging.info(f'communication req body {req_body}')
            sfdc_query = helper.sfdc_template(all_details_json)
            params = {"fl": config.sfdc_column_str}
            sfdc_values, sfdc_df = helper.get_data_from_core(config.solr_sfdc, sfdc_query, params)
            if len(sfdc_values) > 0 and ("CASENUMBER" in list(sfdc_df.columns)):
                # Make sure every expected column exists before selecting.
                if len(sfdc_df.columns) != len(config.sfdc_column):
                    dummy = pd.DataFrame([], columns=config.sfdc_column)
                    sfdc_df = pd.concat([sfdc_df, dummy])
                # Fix: drop_duplicates(inplace=True) on a column slice acts
                # on a view and triggers pandas' chained-assignment warning;
                # reassigning the result is the safe equivalent.
                case_df = sfdc_df[config.sfdc_case_call].drop_duplicates()
                case_df = case_df.fillna(config.hypen_delimiter)
                case_df = case_df.replace({"NULL": "-"})
                for index, row in case_df.iterrows():
                    json_make = {
                        "case_Number": row["CASENUMBER"],
                        "manufacturing_Plant": row["MANUFACTURINGPLANT"],
                        "customer_Name": row["ACCOUNTNAME"],
                        "key": row["MATCHEDPRODUCTVALUE"],
                        "product_Type": row["MATCHEDPRODUCTCATEGORY"],
                        "topic": row["REASON"],
                        "tier_2_Owner": row["SOP_TIER_2_OWNER__C"],
                        "bu": row["BU"]
                    }
                    json_list.append(json_make)
        elif sub_category == "Communication History" and ("case_Number" in req_body):
            selected_case = req_body.get("case_Number")
            sfdc_query = helper.sfdc_template(all_details_json)
            sfdc_query = f'(CASENUMBER:{selected_case}) && ' + sfdc_query
            params = {"fl": config.sfdc_email_call}
            sfdc_values, sfdc_df = helper.get_data_from_core(config.solr_sfdc, sfdc_query, params)
            for item in sfdc_values:
                json_make = {}
                contact = item.get("CONTACTEMAIL", config.hypen_delimiter)
                if contact != "NULL":
                    json_make["contact_Email"] = contact
                else:
                    json_make["contact_Email"] = config.hypen_delimiter
                json_make["email_Content"] = item.get("EMAILBODY", config.hypen_delimiter)
                json_make["email_Subject"] = item.get("EMAILSUBJECT", "")
                attachment = str(item.get("EMAILATTACHMENT", ""))
                add_doc = []
                # Attachments arrive as a "|:|"-separated list of paths.
                for att in attachment.split("|:|"):
                    if att != "NULL" and att != '' and att != "Not Found":
                        path = att.split("/")
                        # The leading character (path separator) is dropped
                        # before building the blob URL.
                        filename = helper.replace_char_in_url(att[1:])
                        file = (config.blob_file_path) + filename + (config.sas_token)
                        add_doc.append({"name": path[-1], "url": file})
                json_make["attached_Docs"] = add_doc
                json_list.append(json_make)
        return json_list
    except Exception as e:
        return json_list
# Code example #22 (score: 0)
def get_product_attributes(req_body):
    try:
        product_attributes_result = []
        json_list = []
        sub_category = req_body.get("Category_details").get("Subcategory")
        validity = req_body.get("Category_details").get("validity")
        if sub_category == "Basic Information":
            all_details_json, spec_list, material_list = helper.construct_common_level_json(
                req_body)
            idtxt = []
            #finding Relables
            spec_join = (config.or_delimiter).join(spec_list)
            spec_query = f'SUBID:({spec_join})'
            params = {"fl": config.relable_column_str}
            result, result_df = helper.get_data_from_core(
                config.solr_substance_identifier, spec_query, params)
            if len(result_df.columns) != len(config.relable_column):
                dummy = pd.DataFrame([], columns=config.relable_column)
                result_df = pd.concat([result_df, dummy])
            result_df = result_df.fillna("-")
            result_df = result_df.replace({"NULL": "-"})
            for item in all_details_json:
                json_make = {}
                json_make["spec_id"] = item
                json_make["product_Identification"] = (
                    config.comma_delimiter).join(
                        all_details_json.get(item).get("namprod", []))
                idtxt_df = result_df[(result_df["IDCAT"] == "NAM")
                                     & (result_df["IDTYP"] == "PROD_RLBL") &
                                     (result_df["LANGU"].isin(["E", "", "-"]))
                                     & (result_df["SUBID"] == item)]
                idtxt = list(idtxt_df["IDTXT"].unique())
                if len(idtxt) > 0:
                    json_make["relabels"] = (
                        config.comma_delimiter).join(idtxt)
                else:
                    json_make["relabels"] = "-"
                json_list.append(json_make)
            product_attributes_result.append({"basic_details": json_list})
            #product Application
            json_list = []
            category = ["Prod-App"]
            prod_query = helper.unstructure_template(all_details_json,
                                                     category)
            params = {"fl": config.unstructure_column_str}
            unstructure_values, unstructure_df = helper.get_data_from_core(
                config.solr_unstructure_data, prod_query, params)
            if len(unstructure_values) > 0:
                try:
                    for data in unstructure_values:
                        json_make = {}
                        product = data.get("PRODUCT", config.hypen_delimiter)
                        product_type = data.get("PRODUCT_TYPE",
                                                config.hypen_delimiter)
                        datastr = json.loads(data.get("DATA_EXTRACT", {}))
                        result_spec = data.get("SPEC_ID")
                        spec_id = helper.finding_spec_details(
                            spec_list, result_spec)
                        path = datastr.get("image_path")
                        if path != None:
                            if path.lower().endswith('pdf'):
                                file_type = 'pdf'
                            elif path.lower().endswith('png'):
                                file_type = 'png'
                            else:
                                file_type = 'others'
                            file_split = path.split("/")
                            file_source = ''
                            for source in config.file_sources:
                                if source in file_split:
                                    file_source = source
                                    break
                            filename = datastr.get("file_name",
                                                   config.hypen_delimiter)
                            if '.pdf' in filename:
                                filename = filename[:-4]
                            json_make["filename"] = filename
                            json_make["file_source"] = file_source
                            json_make["file_Type"] = file_type
                            json_make["product"] = product
                            json_make["product_Type"] = product_type
                            path = helper.replace_char_in_url(path)
                            json_make[
                                "prod_App"] = config.blob_file_path + path.replace(
                                    "/dbfs/mnt/", "") + config.sas_token
                            json_make["spec_Id"] = spec_id
                            json_list.append(json_make)
                        else:
                            continue
                except Exception as e:
                    pass
            product_attributes_result.append(
                {"product_Application": json_list})
        elif sub_category == "GHS Labeling":
            spec_json, spec_list = spec_constructor(req_body)
            spec_join = (config.or_delimiter).join(spec_list)
            spec_query = f'SUBID:({spec_join})'
            ghs_values, ghs_df = helper.get_data_from_core(
                config.solr_ghs_labeling_list_data, spec_query)
            # --- GHS Labeling branch (tail) ---
            # Gather every phrase key referenced by the configured GHS label
            # columns so all display texts can be fetched in one Solr query.
            total_phrky = []
            if len(ghs_values) > 0:
                for key_column in config.ghs_label:
                    try:
                        if key_column in list(ghs_df.columns):
                            # Cell values hold ";"-separated phrase keys;
                            # flatten the unique cell values into single keys.
                            phrase_key = list(ghs_df[key_column].unique())
                            phrase_split = ";".join(phrase_key)
                            total_phrky += phrase_split.split(";")
                    except Exception as e:
                        pass
                #finding phrase text
                # phrase_key_query=(config.or_delimiter).join(total_phrky)
                phrase_key_query = helper.replace_character_for_querying(
                    total_phrky)
                query = f'PHRKY:({phrase_key_query})'
                params = {"fl": config.phrase_column_str}
                # One bulk lookup of phrase-key -> translated text.
                key_value, key_value_df = helper.get_data_from_core(
                    config.solr_phrase_translation, query, params)
                # Build one labeling record per GHS row.
                for data in ghs_values:
                    try:
                        json_make = {}
                        specid = data.get("SUBID", "")
                        # Display string "<spec id><delimiter><spec name>".
                        spec_nam_str = specid + (config.hypen_delimiter
                                                 ) + spec_json.get(specid, "")
                        json_make["spec_Id"] = spec_nam_str
                        json_make["usage"] = str(
                            data.get("ZUSAGE", config.hypen_delimiter).strip())
                        # Translate each phrase-key field into display text.
                        json_make[
                            "regulatory_Basis"] = helper.finding_phrase_text(
                                key_value_df,
                                str(data.get("REBAS", "")).strip())
                        json_make["signal_Word"] = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("SIGWD", "")).strip())
                        json_make[
                            "hazard_Statements"] = helper.finding_phrase_text(
                                key_value_df,
                                str(data.get("HAZST", "")).strip())
                        json_make[
                            "prec_Statements"] = helper.finding_phrase_text(
                                key_value_df,
                                str(data.get("PRSTG", "")).strip())
                        json_make["prstp"] = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("PRSTP", "")).strip())
                        json_make["prstr"] = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("PRSTR", "")).strip())
                        json_make["prsts"] = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("PRSTS", "")).strip())
                        json_make["prstd"] = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("PRSTD", "")).strip())
                        add_info = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("ADDIN", "")).strip())
                        remarks = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("REMAR", "")).strip())
                        # Merge additional info and remarks into one field,
                        # skipping whichever side is the "-" placeholder.
                        add_remarks = config.hypen_delimiter
                        if (add_info != config.hypen_delimiter) and (
                                remarks != config.hypen_delimiter):
                            add_remarks = add_info + (
                                config.comma_delimiter) + remarks
                        elif (add_info != config.hypen_delimiter):
                            add_remarks = add_info
                        elif (remarks != config.hypen_delimiter):
                            add_remarks = remarks
                        json_make[
                            "additional_Information_remarks"] = add_remarks
                        #symbols
                        # Resolve the ";"-separated pictogram keys in SYMBL to
                        # image files (GRAPH column) and captions (PTEXT).
                        symbols = []
                        path_list = []
                        symbol_text = []
                        text_list = []
                        symbol_value = str(data.get("SYMBL", "")).strip()
                        key_list = symbol_value.split(';')
                        if len(key_list) > 0 and ("PHRKY" in list(
                                key_value_df.columns)) and ("GRAPH" in list(
                                    key_value_df.columns)):
                            text_df = key_value_df[key_value_df["PHRKY"].isin(
                                key_list)]
                            path_list = list(text_df["GRAPH"].unique())
                            text_list = list(text_df["PTEXT"].unique())
                        if len(path_list) > 0:
                            for file in path_list:
                                # SAS-signed blob URL for each pictogram image.
                                path = (config.ghs_image_path) + file + (
                                    config.sas_token)
                                symbols.append({"name": path})
                        json_make["symbols"] = symbols
                        json_make["symbols_Text"] = (
                            config.comma_delimiter).join(text_list)
                        # Rows whose usage is 'PUBLIC: REG_EU' are excluded
                        # from the response.
                        if str(
                                data.get("ZUSAGE", config.hypen_delimiter).
                                strip()).upper() != 'PUBLIC: REG_EU':
                            json_list.append(json_make)
                    except Exception as e:
                        pass
            product_attributes_result.append({"ghs_Labeling": json_list})
        elif sub_category in ["Structures and Formulas", "Flow Diagrams"]:
            # --- Structures & Formulas / Flow Diagrams branch ---
            # Collects unstructured documents (images/PDFs) per category and
            # groups them into per-category result lists.
            chem_structure = []
            molecular_formula = []
            molecular_weight = []
            man_flow_dg = []
            synthesis_dg = []
            all_details_json, spec_list, material_list = helper.construct_common_level_json(
                req_body)
            # Standard and legal composition data are used below to flag CAS
            # numbers that actually appear in a composition.
            std, std_df, legal, legal_df = helper.make_common_query_for_std_legal_composition(
                all_details_json)
            if sub_category == "Structures and Formulas":
                un_category = config.structure_category
            else:
                un_category = ["man_flow_diagram", "syn_flow_diagram"]
            query = helper.unstructure_template(all_details_json, un_category)
            params = {"fl": config.unstructure_column_str}
            unstructure_values, unstructure_df = helper.get_data_from_core(
                config.solr_unstructure_data, query, params)
            if len(unstructure_values) > 0:
                for item in unstructure_values:
                    try:
                        json_make = {}
                        datastr = {}
                        category = item.get("CATEGORY", config.hypen_delimiter)
                        # DATA_EXTRACT is a JSON string payload with the file
                        # details for this document.
                        datastr = json.loads(item.get("DATA_EXTRACT", {}))
                        result_spec = item.get("SPEC_ID")
                        product = item.get("PRODUCT", config.hypen_delimiter)
                        product_type = item.get("PRODUCT_TYPE",
                                                config.hypen_delimiter)
                        spec_id = helper.finding_spec_details(
                            spec_list, result_spec)
                        path = datastr.get("file_path", config.hypen_delimiter)
                        path = helper.replace_char_in_url(path)
                        std_find = []
                        legal_find = []
                        std_flag = "No"
                        legal_flag = "No"
                        #checking std and legal compositon condition
                        # For CAS-number products, keep the document only when
                        # the CAS appears in the standard or legal composition
                        # of one of the requested specs.
                        if product_type in ["NUMCAS"]:
                            specid_list = spec_id.split(config.pipe_delimitter)
                            if "CAS" in list(
                                    std_df.columns) and "SUBID" in list(
                                        std_df.columns):
                                std_find = std_df[
                                    (std_df["CAS"] == product)
                                    & (std_df["SUBID"].isin(specid_list))]
                            elif "CAS" in list(
                                    legal_df.columns) and "SUBID" in list(
                                        legal_df.columns):
                                legal_find = legal_df[
                                    (legal_df["CAS"] == product)
                                    & (legal_df["SUBID"].isin(specid_list))]
                            if len(std_find) == 0 and len(legal_find) == 0:
                                continue
                            else:
                                if len(std_find) > 0:
                                    std_flag = "Yes"
                                if len(legal_find) > 0:
                                    legal_flag = "Yes"
                                json_make["standardComposition"] = std_flag
                                json_make["legalComposition"] = legal_flag
                        # Classify the file type from its extension.
                        if path.lower().endswith('pdf'):
                            file_type = 'pdf'
                        elif path.lower().endswith('png'):
                            file_type = 'png'
                        else:
                            file_type = 'others'
                        # Identify which configured source folder the file
                        # came from (first match in the path segments wins).
                        file_split = path.split("/")
                        file_source = ''
                        for source in config.file_sources:
                            if source in file_split:
                                file_source = source
                                break
                        json_make["spec_Id"] = spec_id
                        json_make["file_Source"] = file_source
                        json_make["product_Type"] = product_type
                        json_make["productName"] = product
                        # Route the record to its category bucket; each branch
                        # builds a SAS-signed blob URL from the mounted path.
                        if category == "Chemical Structure":
                            filename = datastr.get(
                                "file_path", config.hypen_delimiter).split("/")
                            if len(filename) > 0:
                                json_make["fileName"] = filename[-1]
                            else:
                                json_make["fileName"] = config.hypen_delimiter
                            json_make["file_Path"] = (
                                config.blob_file_path) + path.replace(
                                    "/dbfs/mnt/", "") + (config.sas_token)
                            json_make["file_Type"] = file_type
                            chem_structure.append(json_make)
                        elif category == "molecular formula":
                            # Molecular formulas use the rendered image path
                            # instead of the source document path.
                            path = datastr.get("image_path")
                            if path != None:
                                if path.lower().endswith('pdf'):
                                    file_type = 'pdf'
                                elif path.lower().endswith('png'):
                                    file_type = 'png'
                                else:
                                    file_type = 'others'
                                json_make["fileName"] = datastr.get(
                                    "file_name", config.hypen_delimiter)
                                json_make["file_Path"] = (
                                    config.blob_file_path) + path.replace(
                                        "/dbfs/mnt/", "") + (config.sas_token)
                                json_make["file_Type"] = file_type
                                molecular_formula.append(json_make)
                            else:
                                continue
                        elif category == "Molecular-Weight":
                            json_make["fileName"] = datastr.get(
                                "file_name", config.hypen_delimiter)
                            json_make["file_Path"] = (
                                config.blob_file_path) + path.replace(
                                    "/dbfs/mnt/", "") + (config.sas_token)
                            json_make["file_Type"] = file_type
                            # Skip records without an extracted weight value.
                            weight = datastr.get("Molecular Weight")
                            if weight != None:
                                json_make["moelcular_Weight"] = weight
                            else:
                                continue
                            molecular_weight.append(json_make)
                        elif category == "man_flow_diagram":
                            filename = datastr.get(
                                "file_path", config.hypen_delimiter).split("/")
                            if len(filename) > 0:
                                json_make["fileName"] = filename[-1]
                            else:
                                json_make["fileName"] = config.hypen_delimiter
                            json_make["file_Path"] = (
                                config.blob_file_path) + path.replace(
                                    "/dbfs/mnt/", "") + (config.sas_token)
                            json_make["file_Type"] = file_type
                            man_flow_dg.append(json_make)

                        elif category == "syn_flow_diagram":
                            filename = datastr.get(
                                "file_path", config.hypen_delimiter).split("/")
                            if len(filename) > 0:
                                json_make["fileName"] = filename[-1]
                            else:
                                json_make["fileName"] = config.hypen_delimiter
                            json_make["file_Path"] = (
                                config.blob_file_path) + path.replace(
                                    "/dbfs/mnt/", "") + (config.sas_token)
                            json_make["file_Type"] = file_type
                            synthesis_dg.append(json_make)
                            json_make = {}
                    except Exception as e:
                        pass
            # Assemble the response in the shape expected per sub-category.
            if sub_category == "Structures and Formulas":
                product_attributes_result.append(
                    {"chemical_Structure": chem_structure})
                product_attributes_result.append(
                    {"molecular_Formula": molecular_formula})
                product_attributes_result.append(
                    {"molecular_Weight": molecular_weight})
            else:
                product_attributes_result.append(
                    {"manufacture_Flow": man_flow_dg})
                product_attributes_result.append(
                    {"synthesis_Diagram": synthesis_dg})
        elif sub_category == "Composition":
            # --- Composition branch ---
            # Builds product-level identification (names, relabels, INCI),
            # material lists, and usage-driven standard/legal composition data.
            logging.info(f"product_attributes_request_body {req_body}")
            all_details_json, spec_list, material_list = helper.construct_common_level_json(
                req_body)
            idtxt = []
            #finding Relables
            spec_join = (config.or_delimiter).join(spec_list)
            spec_query = f'SUBID:({spec_join})'
            params = {"fl": config.relable_column_str}
            result, result_df = helper.get_data_from_core(
                config.solr_substance_identifier, spec_query, params)
            # Pad missing columns so downstream column access cannot KeyError,
            # then normalise NaN/"NULL" placeholders to "-".
            if len(result_df.columns) != len(config.relable_column):
                dummy = pd.DataFrame([], columns=config.relable_column)
                result_df = pd.concat([result_df, dummy])
            result_df = result_df.fillna("-")
            result_df = result_df.replace({"NULL": "-"})
            for item in all_details_json:
                try:
                    json_make = {}
                    json_make["spec_id"] = item
                    # NOTE(review): nam_df is not filtered by SUBID == item,
                    # so every spec sees the same product identifications —
                    # confirm whether a per-spec filter was intended.
                    nam_df = result_df[(result_df["IDCAT"] == "NAM")
                                       & (result_df["IDTYP"] == "PROD") &
                                       (result_df["DELFLG"] != 'X')]
                    nam_list = list(nam_df["IDTXT"].unique())
                    if len(nam_list) > 0:
                        product_identify = (
                            config.comma_delimiter).join(nam_list)
                    else:
                        product_identify = config.hypen_delimiter
                    json_make["product_Identification"] = product_identify
                    namprod_str = (config.comma_delimiter).join(
                        all_details_json.get(item).get("namprod", []))
                    # Relabels: English (or language-less) PROD_RLBL names for
                    # this specific spec.
                    idtxt_df = result_df[(result_df["IDCAT"] == "NAM")
                                         & (result_df["IDTYP"] == "PROD_RLBL")
                                         & (result_df["LANGU"].isin(
                                             ["E", "", "-"])) &
                                         (result_df["SUBID"] == item)]
                    idtxt = list(idtxt_df["IDTXT"].unique())
                    if len(idtxt) > 0:
                        json_make["relabels"] = (
                            config.comma_delimiter).join(idtxt)
                    else:
                        json_make["relabels"] = "-"
                    # NOTE(review): json_make is rebuilt each iteration but
                    # only the last iteration's dict survives to be appended
                    # (below, after the INCI lookup) — earlier specs' data is
                    # dropped. Verify this loop should append per spec.
                except Exception as e:
                    pass
            #finding inciname
            # Materials for these specs (excluding deleted/related entries).
            query = f'TYPE:MATNBR && TEXT2:({spec_join}) && -TYPE:SUBIDREL && -TEXT6:X'
            params = {"fl": config.solr_product_column}
            mat_values, mat_df = helper.get_data_from_core(
                config.solr_product, query, params)
            bdt = []
            if "TEXT3" in mat_df.columns:
                bdt = list(mat_df["TEXT3"].unique())
            display_inci_name = []
            # for spec in all_details_json:
            #     bdt+=all_details_json.get(spec).get("bdt",[])
            bdt_query = helper.replace_character_for_querying(bdt)
            query = f'BDTXT:({bdt_query}) && SUBID:({spec_join})'
            inci_values, inci_df = helper.get_data_from_core(
                config.solr_inci_name, query)
            inci_df.drop_duplicates(inplace=True)
            # Pair each BDT text with its INCI name as "bdt|inci".
            if "INCINAME" in list(inci_df.columns) and "BDTXT" in list(
                    inci_df.columns):
                bdtxt_df = inci_df[["BDTXT", "INCINAME"]]
                bdtxt_df.drop_duplicates(inplace=True)
                bdtxt_list = bdtxt_df.values.tolist()
                for bdtxt, inci in bdtxt_list:
                    temp = bdtxt + (config.pipe_delimitter) + inci
                    display_inci_name.append(temp)

            json_make["INCI_name"] = (
                config.comma_delimiter).join(display_inci_name)
            json_list.append(json_make)
            #finding material level
            # NOTE(review): namprod_str is assigned inside the loop above; if
            # all_details_json is empty (or the loop failed early) this raises
            # NameError, caught only by the function's outer except.
            spec_with_namprod = f"{spec_list[0]} - {namprod_str}"
            materials = []
            active_material = []
            all_material = []
            # if material_query!='':
            for item in mat_values:
                try:
                    json_make = {}
                    material_number = item.get("TEXT1", config.hypen_delimiter)
                    description = item.get("TEXT4", config.hypen_delimiter)
                    bdt = item.get("TEXT3", config.hypen_delimiter)
                    # TEXT5 == 'X' marks an inactive material; active ones are
                    # collected separately, all materials in all_material.
                    if str(item.get("TEXT5")).strip() != 'X':
                        json_make["material_Number"] = material_number
                        json_make["description"] = description
                        json_make["bdt"] = bdt
                        json_make["real_Spec_Id"] = spec_with_namprod
                        active_material.append(json_make)
                        json_make = {}
                    json_make["material_Number"] = material_number
                    json_make["description"] = description
                    json_make["bdt"] = bdt
                    json_make["real_Spec_Id"] = spec_with_namprod
                    all_material.append(json_make)
                except Exception as e:
                    pass
            #Finding usage for compositon
            # pure_spec_id values feed the CAS-side query for composition.
            cas_list = []
            for spec in all_details_json:
                cas_list += all_details_json.get(spec).get("pure_spec_id")
            cas_query = (config.or_delimiter).join(cas_list)
            spec_query = (config.or_delimiter).join(spec_list)
            std_hund_list, usage_catgory, legal_list, legal_usage = find_zusage(
                all_details_json, cas_query, spec_query)
            # Default to the first available usage as the validity selector.
            if len(usage_catgory) > 0:
                validity = usage_catgory[0]
                std_values = find_std_hundrd_composition_details(
                    validity, cas_query, spec_query, req_body,
                    all_details_json, spec_with_namprod)
            else:
                std_values = []
                std_hund_list = []
            if len(legal_usage) > 0:
                validity = legal_usage[0]
                legal_values = find_legal_composition_details(
                    validity, cas_query, spec_query, req_body,
                    all_details_json, cas_list, spec_with_namprod)
            else:
                legal_values = {"legal_composition": [], "svt": []}
                legal_list = []
            #finding default value for std composition
            # Final response: one object aggregating all composition facets.
            json_make = {}
            json_make["product_Level"] = json_list
            json_make["active_material"] = active_material
            json_make["all_material"] = all_material
            json_make["std_hund_usage"] = std_hund_list
            json_make["legal_usage"] = legal_list
            json_make["std_values"] = std_values
            json_make["legal_values"] = legal_values
            product_attributes_result = [json_make]
        elif sub_category in [
                "Standard, 100 % & INCI Composition", "Legal Composition"
        ]:
            # --- Standard/100%/INCI & Legal Composition branch ---
            # Two-phase API: without a validity the available usage lists are
            # returned; with one, the detailed composition values are returned.
            all_details_json, spec_list, material_list = helper.construct_common_level_json(
                req_body)
            cas_list = []
            spec_list = []
            for spec in all_details_json:
                spec_list.append(spec)
                cas_list += all_details_json.get(spec).get("pure_spec_id")
            cas_query = (config.or_delimiter).join(cas_list)
            spec_query = (config.or_delimiter).join(spec_list)
            # NOTE(review): `validity` is not assigned in this branch —
            # presumably a parameter of the enclosing function; confirm.
            if validity is None:
                # Phase 1: return the selectable usage categories only.
                std_hund_list, usage_catgory, legal_list, legal_usage = find_zusage(
                    all_details_json, cas_query, spec_query)
                if sub_category == "Standard, 100 % & INCI Composition":
                    return std_hund_list
                elif sub_category == "Legal Composition":
                    return legal_list
            if validity is not None:
                # Phase 2: return full composition details for the chosen
                # validity/usage.
                if sub_category == "Standard, 100 % & INCI Composition":
                    std_values = find_std_hundrd_composition_details(
                        validity, cas_query, spec_query, req_body,
                        all_details_json)
                    return std_values
                elif sub_category == "Legal Composition":
                    legal_values = find_legal_composition_details(
                        validity, cas_query, spec_query, req_body,
                        all_details_json, cas_list)
                    return legal_values
        return product_attributes_result
    except Exception as e:
        # Best-effort endpoint: swallow any failure and return whatever has
        # been accumulated so far (possibly the initial/partial result).
        return product_attributes_result