# Example #1
# 0
def get_bio_compatibility_details(spec_json, spec_list):
    """Return bio-compatibility testing rows for the given spec ids.

    Args:
        spec_json: mapping of spec id -> display name, used to build the
            "spec_Id" label ("<id>-<name>").
        spec_list: spec ids to OR together in the Solr query.

    Returns:
        list[dict]: one dict per Solr record with keys ``spec_Id``,
        ``usage``, ``bstat`` and ``btest``; an empty list when the query
        or setup fails.
    """
    details = []
    try:
        # Query the bio-compatibility core for all requested spec ids at once.
        spec_query = (config.or_delimiter).join(spec_list)
        query = f'SUBID:({spec_query})'
        check_column = ["BSTAT", "BTEST"]
        values, df = helper.get_data_from_core(
            config.solr_biocompatibility_testing_pri, query)
        # Resolve the phrase keys for the status/test columns once up front.
        key_value, key_value_df = get_related_phrase_text(check_column, df)
        for data in values:
            try:
                json_make = {}
                specid = data.get("SUBID", "")
                json_make["spec_Id"] = specid + (
                    config.hypen_delimiter) + spec_json.get(specid, "")
                json_make["usage"] = str(
                    data.get("ZUSAGE", config.hypen_delimiter).strip())
                # Translate the raw phrase keys into display text.
                json_make["bstat"] = helper.finding_phrase_text(
                    key_value_df, str(data.get("BSTAT", "")).strip())
                json_make["btest"] = helper.finding_phrase_text(
                    key_value_df, str(data.get("BTEST", "")).strip())
                details.append(json_make)
            except Exception:
                # Best-effort: skip a malformed record, but leave a trace
                # instead of silently discarding it.
                logging.exception("skipping bio-compatibility record")
        return details
    except Exception:
        logging.exception("get_bio_compatibility_details failed")
        return []
def get_product_compliance_details(spec_list, founded_category):
    """Classify notification-status countries and AG-registration regions.

    Args:
        spec_list: spec ids to OR together in the Solr query.
        founded_category: categories found for the product; checked for
            membership against ``config.ag_registration_country`` keys.

    Returns:
        tuple: (positive_country, negative_country, others, active_str)
        where the first three are lists of RLIST values bucketed by
        compliance status, and ``active_str`` is a comma-joined list of
        active AG-registration regions (or "No active region found").
    """
    # Result holders live outside the try so the final return can never
    # raise NameError, even if the query setup fails.
    negative_country = []
    positive_country = []
    others = []
    active_region = []
    inactive_region = []
    active_str = "No active region found"
    try:
        spec_query = (config.or_delimiter).join(spec_list)
        query = f'SUBID:({spec_query})'
        params = {"fl": config.notification_column_str}
        pcomp, pcomp_df = helper.get_data_from_core(
            config.solr_notification_status, query, params)
        if ("NOTIF" in list(pcomp_df.columns)) and len(pcomp) > 0:
            # A NOTIF value may itself be a ";"-separated key list; flatten
            # all unique values into one list of phrase keys.
            phrase_key = list(pcomp_df["NOTIF"].unique())
            phrase_key = ";".join(phrase_key).split(";")
            phrase_key_query = helper.replace_character_for_querying(
                phrase_key)
            query = f'PHRKY:({phrase_key_query})'
            params = {"fl": config.phrase_column_str}
            key_value, key_value_df = helper.get_data_from_core(
                config.solr_phrase_translation, query, params)
            for item in pcomp:
                try:
                    place = item.get("RLIST", config.hypen_delimiter)
                    key = str(item.get("NOTIF", "")).strip()
                    key_text = helper.finding_phrase_text(key_value_df, key)
                    status = key_text.lower().strip()
                    if status in config.in_compliance_notification_status:
                        positive_country.append(place)
                    elif status in config.not_in_compliance_notification_status:
                        negative_country.append(place)
                    else:
                        others.append(place)
                except Exception:
                    # Best-effort: one bad record must not drop the rest.
                    logging.exception("skipping notification record")

        # AG registration: split configured regions into active/inactive
        # depending on whether they appear in founded_category.
        for region in config.ag_registration_country:
            if region in founded_category:
                active_region.append(
                    config.ag_registration_country.get(region))
            else:
                inactive_region.append(
                    config.ag_registration_country.get(region))
        if len(active_region) > 0:
            active_str = (config.comma_delimiter).join(active_region)

    except Exception:
        logging.exception("get_product_compliance_details failed")
    return positive_country, negative_country, others, active_str
def get_product_attributes(req_body):
    """Build product-attribute payloads for the requested sub-category.

    Dispatches on ``req_body["Category_details"]["Subcategory"]``:
    "Basic Information", "GHS Labeling", "Structures and Formulas" /
    "Flow Diagrams", "Composition", and two composition detail views.
    Data is pulled from several Solr cores via ``helper`` and shaped
    into a list of section dicts.

    Args:
        req_body: request payload; expected to contain a
            "Category_details" dict with "Subcategory" and optional
            "validity" keys (schema inferred from usage here -- confirm
            against callers).

    Returns:
        list: section dicts for most sub-categories.  The two
        composition detail branches return their own structures
        directly.  On any unexpected error the partially built
        ``product_attributes_result`` is returned as-is.
    """
    try:
        product_attributes_result = []
        json_list = []
        sub_category = req_body.get("Category_details").get("Subcategory")
        validity = req_body.get("Category_details").get("validity")
        if sub_category == "Basic Information":
            all_details_json, spec_list, material_list = helper.construct_common_level_json(
                req_body)
            idtxt = []
            #finding Relables
            spec_join = (config.or_delimiter).join(spec_list)
            spec_query = f'SUBID:({spec_join})'
            params = {"fl": config.relable_column_str}
            result, result_df = helper.get_data_from_core(
                config.solr_substance_identifier, spec_query, params)
            # Pad missing expected columns so the boolean filters below
            # cannot KeyError on a sparse Solr result.
            if len(result_df.columns) != len(config.relable_column):
                dummy = pd.DataFrame([], columns=config.relable_column)
                result_df = pd.concat([result_df, dummy])
            result_df = result_df.fillna("-")
            result_df = result_df.replace({"NULL": "-"})
            for item in all_details_json:
                json_make = {}
                json_make["spec_id"] = item
                json_make["product_Identification"] = (
                    config.comma_delimiter).join(
                        all_details_json.get(item).get("namprod", []))
                # Relabels: English (or language-less) PROD_RLBL names
                # for this specific spec id.
                idtxt_df = result_df[(result_df["IDCAT"] == "NAM")
                                     & (result_df["IDTYP"] == "PROD_RLBL") &
                                     (result_df["LANGU"].isin(["E", "", "-"]))
                                     & (result_df["SUBID"] == item)]
                idtxt = list(idtxt_df["IDTXT"].unique())
                if len(idtxt) > 0:
                    json_make["relabels"] = (
                        config.comma_delimiter).join(idtxt)
                else:
                    json_make["relabels"] = "-"
                json_list.append(json_make)
            product_attributes_result.append({"basic_details": json_list})
            #product Application
            json_list = []
            category = ["Prod-App"]
            prod_query = helper.unstructure_template(all_details_json,
                                                     category)
            params = {"fl": config.unstructure_column_str}
            unstructure_values, unstructure_df = helper.get_data_from_core(
                config.solr_unstructure_data, prod_query, params)
            if len(unstructure_values) > 0:
                try:
                    for data in unstructure_values:
                        json_make = {}
                        product = data.get("PRODUCT", config.hypen_delimiter)
                        product_type = data.get("PRODUCT_TYPE",
                                                config.hypen_delimiter)
                        # DATA_EXTRACT holds a JSON string with file metadata.
                        datastr = json.loads(data.get("DATA_EXTRACT", {}))
                        result_spec = data.get("SPEC_ID")
                        spec_id = helper.finding_spec_details(
                            spec_list, result_spec)
                        path = datastr.get("image_path")
                        if path != None:
                            # Classify the file type from its extension.
                            if path.lower().endswith('pdf'):
                                file_type = 'pdf'
                            elif path.lower().endswith('png'):
                                file_type = 'png'
                            else:
                                file_type = 'others'
                            file_split = path.split("/")
                            file_source = ''
                            for source in config.file_sources:
                                if source in file_split:
                                    file_source = source
                                    break
                            filename = datastr.get("file_name",
                                                   config.hypen_delimiter)
                            # Strip a trailing ".pdf" from the display name.
                            if '.pdf' in filename:
                                filename = filename[:-4]
                            json_make["filename"] = filename
                            json_make["file_source"] = file_source
                            json_make["file_Type"] = file_type
                            json_make["product"] = product
                            json_make["product_Type"] = product_type
                            path = helper.replace_char_in_url(path)
                            # Rewrite the DBFS mount path into a blob URL
                            # with the SAS token appended.
                            json_make[
                                "prod_App"] = config.blob_file_path + path.replace(
                                    "/dbfs/mnt/", "") + config.sas_token
                            json_make["spec_Id"] = spec_id
                            json_list.append(json_make)
                        else:
                            continue
                except Exception as e:
                    pass
            product_attributes_result.append(
                {"product_Application": json_list})
        elif sub_category == "GHS Labeling":
            spec_json, spec_list = spec_constructor(req_body)
            spec_join = (config.or_delimiter).join(spec_list)
            spec_query = f'SUBID:({spec_join})'
            ghs_values, ghs_df = helper.get_data_from_core(
                config.solr_ghs_labeling_list_data, spec_query)
            total_phrky = []
            if len(ghs_values) > 0:
                # Collect every phrase key referenced by the GHS columns so
                # they can be translated with a single phrase query.
                for key_column in config.ghs_label:
                    try:
                        if key_column in list(ghs_df.columns):
                            phrase_key = list(ghs_df[key_column].unique())
                            phrase_split = ";".join(phrase_key)
                            total_phrky += phrase_split.split(";")
                    except Exception as e:
                        pass
                #finding phrase text
                # phrase_key_query=(config.or_delimiter).join(total_phrky)
                phrase_key_query = helper.replace_character_for_querying(
                    total_phrky)
                query = f'PHRKY:({phrase_key_query})'
                params = {"fl": config.phrase_column_str}
                key_value, key_value_df = helper.get_data_from_core(
                    config.solr_phrase_translation, query, params)
                for data in ghs_values:
                    try:
                        json_make = {}
                        specid = data.get("SUBID", "")
                        spec_nam_str = specid + (config.hypen_delimiter
                                                 ) + spec_json.get(specid, "")
                        json_make["spec_Id"] = spec_nam_str
                        json_make["usage"] = str(
                            data.get("ZUSAGE", config.hypen_delimiter).strip())
                        json_make[
                            "regulatory_Basis"] = helper.finding_phrase_text(
                                key_value_df,
                                str(data.get("REBAS", "")).strip())
                        json_make["signal_Word"] = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("SIGWD", "")).strip())
                        json_make[
                            "hazard_Statements"] = helper.finding_phrase_text(
                                key_value_df,
                                str(data.get("HAZST", "")).strip())
                        json_make[
                            "prec_Statements"] = helper.finding_phrase_text(
                                key_value_df,
                                str(data.get("PRSTG", "")).strip())
                        json_make["prstp"] = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("PRSTP", "")).strip())
                        json_make["prstr"] = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("PRSTR", "")).strip())
                        json_make["prsts"] = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("PRSTS", "")).strip())
                        json_make["prstd"] = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("PRSTD", "")).strip())
                        add_info = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("ADDIN", "")).strip())
                        remarks = helper.finding_phrase_text(
                            key_value_df,
                            str(data.get("REMAR", "")).strip())
                        # Merge additional info and remarks into a single
                        # display string, skipping "-" placeholders.
                        add_remarks = config.hypen_delimiter
                        if (add_info != config.hypen_delimiter) and (
                                remarks != config.hypen_delimiter):
                            add_remarks = add_info + (
                                config.comma_delimiter) + remarks
                        elif (add_info != config.hypen_delimiter):
                            add_remarks = add_info
                        elif (remarks != config.hypen_delimiter):
                            add_remarks = remarks
                        json_make[
                            "additional_Information_remarks"] = add_remarks
                        #symbols
                        # SYMBL is a ";"-separated list of phrase keys; the
                        # phrase table's GRAPH column carries the image file.
                        symbols = []
                        path_list = []
                        symbol_text = []
                        text_list = []
                        symbol_value = str(data.get("SYMBL", "")).strip()
                        key_list = symbol_value.split(';')
                        if len(key_list) > 0 and ("PHRKY" in list(
                                key_value_df.columns)) and ("GRAPH" in list(
                                    key_value_df.columns)):
                            text_df = key_value_df[key_value_df["PHRKY"].isin(
                                key_list)]
                            path_list = list(text_df["GRAPH"].unique())
                            text_list = list(text_df["PTEXT"].unique())
                        if len(path_list) > 0:
                            for file in path_list:
                                path = (config.ghs_image_path) + file + (
                                    config.sas_token)
                                symbols.append({"name": path})
                        json_make["symbols"] = symbols
                        json_make["symbols_Text"] = (
                            config.comma_delimiter).join(text_list)
                        # Rows with usage 'PUBLIC: REG_EU' are excluded
                        # from the response.
                        if str(
                                data.get("ZUSAGE", config.hypen_delimiter).
                                strip()).upper() != 'PUBLIC: REG_EU':
                            json_list.append(json_make)
                    except Exception as e:
                        pass
            product_attributes_result.append({"ghs_Labeling": json_list})
        elif sub_category in ["Structures and Formulas", "Flow Diagrams"]:
            chem_structure = []
            molecular_formula = []
            molecular_weight = []
            man_flow_dg = []
            synthesis_dg = []
            all_details_json, spec_list, material_list = helper.construct_common_level_json(
                req_body)
            std, std_df, legal, legal_df = helper.make_common_query_for_std_legal_composition(
                all_details_json)
            if sub_category == "Structures and Formulas":
                un_category = config.structure_category
            else:
                un_category = ["man_flow_diagram", "syn_flow_diagram"]
            query = helper.unstructure_template(all_details_json, un_category)
            params = {"fl": config.unstructure_column_str}
            unstructure_values, unstructure_df = helper.get_data_from_core(
                config.solr_unstructure_data, query, params)
            if len(unstructure_values) > 0:
                for item in unstructure_values:
                    try:
                        json_make = {}
                        datastr = {}
                        category = item.get("CATEGORY", config.hypen_delimiter)
                        datastr = json.loads(item.get("DATA_EXTRACT", {}))
                        result_spec = item.get("SPEC_ID")
                        product = item.get("PRODUCT", config.hypen_delimiter)
                        product_type = item.get("PRODUCT_TYPE",
                                                config.hypen_delimiter)
                        spec_id = helper.finding_spec_details(
                            spec_list, result_spec)
                        path = datastr.get("file_path", config.hypen_delimiter)
                        path = helper.replace_char_in_url(path)
                        std_find = []
                        legal_find = []
                        std_flag = "No"
                        legal_flag = "No"
                        #checking std and legal compositon condition
                        # For CAS-number products, only keep records whose
                        # CAS appears in the standard or legal composition
                        # of one of the requested specs.
                        if product_type in ["NUMCAS"]:
                            specid_list = spec_id.split(config.pipe_delimitter)
                            if "CAS" in list(
                                    std_df.columns) and "SUBID" in list(
                                        std_df.columns):
                                std_find = std_df[
                                    (std_df["CAS"] == product)
                                    & (std_df["SUBID"].isin(specid_list))]
                            elif "CAS" in list(
                                    legal_df.columns) and "SUBID" in list(
                                        legal_df.columns):
                                legal_find = legal_df[
                                    (legal_df["CAS"] == product)
                                    & (legal_df["SUBID"].isin(specid_list))]
                            if len(std_find) == 0 and len(legal_find) == 0:
                                continue
                            else:
                                if len(std_find) > 0:
                                    std_flag = "Yes"
                                if len(legal_find) > 0:
                                    legal_flag = "Yes"
                                json_make["standardComposition"] = std_flag
                                json_make["legalComposition"] = legal_flag
                        if path.lower().endswith('pdf'):
                            file_type = 'pdf'
                        elif path.lower().endswith('png'):
                            file_type = 'png'
                        else:
                            file_type = 'others'
                        file_split = path.split("/")
                        file_source = ''
                        for source in config.file_sources:
                            if source in file_split:
                                file_source = source
                                break
                        json_make["spec_Id"] = spec_id
                        json_make["file_Source"] = file_source
                        json_make["product_Type"] = product_type
                        json_make["productName"] = product
                        # Route the record into the right bucket by CATEGORY.
                        if category == "Chemical Structure":
                            filename = datastr.get(
                                "file_path", config.hypen_delimiter).split("/")
                            if len(filename) > 0:
                                json_make["fileName"] = filename[-1]
                            else:
                                json_make["fileName"] = config.hypen_delimiter
                            json_make["file_Path"] = (
                                config.blob_file_path) + path.replace(
                                    "/dbfs/mnt/", "") + (config.sas_token)
                            json_make["file_Type"] = file_type
                            chem_structure.append(json_make)
                        elif category == "molecular formula":
                            # Molecular formulas use image_path instead of
                            # file_path; skip records without one.
                            path = datastr.get("image_path")
                            if path != None:
                                if path.lower().endswith('pdf'):
                                    file_type = 'pdf'
                                elif path.lower().endswith('png'):
                                    file_type = 'png'
                                else:
                                    file_type = 'others'
                                json_make["fileName"] = datastr.get(
                                    "file_name", config.hypen_delimiter)
                                json_make["file_Path"] = (
                                    config.blob_file_path) + path.replace(
                                        "/dbfs/mnt/", "") + (config.sas_token)
                                json_make["file_Type"] = file_type
                                molecular_formula.append(json_make)
                            else:
                                continue
                        elif category == "Molecular-Weight":
                            json_make["fileName"] = datastr.get(
                                "file_name", config.hypen_delimiter)
                            json_make["file_Path"] = (
                                config.blob_file_path) + path.replace(
                                    "/dbfs/mnt/", "") + (config.sas_token)
                            json_make["file_Type"] = file_type
                            weight = datastr.get("Molecular Weight")
                            if weight != None:
                                json_make["moelcular_Weight"] = weight
                            else:
                                continue
                            molecular_weight.append(json_make)
                        elif category == "man_flow_diagram":
                            filename = datastr.get(
                                "file_path", config.hypen_delimiter).split("/")
                            if len(filename) > 0:
                                json_make["fileName"] = filename[-1]
                            else:
                                json_make["fileName"] = config.hypen_delimiter
                            json_make["file_Path"] = (
                                config.blob_file_path) + path.replace(
                                    "/dbfs/mnt/", "") + (config.sas_token)
                            json_make["file_Type"] = file_type
                            man_flow_dg.append(json_make)

                        elif category == "syn_flow_diagram":
                            filename = datastr.get(
                                "file_path", config.hypen_delimiter).split("/")
                            if len(filename) > 0:
                                json_make["fileName"] = filename[-1]
                            else:
                                json_make["fileName"] = config.hypen_delimiter
                            json_make["file_Path"] = (
                                config.blob_file_path) + path.replace(
                                    "/dbfs/mnt/", "") + (config.sas_token)
                            json_make["file_Type"] = file_type
                            synthesis_dg.append(json_make)
                            json_make = {}
                    except Exception as e:
                        pass
            if sub_category == "Structures and Formulas":
                product_attributes_result.append(
                    {"chemical_Structure": chem_structure})
                product_attributes_result.append(
                    {"molecular_Formula": molecular_formula})
                product_attributes_result.append(
                    {"molecular_Weight": molecular_weight})
            else:
                product_attributes_result.append(
                    {"manufacture_Flow": man_flow_dg})
                product_attributes_result.append(
                    {"synthesis_Diagram": synthesis_dg})
        elif sub_category == "Composition":
            logging.info(f"product_attributes_request_body {req_body}")
            all_details_json, spec_list, material_list = helper.construct_common_level_json(
                req_body)
            idtxt = []
            #finding Relables
            spec_join = (config.or_delimiter).join(spec_list)
            spec_query = f'SUBID:({spec_join})'
            params = {"fl": config.relable_column_str}
            result, result_df = helper.get_data_from_core(
                config.solr_substance_identifier, spec_query, params)
            # Pad missing expected columns so the filters below cannot
            # KeyError on a sparse Solr result.
            if len(result_df.columns) != len(config.relable_column):
                dummy = pd.DataFrame([], columns=config.relable_column)
                result_df = pd.concat([result_df, dummy])
            result_df = result_df.fillna("-")
            result_df = result_df.replace({"NULL": "-"})
            for item in all_details_json:
                try:
                    json_make = {}
                    json_make["spec_id"] = item
                    # NOTE(review): unlike the relabel filter below, this
                    # product-name filter has no SUBID clause, so names from
                    # every queried spec are merged -- confirm intended.
                    nam_df = result_df[(result_df["IDCAT"] == "NAM")
                                       & (result_df["IDTYP"] == "PROD") &
                                       (result_df["DELFLG"] != 'X')]
                    nam_list = list(nam_df["IDTXT"].unique())
                    if len(nam_list) > 0:
                        product_identify = (
                            config.comma_delimiter).join(nam_list)
                    else:
                        product_identify = config.hypen_delimiter
                    json_make["product_Identification"] = product_identify
                    namprod_str = (config.comma_delimiter).join(
                        all_details_json.get(item).get("namprod", []))
                    idtxt_df = result_df[(result_df["IDCAT"] == "NAM")
                                         & (result_df["IDTYP"] == "PROD_RLBL")
                                         & (result_df["LANGU"].isin(
                                             ["E", "", "-"])) &
                                         (result_df["SUBID"] == item)]
                    idtxt = list(idtxt_df["IDTXT"].unique())
                    if len(idtxt) > 0:
                        json_make["relabels"] = (
                            config.comma_delimiter).join(idtxt)
                    else:
                        json_make["relabels"] = "-"
                except Exception as e:
                    pass
            #finding inciname
            query = f'TYPE:MATNBR && TEXT2:({spec_join}) && -TYPE:SUBIDREL && -TEXT6:X'
            params = {"fl": config.solr_product_column}
            mat_values, mat_df = helper.get_data_from_core(
                config.solr_product, query, params)
            bdt = []
            if "TEXT3" in mat_df.columns:
                bdt = list(mat_df["TEXT3"].unique())
            display_inci_name = []
            # for spec in all_details_json:
            #     bdt+=all_details_json.get(spec).get("bdt",[])
            bdt_query = helper.replace_character_for_querying(bdt)
            query = f'BDTXT:({bdt_query}) && SUBID:({spec_join})'
            inci_values, inci_df = helper.get_data_from_core(
                config.solr_inci_name, query)
            inci_df.drop_duplicates(inplace=True)
            if "INCINAME" in list(inci_df.columns) and "BDTXT" in list(
                    inci_df.columns):
                bdtxt_df = inci_df[["BDTXT", "INCINAME"]]
                bdtxt_df.drop_duplicates(inplace=True)
                bdtxt_list = bdtxt_df.values.tolist()
                for bdtxt, inci in bdtxt_list:
                    temp = bdtxt + (config.pipe_delimitter) + inci
                    display_inci_name.append(temp)

            # NOTE(review): json_make/namprod_str here are whatever the last
            # loop iteration above left behind; if that loop's body raised
            # before assigning them, the lines below raise NameError (caught
            # by the outer except) -- confirm intended.
            json_make["INCI_name"] = (
                config.comma_delimiter).join(display_inci_name)
            json_list.append(json_make)
            #finding material level
            # NOTE(review): assumes a single-spec request (spec_list[0]).
            spec_with_namprod = f"{spec_list[0]} - {namprod_str}"
            materials = []
            active_material = []
            all_material = []
            # if material_query!='':
            for item in mat_values:
                try:
                    json_make = {}
                    material_number = item.get("TEXT1", config.hypen_delimiter)
                    description = item.get("TEXT4", config.hypen_delimiter)
                    bdt = item.get("TEXT3", config.hypen_delimiter)
                    # TEXT5 == 'X' marks an inactive material; others are
                    # collected in both the active and the full list.
                    if str(item.get("TEXT5")).strip() != 'X':
                        json_make["material_Number"] = material_number
                        json_make["description"] = description
                        json_make["bdt"] = bdt
                        json_make["real_Spec_Id"] = spec_with_namprod
                        active_material.append(json_make)
                        json_make = {}
                    json_make["material_Number"] = material_number
                    json_make["description"] = description
                    json_make["bdt"] = bdt
                    json_make["real_Spec_Id"] = spec_with_namprod
                    all_material.append(json_make)
                except Exception as e:
                    pass
            #Finding usage for compositon
            cas_list = []
            for spec in all_details_json:
                cas_list += all_details_json.get(spec).get("pure_spec_id")
            cas_query = (config.or_delimiter).join(cas_list)
            spec_query = (config.or_delimiter).join(spec_list)
            std_hund_list, usage_catgory, legal_list, legal_usage = find_zusage(
                all_details_json, cas_query, spec_query)
            # Default to the first available usage when fetching details.
            if len(usage_catgory) > 0:
                validity = usage_catgory[0]
                std_values = find_std_hundrd_composition_details(
                    validity, cas_query, spec_query, req_body,
                    all_details_json, spec_with_namprod)
            else:
                std_values = []
                std_hund_list = []
            if len(legal_usage) > 0:
                validity = legal_usage[0]
                legal_values = find_legal_composition_details(
                    validity, cas_query, spec_query, req_body,
                    all_details_json, cas_list, spec_with_namprod)
            else:
                legal_values = {"legal_composition": [], "svt": []}
                legal_list = []
            #finding default value for std composition
            json_make = {}
            json_make["product_Level"] = json_list
            json_make["active_material"] = active_material
            json_make["all_material"] = all_material
            json_make["std_hund_usage"] = std_hund_list
            json_make["legal_usage"] = legal_list
            json_make["std_values"] = std_values
            json_make["legal_values"] = legal_values
            product_attributes_result = [json_make]
        elif sub_category in [
                "Standard, 100 % & INCI Composition", "Legal Composition"
        ]:
            all_details_json, spec_list, material_list = helper.construct_common_level_json(
                req_body)
            cas_list = []
            spec_list = []
            for spec in all_details_json:
                spec_list.append(spec)
                cas_list += all_details_json.get(spec).get("pure_spec_id")
            cas_query = (config.or_delimiter).join(cas_list)
            spec_query = (config.or_delimiter).join(spec_list)
            # Without a validity, the caller only wants the usage lists;
            # with one, the full composition details are returned.
            if validity is None:
                std_hund_list, usage_catgory, legal_list, legal_usage = find_zusage(
                    all_details_json, cas_query, spec_query)
                if sub_category == "Standard, 100 % & INCI Composition":
                    return std_hund_list
                elif sub_category == "Legal Composition":
                    return legal_list
            if validity is not None:
                if sub_category == "Standard, 100 % & INCI Composition":
                    std_values = find_std_hundrd_composition_details(
                        validity, cas_query, spec_query, req_body,
                        all_details_json)
                    return std_values
                elif sub_category == "Legal Composition":
                    legal_values = find_legal_composition_details(
                        validity, cas_query, spec_query, req_body,
                        all_details_json, cas_list)
                    return legal_values
        return product_attributes_result
    except Exception as e:
        # Best-effort: return whatever was built before the failure.
        return product_attributes_result