def action_results(model, version=None, output_variables=False):
    ''' Returns a JSON string with the full results for a given model and version '''

    if model is None:
        return False, 'Empty model label'

    rdir = utils.model_path(model, version)
    if not os.path.isfile(os.path.join(rdir, 'results.pkl')):
        return False, 'results not found'

    from flame.conveyor import Conveyor

    conveyor = Conveyor()
    with open(os.path.join(rdir, 'results.pkl'), 'rb') as handle:
        conveyor.load(handle)

    return True, conveyor.getJSON()
def action_results(model, version=None, output_variables=False):
    ''' Returns an object with the full results for a given model and version '''

    if model is None:
        return False, {'code': 1, 'message': 'Empty model label'}

    results_path = utils.model_path(model, version)
    results_file = os.path.join(results_path, 'model-results.pkl')
    if not os.path.isfile(results_file):
        return False, {'code': 0, 'message': 'Results file not found'}

    conveyor = Conveyor()
    with open(results_file, 'rb') as handle:
        conveyor.load(handle)

    return True, conveyor
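# A minimal usage sketch for the action_results() variant above, which returns
# a Conveyor object. The model label 'MyModel' is illustrative only, and the
# getJSON() call is assumed to be available because the older variant uses it
# to serialize results.
if __name__ == '__main__':
    success, result = action_results('MyModel', version=1)
    if success:
        # result is a Conveyor object holding the build results
        print(result.getJSON())
    else:
        print('error:', result['message'])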
def action_searches_result(label):
    '''
    Try to retrieve the search results stored under the given label.

    Returns
      - (False, message) if the directory or the search pickle file cannot be found
      - (True, JSON) with the results otherwise
    '''
    opath = tempfile.gettempdir()
    if not os.path.isdir(opath):
        return False, f'directory {opath} not found'

    # default in case label was not provided
    if label is None:
        label = 'temp'

    iconveyor = Conveyor()

    search_pkl_path = os.path.join(opath, 'similars-' + label + '.pkl')
    if not os.path.isfile(search_pkl_path):
        return False, f'file {search_pkl_path} not found'

    with open(search_pkl_path, 'rb') as handle:
        success, message = iconveyor.load(handle)

    if not success:
        print(f'error reading prediction results with message {message}')
        return False, None

    if not iconveyor.isKey('search_results'):
        return False, 'search results not found'

    results = iconveyor.getVal('search_results')
    names = iconveyor.getVal('obj_nam')
    if iconveyor.isKey('SMILES'):
        smiles = iconveyor.getVal('SMILES')

    if len(results) != len(names):
        return False, 'results length does not match names'

    # console output
    for i in range(len(results)):
        if iconveyor.isKey('SMILES'):
            print(f'similars to {names[i]} [{smiles[i]}]')
        else:
            print(f'similars to {names[i]}')

        iresult = results[i]
        for j in range(len(iresult['distances'])):
            dist = iresult['distances'][j]
            name = iresult['names'][j]
            smil = iresult['SMILES'][j]
            print(f'   {dist:.3f} : {name} [{smil}]')

    # return a JSON generated by iconveyor
    return True, iconveyor.getJSON()
def action_predictions_result(label, output='text'):
    '''
    Try to retrieve the prediction results stored under the given label.

    Returns
      - (False, None or error dict) if the directory or the prediction pickle file cannot be found
      - (True, object) with the results otherwise
    '''
    # get the predictions repository path
    predictions_path = pathlib.Path(utils.predictions_repository_path())
    label_path = predictions_path.joinpath(label)

    if not os.path.isdir(label_path):
        if output != 'text':
            return False, {'code': 0, 'message': f'directory {label_path} not found'}
        print(f'directory {label_path} not found')
        return False, None

    result_path = label_path.joinpath('prediction-results.pkl')
    if not result_path.is_file():
        if output != 'text':
            return False, {'code': 0, 'message': f'predictions not found for {label} directory'}
        print(f'predictions not found for {label} directory')
        return False, None

    iconveyor = Conveyor()

    with open(result_path, 'rb') as handle:
        success, message = iconveyor.load(handle)

    if not success:
        if output != 'text':
            return False, {'code': 1, 'message': f'error reading prediction results with message {message}'}
        print(f'error reading prediction results with message {message}')
        return False, None

    # console output
    print_prediction_result(('obj_num', 'number of objects', iconveyor.getVal('obj_num')))

    if iconveyor.isKey('external-validation'):
        for val in iconveyor.getVal('external-validation'):
            print_prediction_result(val)

    if iconveyor.isKey('values'):
        for i in range(iconveyor.getVal('obj_num')):
            print(iconveyor.getVal('obj_nam')[i], '\t',
                  float("{0:.4f}".format(iconveyor.getVal('values')[i])))

    # return the conveyor object
    return True, iconveyor
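# A minimal usage sketch for action_predictions_result(). The label
# 'myprediction' is illustrative only; it assumes a prediction with that
# label exists in the predictions repository.
if __name__ == '__main__':
    success, result = action_predictions_result('myprediction', output='JSON')
    if success:
        # result is a Conveyor object; obj_num is the key printed by the function above
        print('objects predicted:', result.getVal('obj_num'))
    elif isinstance(result, dict):
        print('error:', result['message'])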
def action_searches_result(label, output='text'):
    '''
    Try to retrieve the search results stored under the given label.

    Returns
      - (False, None or error dict) if the directory or the search pickle file cannot be found
      - (True, object) with the results otherwise
    '''
    opath = tempfile.gettempdir()
    if not os.path.isdir(opath):
        if output == 'JSON':
            return False, {'code': 1, 'message': f'directory {opath} not found'}
        print(f'directory {opath} not found')
        return False, None

    # default in case label was not provided
    if label is None:
        label = 'temp'

    iconveyor = Conveyor()

    search_pkl_path = os.path.join(opath, 'similars-' + label + '.pkl')
    if not os.path.isfile(search_pkl_path):
        if output == 'JSON':
            return False, {'code': 0, 'message': f'search results not found for {label}'}
        print(f'search results not found for {label}')
        return False, f'file {search_pkl_path} not found'

    with open(search_pkl_path, 'rb') as handle:
        success, message = iconveyor.load(handle)

    if not success:
        if output == 'JSON':
            return False, {'code': 1, 'message': f'error reading search results with message {message}'}
        print(f'error reading search results with message {message}')
        return False, None

    if not iconveyor.isKey('search_results'):
        if output == 'JSON':
            return False, {'code': 1, 'message': 'search results not found'}
        return False, 'search results not found'

    results = iconveyor.getVal('search_results')
    names = iconveyor.getVal('obj_nam')
    if iconveyor.isKey('SMILES'):
        smiles = iconveyor.getVal('SMILES')

    if len(results) != len(names):
        if output == 'JSON':
            return False, {'code': 1, 'message': 'results length does not match names'}
        return False, 'results length does not match names'

    # console output
    for i in range(len(results)):
        if iconveyor.isKey('SMILES'):
            print(f'similars to {names[i]} [{smiles[i]}]')
        else:
            print(f'similars to {names[i]}')

        iresult = results[i]
        for j in range(len(iresult['distances'])):
            dist = iresult['distances'][j]
            # use the same key ('obj_nam') for the presence test and the access
            name = iresult['obj_nam'][j] if 'obj_nam' in iresult else '-'
            smil = iresult['SMILES'][j] if 'SMILES' in iresult else '-'
            idv = iresult['obj_id'][j] if 'obj_id' in iresult else '-'
            act = iresult['ymatrix'][j] if 'ymatrix' in iresult else '-'
            print(f'   {dist:.3f} : {name} {idv} {act} [{smil}]')

    # return the conveyor object
    return True, iconveyor
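# A minimal usage sketch for the action_searches_result() variant above. It
# relies on the default label ('temp') used when None is passed; the keys read
# here ('search_results', 'distances') are the ones handled by the function.
if __name__ == '__main__':
    success, result = action_searches_result(None, output='JSON')
    if success:
        for item in result.getVal('search_results'):
            print(len(item['distances']), 'similar compounds found')
    else:
        print('search not available:', result)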
class Documentation: ''' Class storing the information needed to documentate models Fields are loaded from a YAML file (documentation.yaml) ... Attributes ---------- fields : dict fields in the documentation version : int documentation version Methods ------- load_parameters() Accesses to param file to retrieve all information needed to document the model. load_results() Accesses to build results to retrieve all information needed to document the model. assign_parameters() Fill documentation values corresponding to model parameter values assign_results() Assign result values to documentation fields get_upf_template() creates a spreedsheet QMRF-like get_prediction_template() Creates a reporting document for predictions ''' def __init__(self, model, version=0, context='model'): ''' Load the fields from the documentation file''' self.model = model self.version = version self.fields = None self.parameters = Parameters() self.conveyor = None # obtain the path and the default name of the model documents documentation_file_path = utils.model_path(self.model, self.version) documentation_file_name = os.path.join(documentation_file_path, 'documentation.yaml') # load the main class dictionary (p) from this yaml file if not os.path.isfile(documentation_file_name): raise Exception('Documentation file not found') try: with open(documentation_file_name, 'r') as documentation_file: self.fields = yaml.safe_load(documentation_file) except Exception as e: # LOG.error(f'Error loading documentation file with exception: {e}') raise e success, message = self.parameters.loadYaml(model, version) if not success: print( f'Parameters could not be loaded. {message}. Please make sure endpoint and version are correct' ) return # Remove this after acc #self.load_parameters() if context == 'model': self.load_results() self.assign_parameters() self.assign_results() self.autocomplete_documentation() self.setVal('md5', self.idataHash()) def safe_copy(inputfile): ''' this function makes sure that the input file contains only printable chars ''' def delta(self, model, version, doc, iformat='YAML', isSpace=False): ''' load a set of parameters from the configuration file present at the model directory also, inserts the keys present in the param_file provided, assuming that it contains a YAML-compatible format, like the one generated by manage adds some parameters identifying the model and the hash of the configuration file ''' # input is a string, either in JSON or YAML format # this is the typical input sent by if iformat not in ['JSON', 'JSONS', 'YAML', 'YAMLS']: return False, 'input format not recognized' if iformat == 'JSONS': try: newp = json.loads(doc) except Exception as e: return False, str(e) elif iformat == 'YAMLS': try: newp = yaml.load(doc) except Exception as e: return False, str(e) # input is a file, either in YAML or JSON format else: try: with open(doc, 'r') as pfile: if iformat == 'YAML': newp = yaml.safe_load(pfile) elif iformat == 'JSON': newp = json.load(pfile) except Exception as e: return False, str(e) # update interna dict with keys in the input file (delta) black_list = [] for key in newp: if key not in black_list: val = newp[key] # YAML define null values as 'None, which are interpreted # as strings if val == 'None': val = None if isinstance(val, dict): for inner_key in val: inner_val = val[inner_key] if inner_val == 'None': inner_val = None self.setInnerVal(key, inner_key, inner_val) #print ('@delta: adding',key, inner_key, inner_val) else: self.setVal(key, val) #print ('@delta: 
adding',key,val,type(val)) # dump internal dict to the parameters file if isSpace: parameters_file_path = utils.space_path(model, version) else: parameters_file_path = utils.model_path(model, version) parameters_file_name = os.path.join(parameters_file_path, 'documentation.yaml') try: with open(parameters_file_name, 'w') as pfile: yaml.dump(self.fields, pfile) except Exception as e: return False, 'unable to write parameters' self.setVal('md5', self.idataHash()) return True, 'OK' def load_results(self): ''' Load results pickle with model information ''' # obtain the path and the default name of the results file results_file_path = utils.model_path(self.model, self.version) results_file_name = os.path.join(results_file_path, 'model-results.pkl') self.conveyor = Conveyor() # load the main class dictionary (p) from this yaml file if not os.path.isfile(results_file_name): raise Exception('Results file not found') try: with open(results_file_name, "rb") as input_file: self.conveyor.load(input_file) except Exception as e: # LOG.error(f'No valid results pickle found at: # {results_file_name}') raise e def getVal(self, key): ''' Return the value of the key parameter or None if it is not found in the parameters dictionary ''' if not key in self.fields: return None if 'value' in self.fields[key]: return self.fields[key]['value'] return None def getDict(self, key): ''' Return the value of the key parameter or None if it ises. not found in the parameters dictionary ''' d = {} if not key in self.fields: return d element = self.fields[key]['value'] if isinstance(element, dict): # iterate keys and copy to the temp dictionary # the key and the content of 'value' for k, v in element.items(): if 'value' in v: d[k] = v['value'] return d def setVal(self, key, value): ''' Sets the parameter defined by key to the given value ''' # for existing keys, replace the contents of 'value' if key in self.fields: if "value" in self.fields[key]: if not isinstance(self.fields[key]['value'], dict): self.fields[key]["value"] = value # this should never happen, since value is never a dictionary # else: # for k in value.keys(): # self.fields[key][k] = value[k] # this behaviour is deprecated, do not add new keys # # for new keys, create a new element with 'value' key # else: # self.fields[key] = {'value': value} def setInnerVal(self, okey, ikey, value): ''' Sets a parameter within an internal dictionary. The entry is defined by a key of the outer dictionary (okey) and a second key in the inner dicctionary (ikey). The paramenter will be set to the given value This function test the existence of all the keys and dictionaries to prevent crashes and returns without setting the value if any error is found ''' if not okey in self.fields: return if not "value" in self.fields[okey]: return odict = self.fields[okey]['value'] if not isinstance(odict, dict): return # now we are sure that odict is the right inner dictionary if not ikey in odict: return # algorithm parameters not present in the template if not isinstance(odict[ikey], dict): odict['value'] = value return # keys present in the template if "value" in odict[ikey]: odict[ikey]["value"] = value def appVal(self, key, value): ''' Appends value to the end of existing key list ''' if not key in self.fields: return if "value" in self.fields[key]: vt = self.fields[key]['value'] # if the key is already a list, append the new value at the end if isinstance(vt, list): self.fields[key]['value'].append(value) # ... 
otherwyse, create a list with the previous content and the # new value else: self.fields[key]['value'] = [vt, value] def dumpJSON(self): return json.dumps(self.fields, allow_nan=True) def dumpYAML(self): yaml_out = [] order = [ 'ID', 'Version', 'Model_title', 'Model_description', 'Keywords', 'Contact', 'Institution', 'Date', 'Endpoint', 'Endpoint_units', 'Interpretation', 'Dependent_variable', 'Species', 'Limits_applicability', 'Experimental_protocol', 'Model_availability', 'Data_info', 'Algorithm', 'Software', 'Descriptors', 'Algorithm_settings', 'AD_method', 'AD_parameters', 'Goodness_of_fit_statistics', 'Internal_validation_1', 'Internal_validation_2', 'External_validation', 'Comments', 'Other_related_models', 'Date_of_QMRF', 'Date_of_QMRF_updates', 'QMRF_updates', 'References', 'QMRF_same_models', 'Mechanistic_basis', 'Mechanistic_references', 'Supporting_information', 'Comment_on_the_endpoint', 'Endpoint_data_quality_and_variability', 'Descriptor_selection' ] for ik in order: if ik in self.fields: k = ik v = self.fields[k] ivalue = '' idescr = '' ioptio = '' ## newest parameter formats are extended and contain ## rich metainformation for each entry if 'value' in v: if not isinstance(v['value'], dict): ivalue = v['value'] else: # print header of dictionary yaml_out.append(f'{k} :') # iterate keys assuming existence of value and description for intk in v['value']: intv = v['value'][intk] if not isinstance(intv, dict): yaml_out.append( f' {intk:27} : {str(intv):30}' ) #{iioptio} {iidescr}') else: #print(intk) intv = v['value'][intk] iivalue = '' if "value" in intv: iivalue = intv["value"] # else: # iivalue = intv iidescr = '' if "description" in intv and intv[ "description"] is not None: iidescr = intv["description"] iioptio = '' if 'options' in intv: toptio = intv['options'] if isinstance(toptio, list): if toptio != [None]: iioptio = f' {toptio}' if isinstance(iivalue, float): iivalue = f'{iivalue:f}' elif iivalue is None: iivalue = '' yaml_out.append( f' {intk:27} : {str(iivalue):30} #{iioptio} {iidescr}' ) continue if 'description' in v: idescr = v['description'] if 'options' in v: toptio = v['options'] if isinstance(toptio, list): ioptio = f' {toptio}' yaml_out.append( f'{k:30} : {str(ivalue):30} #{ioptio} {idescr}') return (yaml_out) def dumpExcel(self, oname): # openpyxl should be installed in the environment # pip install openpyxl from openpyxl import Workbook from openpyxl.styles import Font, NamedStyle, Alignment # from openpyxl.comments import Comment wb = Workbook() ws = wb.active ws.title = f"Model {self.model} documentation" alignment_style = Alignment(vertical='top', wrapText=True) # Label Style Label = NamedStyle(name="Label") Label.font = Font(name='Calibri', size=11, bold=True) Label.alignment = alignment_style ws.column_dimensions['A'].width = 25.10 ws.column_dimensions['B'].width = 28.00 ws.column_dimensions['C'].width = 60.00 ws.column_dimensions['D'].width = 60.00 # sections of the document, specifying the document keys which will be listed sections = [ ('General model information', [ 'ID', 'Version', 'Model_title', 'Model_description', 'Keywords', 'Contact', 'Institution', 'Date', 'Endpoint', 'Endpoint_units', 'Interpretation', 'Dependent_variable', 'Species', 'Limits_applicability', 'Experimental_protocol', 'Model_availability', 'Data_info' ]), ('Algorithm and software', [ 'Algorithm', 'Software', 'Descriptors', 'Algorithm_settings', 'AD_method', 'AD_parameters', 'Goodness_of_fit_statistics', 'Internal_validation_1', 'Internal_validation_2', 'External_validation', 
'Comments' ]), ('Other information', [ 'Other_related_models', 'Date_of_QMRF', 'Date_of_QMRF_updates', 'QMRF_updates', 'References', 'QMRF_same_models', 'Mechanistic_basis', 'Mechanistic_references', 'Supporting_information', 'Comment_on_the_endpoint', 'Endpoint_data_quality_and_variability', 'Descriptor_selection' ]) ] #Save the position and name of the label for the first and last section position = [] name = [sections[0][1][0], 'Other Comments'] count = 1 for isection in sections: for ik in isection[1]: label_k = ik.replace('_', ' ') if label_k == 'Internal validation 2' or label_k == 'External validation': ws[f"A{count}"] = label_k ws[f'A{count}'].style = Label else: ws[f"B{count}"] = label_k ws[f"B{count}"].style = Label if ik in self.fields: # set defaults for value ivalue = '' #v is the selected entry in the documentation dictionary v = self.fields[ik] ## newest parameter formats are extended and contain ## rich metainformation for each entry if 'value' in v: ivalue = v['value'] if isinstance(ivalue, dict): ws[f"A{count}"] = label_k ws[f"A{count}"].style = Label end = (count) + (len(ivalue) - 1) for intk in ivalue: label_ik = intk.replace('_', ' ') # label_ik = intk.replace('_f', '').replace('_', ' ') ws[f'B{count}'] = label_ik ws[f'B{count}'].style = Label intv = ivalue[intk] if not isinstance(intv, dict): iivalue = intv if iivalue is None: iivalue = " " else: intv = ivalue[intk] iivalue = '' if 'value' in intv: iivalue = intv["value"] if iivalue is None: iivalue = '' ws[f'D{count}'] = intv['description'] ws[f'D{count}'].alignment = alignment_style ws[f'C{count}'] = f'{str(iivalue)}' ws[f'C{count}'].font = Font(name='Calibri', size=11, color='3465a4') ws[f'C{count}'].alignment = alignment_style ws.merge_cells(f'A{count}:A{end}') count += 1 else: ws[f'D{count}'] = v['description'] ws[f'D{count}'].alignment = alignment_style if label_k == 'Experimental protocol' or label_k == 'Comments': position.append(count) if ivalue is None: ivalue = '' ws[f'C{count}'] = f'{str(ivalue)}' ws[f'C{count}'].font = Font(name='Calibri', size=11, color='3465a4') ws[f'C{count}'].alignment = alignment_style count += 1 itr = 0 for i in position: if itr == 0: ws[f'A{1}'] = name[itr] ws[f"A{1}"].style = Label ws.merge_cells(f'A{1}:A{i}') else: ws[f'A{i}'] = name[itr] ws[f"A{i}"].style = Label ws.merge_cells(f'A{i}:A{count-1}') itr += 1 try: wb.save(oname) except: return False, f'error saving document as {oname}' return True, 'OK' def dumpWORD(self, oname): # python-docx should be installed in the environment # pip install python-docx from docx import Document from docx.shared import Pt from docx.shared import RGBColor # most of the formatting is included in this template, where we # redefined default styles for Normal, 'heading 1' and 'Table Grid' # # note that this template can be easily customized with a company # or project logo path = os.path.dirname(os.path.abspath(__file__)) path = os.path.join(path, 'children') path = os.path.join(path, 'documentation_template.docx') document = Document(path) # define style for normal and heading 1 # normal_style = document.styles['Normal'] # normal_font = normal_style.font # normal_font.name = 'Calibri' # normal_font.size = Pt(10) # heading_style = document.styles['heading 1'] # heading_font = heading_style.font # heading_font.name = 'Calibri' # heading_font.color.rgb = RGBColor(0x00, 0x00, 0x00) # heading_font.size = Pt(12) # withd of column 1 and 2 wcol1 = 1400000 wcol2 = 4200000 # withd of internal columns i and 2 wicol1 = 1200000 wicol2 = 2900000 # sections of 
the document, specifying the document keys which will be listed sections = [ ('General model information', [ 'ID', 'Version', 'Model_title', 'Model_description', 'Keywords', 'Contact', 'Institution', 'Date', 'Endpoint', 'Endpoint_units', 'Interpretation', 'Dependent_variable', 'Species', 'Limits_applicability', 'Experimental_protocol', 'Model_availability', 'Data_info' ]), ('Algorithm and software', [ 'Algorithm', 'Software', 'Descriptors', 'Algorithm_settings', 'AD_method', 'AD_parameters', 'Goodness_of_fit_statistics', 'Internal_validation_1', 'Internal_validation_2', 'External_validation', 'Comments' ]), ('Other information', [ 'Other_related_models', 'Date_of_QMRF', 'Date_of_QMRF_updates', 'QMRF_updates', 'References', 'QMRF_same_models', 'Mechanistic_basis', 'Mechanistic_references', 'Supporting_information', 'Comment_on_the_endpoint', 'Endpoint_data_quality_and_variability', 'Descriptor_selection' ]) ] for isection in sections: # heading with the section name document.add_heading(isection[0], level=1) # table with one row per key table = document.add_table(rows=len(isection[1]), cols=2) table.style = 'Table Grid' table.autofit = False count = 0 for ik in isection[1]: # add a row and format two columns row = table.rows[count] row.cells[0].width = wcol1 row.cells[1].width = wcol2 label_k = ik.replace('_', ' ') row.cells[0].text = f'{label_k}' count = count + 1 # define value if ik in self.fields: # set defaults for value ivalue = '' # v is the selected entry in the documentation dictionary v = self.fields[ik] ## newest parameter formats are extended and contain ## rich metainformation for each entry if 'value' in v: ivalue = v['value'] # if ivalue is a dictionary create a nested table and iterate # to represent the keys within if isinstance(ivalue, dict): row.cells[0].text = f'{label_k}' itable = row.cells[1].add_table(rows=len(ivalue), cols=2) itable.style = 'Table Grid' itable.autofit = False icount = 0 # iterate keys assuming existence of value and description for intk in ivalue: label_ik = intk.replace('_', ' ') # label_ik = intk.replace('_f', '').replace('_', ' ') irow = itable.rows[icount] irow.cells[0].width = wicol1 irow.cells[1].width = wicol2 icount = icount + 1 intv = ivalue[intk] if not isinstance(intv, dict): iivalue = intv else: intv = ivalue[intk] iivalue = '' if "value" in intv: iivalue = intv["value"] if isinstance(iivalue, float): iivalue = f'{iivalue:f}' elif iivalue is None: iivalue = '' irow.cells[0].text = f'{label_ik}' irow.cells[1].text = f'{str(iivalue)}' # if the key is not a dictionary just insert the value inside else: if ivalue is None: ivalue = '' row.cells[1].text = f'{str(ivalue)}' try: document.save(oname) except: return False, f'error saving document as {oname}' return True, 'OK' def assign_parameters(self): ''' Fill documentation values corresponding to model parameter values ''' if not self.parameters: raise ('Parameters were not loaded') # self.fields['Algorithm']['subfields']['algorithm']['value'] = \ # self.parameters.getVal('model') self.setInnerVal('Algorithm', 'algorithm', self.parameters.getVal('model')) if self.parameters.getVal('input_type') == 'molecule': self.setInnerVal('Algorithm', 'descriptors', self.parameters.getVal('computeMD_method')) cv_method = f'{self.parameters.getVal("ModelValidationCV")} ({str(self.parameters.getVal("ModelValidationN"))})' self.setInnerVal('Algorithm', 'cross-validation', cv_method) features = self.parameters.getVal("feature_selection") if features is not None: features += f' 
({self.parameters.getVal("feature_number")})' self.setInnerVal('Descriptors', 'descriptors', self.parameters.getVal('computeMD_method')) self.setInnerVal('Descriptors', 'scaling', self.parameters.getVal('modelAutoscaling')) self.setInnerVal('Descriptors', 'selection_method', features) elif self.parameters.getVal('input_type') == 'model_ensemble': self.setInnerVal('Descriptors', 'descriptors', 'ensemble models') if self.parameters.getVal('conformal'): self.setInnerVal('AD_method', 'name', 'conformal prediction') # self.setInnerVal('AD_parameters', 'confidence', f'{self.parameters.getVal("conformalConfidence")}') conformal_settings_dict = {} conformal_settings_dict['confidence'] = self.parameters.getVal( "conformalConfidence") conformal_settings = self.parameters.getVal('conformal_settings') if conformal_settings is not None: for key in conformal_settings: conformal_settings_dict[key] = conformal_settings[key][ "value"] self.fields['AD_parameters']['value'] = conformal_settings_dict def assign_results(self): ''' Assign result values to documentation fields ''' # Accepted validation keys # allowed = ['Conformal_accuracy', 'Conformal_mean_interval', # 'Conformal_coverage', 'Conformal_accuracy', # 'Q2', 'SDEP', # 'SensitivityPred', 'SpecificityPred', 'MCCpred'] # gof_allowed = ['R2', 'SDEC', 'scoringR' # 'Sensitivity', 'Specificity', 'MCC'] allowed = [ 'Conformal_accuracy', 'Conformal_mean_interval', 'Conformal_coverage', 'Q2', 'SDEP', 'scoringP', 'Sensitivity', 'Specificity', 'MCC' ] gof_allowed = [ 'Conformal_accuracy_f', 'Conformal_mean_interval_f', 'Conformal_coverage_f', 'R2', 'SDEC', 'scoringR', 'Sensitivity_f', 'Specificity_f', 'MCC_f' ] model_info = self.conveyor.getVal('model_build_info') validation = self.conveyor.getVal('model_valid_info') # print(model_info) # The code below to filter the hyperparameters to be # reported. 
# Get parameter keys for the used estimator #param_key = self.parameters.getVal('model') + '_parameters' # Get parameter dictionary #estimator_params = self.parameters.getDict(param_key) self.fields['Algorithm_settings']['value'] = \ (self.conveyor.getVal('estimator_parameters')) # print (self.conveyor.getVal('estimator_parameters')) # Horrendous patch to solve backcompatibility problem if 'subfields' in self.fields['Data_info']: sub_label = 'subfields' else: sub_label = 'value' self.fields['Data_info'][sub_label]['training_set_size']['value'] = \ model_info[0][2] self.fields['Descriptors'][sub_label]['final_number']['value'] = \ model_info[1][2] self.fields['Descriptors'][sub_label]['ratio']['value'] = \ '{:0.2f}'.format(model_info[1][2]/model_info[0][2]) internal_val = dict() for stat in validation: if stat[0] in allowed: internal_val[stat[0]] = float("{0:.2f}".format(stat[2])) if internal_val: self.fields['Internal_validation_1']\ ['value'] = internal_val gof = dict() for stat in validation: if stat[0] in gof_allowed: gof[stat[0]] = float("{0:.2f}".format(stat[2])) if gof: self.fields['Goodness_of_fit_statistics']\ ['value'] = gof def get_string(self, dictionary): ''' Convert a dictionary (from documentation.yaml) to string format for the model template ''' text = '' for key, val in dictionary.items(): text += f'{key} : {val["value"]}\n' return text def get_string2(self, dictionary): ''' Convert a dictionary (from parameter file) to string format for the model template ''' text = '' for key, val in dictionary.items(): try: if isinstance(str(val), str): text += f'{key} : {val}\n' except: continue return text def get_upf_template(self): ''' This function creates a tabular model template based on the QMRF document type ''' template = pd.DataFrame() template['ID'] = [''] template['Version'] = [''] template['Description'] = [''] template['Contact'] = [''] template['Institution'] = [''] template['Date'] = [''] template['Endpoint'] = [''] template['Endpoint_units'] = [''] template['Dependent_variable'] = [''] template['Species'] = [''] template['Limits_applicability'] = [''] template['Experimental_protocol'] = [''] template['Data_info'] = [ self.get_string(self.fields['Data_info']['subfields']) ] template['Model_availability'] = [\ self.get_string(self.fields['Model_availability'] ['subfields'])] template['Algorithm'] = [ self.get_string(self.fields['Algorithm']['subfields']) ] template['Software'] = [ self.get_string(self.fields['Software']['subfields']) ] template['Descriptors'] = [ self.get_string(self.fields['Descriptors']['subfields']) ] template['Algorithm_settings'] = [ self.get_string(self.fields['Algorithm_settings']['subfields']) ] template['AD_method'] = [ self.get_string(self.fields['AD_method']['subfields']) ] template['AD_parameters'] = [self.fields['AD_parameters']['value']] template['Goodness_of_fit_statistics'] = [self.fields\ ['Goodness_of_fit_statistics']['value']] template['Internal_validation_1'] = [ self.fields['Internal_validation_1']['value'] ] template.to_csv('QMRF_template.tsv', sep='\t') def get_upf_template2(self): ''' This function creates a tabular model template based on the QMRF document type ''' fields = ['ID', 'Version', 'Contact', 'Institution',\ 'Date', 'Endpoint', 'Endpoint_units', 'Dependent_variable', 'Species',\ 'Limits_applicability', 'Experimental_protocol', 'Data_info',\ 'Model_availability', 'Algorithm', 'Software', 'Descriptors',\ 'Algorithm_settings', 'AD_method', 'AD_parameters',\ 'Goodness_of_fit_statistics', 'Internal_validation_1' ] template = 
pd.DataFrame( columns=['Field', 'Parameter name', 'Parameter value']) for field in fields: try: subfields = self.fields[field]['subfields'] except: subfields = self.fields[field]['value'] if subfields is not None: for index, subfield in enumerate(subfields): field2 = '' if index == 0: field2 = field else: field2 = "" value = str(subfields[subfield]['value']) # None types are retrieved as str from yaml?? if value == "None": value = "" row = dict(zip(['Field', 'Parameter name', 'Parameter value'],\ [field2, subfield, value])) template = template.append(row, ignore_index=True) else: value = str(self.fields[field]['value']) if value == 'None': value = "" row = dict(zip(['Field', 'Parameter name', 'Parameter value'],\ [field, "", value])) template = template.append(row, ignore_index=True) template.to_csv('QMRF_template3.tsv', sep='\t', index=False) def get_prediction_template(self): ''' This function creates a tabular model template based on the QMRF document type ''' # obtain the path and the default name of the results file results_file_path = utils.model_path(self.model, self.version) results_file_name = os.path.join(results_file_path, 'prediction-results.pkl') conveyor = Conveyor() # load the main class dictionary (p) from this yaml file if not os.path.isfile(results_file_name): raise Exception('Results file not found') try: with open(results_file_name, "rb") as input_file: conveyor.load(input_file) except Exception as e: # LOG.error(f'No valid results pickle found at: {results_file_name}') raise e # First get Name, Inchi and InChIkey names = conveyor.getVal('obj_nam') smiles = conveyor.getVal('SMILES') inchi = [AllChem.MolToInchi(AllChem.MolFromSmiles(m)) for m in smiles] inchikeys = [ AllChem.InchiToInchiKey( AllChem.MolToInchi(AllChem.MolFromSmiles(m))) for m in smiles ] predictions = [] applicability = [] if self.parameters['quantitative']['value']: raise ('Prediction template for quantitative endpoints' ' not implemented yet') if not self.parameters['conformal']['value']: predictions = conveyor.getVal('values') else: c0 = np.asarray(conveyor.getVal('c0')) c1 = np.asarray(conveyor.getVal('c1')) predictions = [] for i, j in zip(c0, c1): prediction = '' if i == j: prediction = 'out of AD' applicability.append('out') if i != j: if i == True: prediction = 'Inactive' else: prediction = 'Active' applicability.append('in') predictions.append(prediction) # Now create the spreedsheats for prediction # First write summary summary = ("Study name\n" + "Endpoint\n" + "QMRF-ID\n" + "(Target)Compounds\n" + "Compounds[compounds]\tName\tInChiKey\n") for name, inch in zip(names, inchikeys): summary += f'\t{name}\t{inch}\n' summary += ("\nFile\n" + "Author name\n" + "E-mail\n" + "Role\n" + "Affiliation\n" + "Date\n") with open('summary_document.tsv', 'w') as out: out.write(summary) # Now prediction details # Pandas is used to ease the table creation. 
reporting = pd.DataFrame() reporting['InChI'] = inchi reporting['CAS-RN'] = '-' reporting['SMILES'] = smiles reporting['prediction'] = predictions reporting['Applicability_domain'] = applicability reporting['reliability'] = '-' reporting['Structural_analogue_1_CAS'] = '-' reporting['Structural_analogue_1_smiles'] = '-' reporting['Structural_analogue_1_source'] = '-' reporting['Structural_analogue_1_experimental_value'] = '-' reporting['Structural_analogue_2_CAS'] = '-' reporting['Structural_analogue_2_smiles'] = '-' reporting['Structural_analogue_2_source'] = '-' reporting['Structural_analogue_2_experimental_value'] = '-' reporting['Structural_analogue_3_CAS'] = '-' reporting['Structural_analogue_3_smiles'] = '-' reporting['Structural_analogue_3_source'] = '-' reporting['Structural_analogue_3_experimental_value'] = '-' reporting.to_csv('prediction_report.tsv', sep='\t', index=False) def idataHash(self): ''' Create a md5 hash for a number of keys describing parameters relevant for idata This hash is compared between runs, to check wether idata must recompute or not the MD ''' # update with any new idata relevant parameter keylist = [ 'SDFile_name', 'SDFile_activity', 'SDFile_experimental', 'normalize_method', 'ionize_method', 'convert3D_method', 'computeMD_method', 'TSV_objnames', 'TSV_activity', 'input_type' ] idata_params = [] for i in keylist: idata_params.append(self.getVal(i)) # MD_settings is a dictionary, obtain and sort the keys+values md_params = self.getDict('MD_settings') md_list = [] for key in md_params: # combine key + value in a single string md_list.append(key + str(md_params[key])) idata_params.append(md_list.sort()) # use picke as a buffered object, neccesary to generate the hexdigest p = pickle.dumps(idata_params) return hashlib.md5(p).hexdigest() def empty_fields(self): ''' This function checks which fields do not contain values ''' emptyfields = [] for ik in self.fields: v = self.fields[ik] if 'value' in v: ivalue = v['value'] if isinstance(ivalue, dict): for intk in ivalue: intv = ivalue[intk] if not isinstance(intv, dict): iivalue = intv if iivalue is None or len(str(iivalue)) is 0: emptyfields.append(intk) else: intv = ivalue[intk] iivalue = '' if intv["value"] is None or len(str( intv["value"])) is 0: emptyfields.append(intk) else: if ivalue is None or len(str(ivalue)) is 0: emptyfields.append(ik) return emptyfields def get_mols(self): return dict( zip(self.conveyor.getVal("obj_nam"), self.conveyor.getVal("SMILES"))) def autocomplete_documentation(self): """ Auto complete fields in model documentation """ #ID, Model identifier. self.fields['ID']['value'] = utils.getModelID(self.model, self.version, 'model')[1] #Version self.fields['Version']['value'] = str(self.version) #Date, Date of model development and Date of QMRF. today = date.today().strftime("%B %d, %Y") self.fields['Date']['value'] = today self.fields['Date_of_QMRF']['value'] = today #format, Format used(SDF,TSV) if self.parameters.getVal('input_type') == 'data': self.fields['Data_info']['value']['format']['value'] = 'TSV' else: self.fields['Data_info']['value']['format']['value'] = 'SDF' #Algorithm, type: QSAR. self.fields['Algorithm']['value']['type']['value'] = 'QSAR' #Model, Main modelling program, version, description and license. 
software = "Flame, 1.0rc3" fieldsapplysoftware = ['model', 'descriptors', 'applicability_domain'] for field in fieldsapplysoftware: if field == 'applicability_domain': if self.parameters.getVal('conformal'): self.fields['Software']['value'][field]['value'] = software else: self.fields['Software']['value'][field]['value'] = software
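# A minimal usage sketch for the Documentation class above, assuming the model
# 'MyModel' (version 1) has already been built so that documentation.yaml and
# model-results.pkl exist. File names are illustrative only.
if __name__ == '__main__':
    doc = Documentation('MyModel', version=1, context='model')

    # export the documentation as a Word and an Excel report
    success, message = doc.dumpWORD('MyModel_documentation.docx')
    if not success:
        print(message)

    success, message = doc.dumpExcel('MyModel_documentation.xlsx')
    if not success:
        print(message)

    # list fields still lacking a value, to guide manual completion
    print('empty fields:', doc.empty_fields())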
class Documentation: ''' Class storing the information needed to documentate models Fields are loaded from a YAML file (documentation.yaml) ... Attributes ---------- fields : dict fields in the documentation version : int documentation version Methods ------- load_parameters() Accesses to param file to retrieve all information needed to document the model. load_results() Accesses to build results to retrieve all information needed to document the model. assign_parameters() Fill documentation values corresponding to model parameter values assign_results() Assign result values to documentation fields get_upf_template() creates a spreedsheet QMRF-like get_prediction_template() Creates a reporting document for predictions ''' def __init__(self, model, version=0, context='model'): ''' Load the fields from the documentation file''' self.model = model self.version = version self.fields = None self.parameters = Parameters() self.conveyor = None # obtain the path and the default name of the model documents documentation_file_path = utils.model_path(self.model, self.version) documentation_file_name = os.path.join(documentation_file_path, 'documentation.yaml') # load the main class dictionary (p) from this yaml file if not os.path.isfile(documentation_file_name): raise Exception('Documentation file not found') try: with open(documentation_file_name, 'r') as documentation_file: self.fields = yaml.safe_load(documentation_file) except Exception as e: # LOG.error(f'Error loading documentation file with exception: {e}') raise e success, message = self.parameters.loadYaml(model, 0) if not success: print( 'Parameters could not be loaded. Please assure endpoint is correct' ) return # Remove this after acc #self.load_parameters() if context == 'model': self.load_results() self.assign_parameters() self.assign_results() self.setVal('md5', self.idataHash()) def delta(self, model, version, doc, iformat='YAML', isSpace=False): ''' load a set of parameters from the configuration file present at the model directory also, inserts the keys present in the param_file provided, assuming that it contains a YAML-compatible format, like the one generated by manage adds some parameters identifying the model and the hash of the configuration file ''' # if not self.loadYaml(model, version, isSpace): # return False, 'file not found' # parse parameter file assuning it will be in # a YAML-compatible format if iformat == 'JSONS': try: newp = json.loads(doc) except Exception as e: return False, e else: try: with open(doc, 'r') as pfile: if iformat == 'YAML': newp = yaml.safe_load(pfile) elif iformat == 'JSON': newp = json.load(pfile) except Exception as e: return False, e # update interna dict with keys in the input file (delta) black_list = [] for key in newp: if key not in black_list: val = newp[key] # YAML define null values as 'None, which are interpreted # as strings if val == 'None': val = None if isinstance(val, dict): for inner_key in val: inner_val = val[inner_key] if inner_val == 'None': inner_val = None self.setInnerVal(key, inner_key, inner_val) #print ('@delta: adding',key, inner_key, inner_val) else: self.setVal(key, val) #print ('@delta: adding',key,val,type(val)) # dump internal dict to the parameters file if isSpace: parameters_file_path = utils.space_path(model, version) else: parameters_file_path = utils.model_path(model, version) parameters_file_name = os.path.join(parameters_file_path, 'documentation.yaml') try: with open(parameters_file_name, 'w') as pfile: yaml.dump(self.fields, pfile) except Exception as e: 
return False, 'unable to write parameters' self.setVal('md5', self.idataHash()) return True, 'OK' def load_results(self): ''' Load results pickle with model information ''' # obtain the path and the default name of the results file results_file_path = utils.model_path(self.model, self.version) results_file_name = os.path.join(results_file_path, 'results.pkl') self.conveyor = Conveyor() # load the main class dictionary (p) from this yaml file if not os.path.isfile(results_file_name): raise Exception('Results file not found') try: with open(results_file_name, "rb") as input_file: self.conveyor.load(input_file) except Exception as e: # LOG.error(f'No valid results pickle found at: # {results_file_name}') raise e def getVal(self, key): ''' Return the value of the key parameter or None if it is not found in the parameters dictionary ''' if not key in self.fields: return None if 'value' in self.fields[key]: return self.fields[key]['value'] return None def getDict(self, key): ''' Return the value of the key parameter or None if it ises. not found in the parameters dictionary ''' d = {} if not key in self.fields: return d element = self.fields[key]['value'] if isinstance(element, dict): # iterate keys and copy to the temp dictionary # the key and the content of 'value' for k, v in element.items(): if 'value' in v: d[k] = v['value'] return d def setVal(self, key, value): ''' Sets the parameter defined by key to the given value ''' # for existing keys, replace the contents of 'value' if key in self.fields: if "value" in self.fields[key]: if not isinstance(self.fields[key]['value'], dict): self.fields[key]["value"] = value else: # print(key) for k in value.keys(): self.fields[key][k] = value[k] # for new keys, create a new element with 'value' key else: self.fields[key] = {'value': value} def setInnerVal(self, okey, ikey, value): ''' Sets a parameter within an internal dictionary. The entry is defined by a key of the outer dictionary (okey) and a second key in the inner dicctionary (ikey). The paramenter will be set to the given value This function test the existence of all the keys and dictionaries to prevent crashes and returns without setting the value if any error is found ''' if not okey in self.fields: return if not "value" in self.fields[okey]: return odict = self.fields[okey]['value'] if not isinstance(odict, dict): return if not ikey in odict: return if not isinstance(odict[ikey], dict): odict['value'] = value return if "value" in odict[ikey]: odict[ikey]["value"] = value else: odict[ikey] = {'value': value} def appVal(self, key, value): ''' Appends value to the end of existing key list ''' if not key in self.fields: return if "value" in self.fields[key]: vt = self.fields[key]['value'] # if the key is already a list, append the new value at the end if isinstance(vt, list): self.fields[key]['value'].append(value) # ... 
otherwyse, create a list with the previous content and the # new value else: self.fields[key]['value'] = [vt, value] def dumpJSON(self): return json.dumps(self.fields) def assign_parameters(self): ''' Fill documentation values corresponding to model parameter values ''' if not self.parameters: raise ('Parameters were not loaded') # self.fields['Algorithm']['subfields']['algorithm']['value'] = \ # self.parameters.getVal('model') self.setInnerVal('Algorithm', 'algorithm', self.parameters.getVal('model')) self.setInnerVal('Algorithm', 'descriptors', self.parameters.getVal('computeMD_method')) if self.parameters.getVal('conformal'): self.setInnerVal('AD_method', 'name', 'conformal prediction') self.setVal( 'AD_parameters', f'Conformal Significance ' f'{self.parameters.getVal("conformalSignificance")}') def assign_results(self): ''' Assign result values to documentation fields ''' # Accepted validation keys allowed = [ 'Conformal_accuracy', 'Conformal_mean_interval', 'Sensitivity', 'Specificity', 'MCC', 'Conformal_coverage', 'Conformal_accuracy', 'Q2', 'SDEP', 'SensitivityPed', 'SpecificityPred', 'SpecificityPred', 'MCCpred', 'scoringR', 'R2', 'SDEC' ] model_info = self.conveyor.getVal('model_build_info') validation = self.conveyor.getVal('model_valid_info') # The code below to filter the hyperparameters to be # reported. # Get parameter keys for the used estimator #param_key = self.parameters.getVal('model') + '_parameters' # Get parameter dictionary #estimator_params = self.parameters.getDict(param_key) self.fields['Algorithm_settings']['value'] = \ (self.conveyor.getVal('estimator_parameters')) # Horrendous patch to solve backcompatibility problem if 'subfields' in self.fields['Data_info']: sub_label = 'subfields' else: sub_label = 'value' self.fields['Data_info']\ [sub_label]['training_set_size']['value'] = \ model_info[0][2] self.fields['Data_info']\ [sub_label]['training_set_size']['value'] = \ model_info[0][2] self.fields['Descriptors']\ [sub_label]['final_number']['value'] = \ model_info[1][2] self.fields['Descriptors']\ [sub_label]['ratio']['value'] = \ '{:0.2f}'.format(model_info[1][2]/model_info[0][2]) internal_val = dict() for stat in validation: if stat[0] in allowed: internal_val[stat[0]] = float("{0:.2f}".format(stat[2])) if internal_val: self.fields['Internal_validation_1']\ ['value'] = internal_val def get_string(self, dictionary): ''' Convert a dictionary (from documentation.yaml) to string format for the model template ''' text = '' for key, val in dictionary.items(): text += f'{key} : {val["value"]}\n' return text def get_string2(self, dictionary): ''' Convert a dictionary (from parameter file) to string format for the model template ''' text = '' for key, val in dictionary.items(): try: if isinstance(str(val), str): text += f'{key} : {val}\n' except: continue return text def get_upf_template(self): ''' This function creates a tabular model template based on the QMRF document type ''' template = pd.DataFrame() template['ID'] = [''] template['Version'] = [''] template['Description'] = [''] template['Contact'] = [''] template['Institution'] = [''] template['Date'] = [''] template['Endpoint'] = [''] template['Endpoint_units'] = [''] template['Dependent_variable'] = [''] template['Species'] = [''] template['Limits_applicability'] = [''] template['Experimental_protocol'] = [''] template['Data_info'] = [ self.get_string(self.fields['Data_info']['subfields']) ] template['Model_availability'] = [\ self.get_string(self.fields['Model_availability'] ['subfields'])] 
template['Algorithm'] = [ self.get_string(self.fields['Algorithm']['subfields']) ] template['Software'] = [ self.get_string(self.fields['Software']['subfields']) ] template['Descriptors'] = [ self.get_string(self.fields['Descriptors']['subfields']) ] template['Algorithm_settings'] = [ self.get_string(self.fields['Algorithm_settings']['subfields']) ] template['AD_method'] = [ self.get_string(self.fields['AD_method']['subfields']) ] template['AD_parameters'] = [self.fields['AD_parameters']['value']] template['Goodness_of_fit_statistics'] = [self.fields\ ['Goodness_of_fit_statistics']['value']] template['Internal_validation_1'] = [ self.fields['Internal_validation_1']['value'] ] template.to_csv('QMRF_template.tsv', sep='\t') def get_upf_template2(self): ''' This function creates a tabular model template based on the QMRF document type ''' fields = ['ID', 'Version', 'Contact', 'Institution',\ 'Date', 'Endpoint', 'Endpoint_units', 'Dependent_variable', 'Species',\ 'Limits_applicability', 'Experimental_protocol', 'Data_info',\ 'Model_availability', 'Algorithm', 'Software', 'Descriptors',\ 'Algorithm_settings', 'AD_method', 'AD_parameters',\ 'Goodness_of_fit_statistics', 'Internal_validation_1' ] template = pd.DataFrame( columns=['Field', 'Parameter name', 'Parameter value']) for field in fields: try: subfields = self.fields[field]['subfields'] except: subfields = self.fields[field]['value'] if subfields is not None: for index, subfield in enumerate(subfields): field2 = '' if index == 0: field2 = field else: field2 = "" value = str(subfields[subfield]['value']) # None types are retrieved as str from yaml?? if value == "None": value = "" row = dict(zip(['Field', 'Parameter name', 'Parameter value'],\ [field2, subfield, value])) template = template.append(row, ignore_index=True) else: value = str(self.fields[field]['value']) if value == 'None': value = "" row = dict(zip(['Field', 'Parameter name', 'Parameter value'],\ [field, "", value])) template = template.append(row, ignore_index=True) template.to_csv('QMRF_template3.tsv', sep='\t', index=False) def get_prediction_template(self): ''' This function creates a tabular model template based on the QMRF document type ''' # obtain the path and the default name of the results file results_file_path = utils.model_path(self.model, self.version) results_file_name = os.path.join(results_file_path, 'prediction-results.pkl') conveyor = Conveyor() # load the main class dictionary (p) from this yaml file if not os.path.isfile(results_file_name): raise Exception('Results file not found') try: with open(results_file_name, "rb") as input_file: conveyor.load(input_file) except Exception as e: # LOG.error(f'No valid results pickle found at: {results_file_name}') raise e # First get Name, Inchi and InChIkey names = conveyor.getVal('obj_nam') smiles = conveyor.getVal('SMILES') inchi = [AllChem.MolToInchi(AllChem.MolFromSmiles(m)) for m in smiles] inchikeys = [ AllChem.InchiToInchiKey( AllChem.MolToInchi(AllChem.MolFromSmiles(m))) for m in smiles ] predictions = [] applicability = [] if self.parameters['quantitative']['value']: raise ('Prediction template for quantitative endpoints' ' not implemented yet') if not self.parameters['conformal']['value']: predictions = conveyor.getVal('values') else: c0 = np.asarray(conveyor.getVal('c0')) c1 = np.asarray(conveyor.getVal('c1')) predictions = [] for i, j in zip(c0, c1): prediction = '' if i == j: prediction = 'out of AD' applicability.append('out') if i != j: if i == True: prediction = 'Inactive' else: prediction = 'Active' 
applicability.append('in') predictions.append(prediction) # Now create the spreedsheats for prediction # First write summary summary = ("Study name\n" + "Endpoint\n" + "QMRF-ID\n" + "(Target)Compounds\n" + "Compounds[compounds]\tName\tInChiKey\n") for name, inch in zip(names, inchikeys): summary += f'\t{name}\t{inch}\n' summary += ("\nFile\n" + "Author name\n" + "E-mail\n" + "Role\n" + "Affiliation\n" + "Date\n") with open('summary_document.tsv', 'w') as out: out.write(summary) # Now prediction details # Pandas is used to ease the table creation. reporting = pd.DataFrame() reporting['InChI'] = inchi reporting['CAS-RN'] = '-' reporting['SMILES'] = smiles reporting['prediction'] = predictions reporting['Applicability_domain'] = applicability reporting['reliability'] = '-' reporting['Structural_analogue_1_CAS'] = '-' reporting['Structural_analogue_1_smiles'] = '-' reporting['Structural_analogue_1_source'] = '-' reporting['Structural_analogue_1_experimental_value'] = '-' reporting['Structural_analogue_2_CAS'] = '-' reporting['Structural_analogue_2_smiles'] = '-' reporting['Structural_analogue_2_source'] = '-' reporting['Structural_analogue_2_experimental_value'] = '-' reporting['Structural_analogue_3_CAS'] = '-' reporting['Structural_analogue_3_smiles'] = '-' reporting['Structural_analogue_3_source'] = '-' reporting['Structural_analogue_3_experimental_value'] = '-' reporting.to_csv('prediction_report.tsv', sep='\t', index=False) def idataHash(self): ''' Create a md5 hash for a number of keys describing parameters relevant for idata This hash is compared between runs, to check wether idata must recompute or not the MD ''' # update with any new idata relevant parameter keylist = [ 'SDFile_name', 'SDFile_activity', 'SDFile_experimental', 'normalize_method', 'ionize_method', 'convert3D_method', 'computeMD_method', 'TSV_varnames', 'TSV_objnames', 'TSV_activity', 'input_type' ] idata_params = [] for i in keylist: idata_params.append(self.getVal(i)) # MD_settings is a dictionary, obtain and sort the keys+values md_params = self.getDict('MD_settings') md_list = [] for key in md_params: # combine key + value in a single string md_list.append(key + str(md_params[key])) idata_params.append(md_list.sort()) # use picke as a buffered object, neccesary to generate the hexdigest p = pickle.dumps(idata_params) return hashlib.md5(p).hexdigest()
def action_info(model, version, output='text'):
    ''' Returns text or JSON with the results info for a given model and version '''

    if model is None:
        return False, 'Empty model label'

    rdir = utils.model_path(model, version)

    if not os.path.isfile(os.path.join(rdir, 'results.pkl')):
        # compatibility method: use info.pkl
        if not os.path.isfile(os.path.join(rdir, 'info.pkl')):
            return False, 'Info file not found'

        with open(os.path.join(rdir, 'info.pkl'), 'rb') as handle:
            # retrieve a pickle file containing the keys 'model_build'
            # and 'model_validate' of results
            info = pickle.load(handle)
            info += pickle.load(handle)
        # end of compatibility method
    else:
        # new method: use results.pkl
        from flame.conveyor import Conveyor

        conveyor = Conveyor()
        with open(os.path.join(rdir, 'results.pkl'), 'rb') as handle:
            conveyor.load(handle)

        info = conveyor.getVal('model_build_info')
        info += conveyor.getVal('model_valid_info')

    if info is None:
        return False, 'Info not found'

    # when this function is called from the console, output is 'text'
    # write and exit
    if output == 'text':
        LOG.info(f'informing model {model} version {version}')
        for val in info:
            if len(val) < 3:
                LOG.info(val)
            else:
                LOG.info(f'{val[0]} ({val[1]}) : {val[2]}')
        return True, 'model informed OK'

    # this point is only reached when the function is called from a web service
    # asking for a JSON; serialize the results in a list and convert it to JSON
    # (note: only the results.pkl branch defines a conveyor, so JSON output
    # requires results.pkl to be present)
    json_results = []
    for i in info:
        json_results.append(conveyor.modelInfoJSON(i))

    return True, json.dumps(json_results)
def action_info(model, version, output='text'):
    ''' Returns text or JSON with the results info for a given model and version '''

    if model is None:
        return False, 'Empty model label'

    rdir = utils.model_path(model, version)
    if not os.path.isfile(os.path.join(rdir, 'results.pkl')):
        return False, 'Info file not found'

    from flame.conveyor import Conveyor

    conveyor = Conveyor()
    with open(os.path.join(rdir, 'results.pkl'), 'rb') as handle:
        conveyor.load(handle)

    # if there is an error, return the error message
    if conveyor.getError():
        return False, conveyor.getErrorMessage()

    # collect warnings
    warning = conveyor.getWarningMessage()

    # collect build and validation info
    build_info = conveyor.getVal('model_build_info')
    valid_info = conveyor.getVal('model_valid_info')

    # merge everything
    info = None
    for iinfo in (warning, build_info, valid_info):
        if info is None:
            info = iinfo
        elif iinfo is not None:
            info += iinfo

    if info is None:
        return False, 'No relevant information found'

    # when this function is called from the console, output is 'text'
    # write and exit
    if output == 'text':
        LOG.info(f'informing model {model} version {version}')
        for val in info:
            if len(val) < 3:
                LOG.info(val)
            else:
                LOG.info(f'{val[0]} ({val[1]}) : {val[2]}')
        return True, 'model informed OK'

    # this point is only reached when the function is called from a web service
    # asking for a JSON; serialize the results in a list and convert it to JSON
    json_results = []
    for i in info:
        json_results.append(conveyor.modelInfoJSON(i))

    return True, json.dumps(json_results)
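# A minimal usage sketch for the action_info() variant above: print the build
# and validation information to the console, then retrieve the same
# information as JSON (as a web service would). The model label is
# illustrative only.
if __name__ == '__main__':
    action_info('MyModel', 1, output='text')

    success, info_json = action_info('MyModel', 1, output='JSON')
    if success:
        print(info_json)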