def ajax_start_synth(request):
    '''Perform the forward synthesis'''
    data = {'err': False}

    reactants = request.GET.get('reactants', None)
    solvent = request.GET.get('solvent', None)
    temperature = request.GET.get('temperature', None)
    reagents = request.GET.get('reagents', None)
    mincount = int(request.GET.get('mincount', 25))
    maxreturn = int(request.GET.get('maxreturn', 100))
    forward_scorer = request.GET.get('forward_scorer', 'Template_Free')

    print('Conditions for forward synthesis:')
    print('reactants: {}'.format(reactants))
    print('solvent: {}'.format(solvent))
    print('temp: {}'.format(temperature))
    print('reagents: {}'.format(reagents))
    print('mincount: {}'.format(mincount))
    print('max return: {}'.format(maxreturn))
    print('forward scorer: {}'.format(forward_scorer))

    startTime = time.time()

    # Expected context format is (T1, slvt1, rgt1, cat1, t1, y1)
    if solvent == 'default':
        solvent = ''
        print('reset default solvent')

    res = evaluate.delay(
        reactants, '',
        contexts=[clean_context((temperature, solvent, reagents, '', -1, -1))],
        forward_scorer=forward_scorer,
        top_n=maxreturn,
        return_all_outcomes=True)
    outcomes = res.get(300)[0]['outcomes']
    print('Got top outcomes, length {}'.format(len(outcomes)))
    # print(outcomes)

    data['html_time'] = '{:.3f} seconds elapsed'.format(time.time() - startTime)

    if outcomes:
        data['html'] = render_to_string('synth_outcomes_only.html', {'outcomes': outcomes})
    else:
        data['html'] = 'No outcomes found? That is weird...'

    # Save in session in case the user wants to print
    request.session['last_synth_interactive'] = {
        'reactants': reactants,
        'temperature': temperature,
        'reagents': reagents,
        'solvent': solvent,
        'mincount': mincount,
        'outcomes': outcomes,
        'forward_scorer': forward_scorer,
    }

    return JsonResponse(data)
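# Illustrative only: a minimal sketch of how the AJAX view above could be exercised
# outside the browser, assuming it is routed at something like '/ajax/start_synth/'
# (the route and host are assumptions, not taken from this code). The response keys
# ('err', 'html', 'html_time') come from the view itself; the SMILES are arbitrary.
import requests

r = requests.get('http://localhost:8000/ajax/start_synth/', params={
    'reactants': 'CCO.CC(=O)O',
    'solvent': 'default',
    'temperature': 20,
    'reagents': '',
    'maxreturn': 10,
})
data = r.json()
print(data['html_time'])   # e.g. '1.234 seconds elapsed'
print(data['html'])        # rendered 'synth_outcomes_only.html' fragment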
def template_free(request):
    resp = {}
    resp['request'] = dict(**request.GET)

    reactants = request.GET.get('reactants')
    solvent = request.GET.get('solvent', '')
    reagents = request.GET.get('reagents', '')
    num_results = int(request.GET.get('num_results', 100))
    contexts = [clean_context((None, solvent, reagents, '', -1, -1))]

    if not reactants:
        resp['error'] = 'Required parameter "reactants" missing'
        return JsonResponse(resp, status=400)

    rmol = Chem.MolFromSmiles(reactants)
    if not rmol:
        resp['error'] = 'Cannot parse reactants smiles with rdkit'
        return JsonResponse(resp, status=400)

    smol = Chem.MolFromSmiles(contexts[0][1])
    if not smol:
        resp['error'] = 'Cannot parse solvent smiles with rdkit'
        return JsonResponse(resp, status=400)

    remol = Chem.MolFromSmiles(contexts[0][2])
    if not remol:
        resp['error'] = 'Cannot parse reagents smiles with rdkit'
        return JsonResponse(resp, status=400)

    res = evaluate.delay(
        reactants, '', contexts=contexts,
        forward_scorer='Template_Free',
        top_n=num_results,
        return_all_outcomes=True)

    try:
        outcomes = res.get(TIMEOUT)
    except TimeoutError:
        resp['error'] = 'API request timed out (limit {}s)'.format(TIMEOUT)
        res.revoke()
        return JsonResponse(resp, status=408)
    except Exception as e:
        resp['error'] = str(e)
        res.revoke()
        return JsonResponse(resp, status=400)

    outcomes = outcomes[0]['outcomes']
    for out in outcomes:
        o = out.pop('outcome')
        out['smiles'] = o['smiles']

    resp['outcomes'] = outcomes
    return JsonResponse(resp)
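# Illustrative only: a rough sketch of querying the JSON API view above, assuming a
# route such as '/api/template-free/' (route and host are assumptions). The query
# parameters and response keys come from the view: success returns 'outcomes' where
# each entry carries at least a 'smiles' key, bad input returns HTTP 400 with an
# 'error' message, and a Celery timeout returns HTTP 408.
import requests

resp = requests.get('http://localhost:8000/api/template-free/',
                    params={'reactants': 'CCO.CC(=O)O', 'num_results': 5})
payload = resp.json()
if resp.status_code != 200:
    print('Request failed:', payload.get('error'))
else:
    for outcome in payload['outcomes']:
        print(outcome['smiles'])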
def ajax_evaluate_rxnsmiles(request):
    '''Evaluate rxn_smiles'''
    data = {'err': False}

    smiles = request.GET.get('smiles', None)
    verbose = json.loads(request.GET.get('verbose', 'false'))
    synth_mincount = int(request.GET.get('synth_mincount', 0))
    necessary_reagent = request.GET.get('necessary_reagent', '')
    forward_scorer = request.GET.get('forward_scorer', 'Template_Free')
    context_recommender = request.GET.get('context_recommender', 'Neural_Network')
    if necessary_reagent == 'false':
        necessary_reagent = ''

    reactants = smiles.split('>>')[0].split('.')
    products = smiles.split('>>')[1].split('.')

    if forward_scorer == 'Fast_Filter':
        res = fast_filter_check.delay('.'.join(reactants), '.'.join(products))
        outcomes = res.get(5)
        score = outcomes[0][0]['score']
        data['html'] = 'Estimated plausibility: {:.4f}'.format(score)
        B = 150.
        R = 255. - (score > 0.5) * (score - 0.5) * (255. - B) * 2.
        G = 255. - (score < 0.5) * (0.5 - score) * (255. - B) * 2.
        data['html_color'] = str('#%02x%02x%02x' % (int(R), int(G), int(B)))
        return JsonResponse(data)

    print('...trying to get predicted context')
    if necessary_reagent and makeit_gc.forward_scoring_needs_context_necessary_reagent[forward_scorer]:
        num_contexts = 1
    elif makeit_gc.forward_scoring_needs_context[forward_scorer]:
        num_contexts = 10
    else:
        num_contexts = 0

    if num_contexts:
        res = get_context_recommendations.delay(smiles, n=num_contexts,
                                                context_recommender=context_recommender)
        contexts = res.get(60)
        print('Got context(s)')
        print(contexts)
        if contexts is None:
            raise ValueError('Context recommender was unable to get valid context(?)')
        contexts = [clean_context(context) for context in contexts]
        print(contexts)
    else:
        contexts = ['n/a']
        print('Did not need a context')

    # Run the forward evaluator
    reactant_smiles = smiles.split('>>')[0]
    print('Running forward evaluator on {}'.format(reactant_smiles))
    if necessary_reagent:
        print('Need reagent and reagent suggestion is: {}'.format(contexts[0][2]))
    if necessary_reagent and contexts[0][2] and Chem.MolFromSmiles(contexts[0][2]):
        reactant_smiles += '.{}'.format(contexts[0][2])  # add reagent

    res = evaluate.delay(reactant_smiles, products[0], contexts,
                         forward_scorer=forward_scorer, mincount=synth_mincount,
                         top_n=50, return_all_outcomes=True)
    all_outcomes = res.get(300)

    if all([len(outcome) == 0 for outcome in all_outcomes]):
        if not verbose:
            data['html'] = 'Could not get outcomes - recommended context(s) unparseable'
            for i, (T, slvt, rgt, cat, t, y) in enumerate(contexts):
                data['html'] += '<br>{}) T={:.1f}, rgt={}, slvt={}'.format(i + 1, T, rgt, slvt)
            data['html_color'] = str('#%02x%02x%02x' % (int(255), int(0), int(0)))
            return JsonResponse(data)
        else:
            # TODO: expand
            data['html'] = '<h3>Could not get outcomes - recommended context(s) unparseable</h3>\n<ol>\n'
            for i, (T, slvt, rgt, cat, t, y) in enumerate(contexts):
                data['html'] += '<li>Temp: {:.1f} C<br>Reagents: {}<br>Solvent: {}</li>\n'.format(T, rgt, slvt)
            data['html'] += '</ol>'
            data['html_color'] = str('#%02x%02x%02x' % (int(255), int(0), int(0)))
            return JsonResponse(data)

    plausible = [outcome['target']['prob'] for outcome in all_outcomes]
    print('All plausibilities: {}'.format(plausible))
    ranks = [outcome['target']['rank'] for outcome in all_outcomes]
    major_prods = [outcome['top_product']['smiles'] for outcome in all_outcomes]
    major_probs = [outcome['top_product']['prob'] for outcome in all_outcomes]

    best_context_i = np.argmax(plausible)
    plausible = plausible[best_context_i]
    rank = ranks[best_context_i]
    best_context = contexts[best_context_i]
    major_prod = major_prods[best_context_i]
    major_prob = major_probs[best_context_i]

    # Report
    print('Recommended context(s): {}'.format(best_context))
    print('Plausibility: {}'.format(plausible))
    # print(all_outcomes[best_context_i])
    if num_contexts:
        (T1, slvt1, rgt1, cat1, t1, y1) = best_context

    if not verbose:
        data['html'] = 'Plausibility score: {} (rank {})'.format(plausible, rank)
        if num_contexts:
            if not rgt1:
                rgt1 = 'no '
            data['html'] += '<br><br><u>Top conditions</u>'
            data['html'] += '<br>{:.1f} C'.format(T1)
            data['html'] += '<br>{} solvent'.format(slvt1)
            data['html'] += '<br>{} reagents'.format(rgt1)
            data['html'] += '<br>nearest-neighbor got {}% yield'.format(y1)
        if rank != 1:
            data['html'] += '<br>Predicted major product with p = {:.4f}'.format(major_prob)
            data['html'] += '<br>{}'.format(major_prod)
            if major_prod != 'none found':
                url = reverse('draw_smiles', kwargs={'smiles': major_prod})
                data['html'] += '<br><img src="' + url + '">'
        # data['html'] += '<br>(calc. used synth_mincount {})'.format(synth_mincount)
    else:
        data['html'] = '<h3>Plausibility score: {} (rank {})</h3>'.format(plausible, rank)
        if num_contexts:
            if not rgt1:
                rgt1 = 'none'
            data['html'] += '\n<br><u>Proposed conditions ({} tried)</u>\n'.format(len(contexts))
            data['html'] += '<br>Temp: {:.1f} C<br>Reagents: {}<br>Solvent: {}\n'.format(T1, rgt1, slvt1)
        if rank != 1:
            data['html'] += '<br><br><u>Predicted major product (<i>p = {:.4f}</i>)</u>'.format(major_prob)
            data['html'] += '\n<br>{}'.format(major_prod)
            if major_prod != 'none found':
                url = reverse('draw_smiles', kwargs={'smiles': major_prod})
                data['html'] += '<br><img src="' + url + '">'

    # plausible = plausible / 100.
    B = 150.
    R = 255. - (plausible > 0.5) * (plausible - 0.5) * (255. - B) * 2.
    G = 255. - (plausible < 0.5) * (0.5 - plausible) * (255. - B) * 2.
    data['html_color'] = str('#%02x%02x%02x' % (int(R), int(G), int(B)))

    return JsonResponse(data)
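# A small standalone sketch of the plausibility-to-color mapping repeated in the
# views above (hypothetical helper, not part of the original module). Scores below
# 0.5 shade toward red, scores above 0.5 toward green, and exactly 0.5 gives a pale
# yellow, matching the arithmetic used above.
def plausibility_color(score):
    B = 150.
    R = 255. - (score > 0.5) * (score - 0.5) * (255. - B) * 2.
    G = 255. - (score < 0.5) * (0.5 - score) * (255. - B) * 2.
    return '#%02x%02x%02x' % (int(R), int(G), int(B))

assert plausibility_color(0.0) == '#ff9696'   # implausible -> red-ish
assert plausibility_color(0.5) == '#ffff96'   # uncertain   -> yellow
assert plausibility_color(1.0) == '#96ff96'   # plausible   -> green-ish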
        info_path=gc.NEURALNET_CONTEXT_REC['info_path'],
        weights_path=gc.NEURALNET_CONTEXT_REC['weights_path'])
evaluator = Evaluator(celery=False)

from tqdm import tqdm

cond_dict = collections.defaultdict(list)
ctr = 0
for key, value in tqdm(encoded_rxn_dict.items()):
    rxn = rxn_le[key]
    rsmi = rxn.split('>>')[0]
    psmi = rxn.split('>>')[1]
    uncleaned_contexts = cont.get_n_conditions(rxn, n=10, return_separate=True)
    contexts = cont.get_n_conditions(rxn, n=10, return_separate=False)
    contexts = [context_cleaner.clean_context(context) for context in contexts]
    try:
        eval_res = evaluator.evaluate(rsmi, psmi, contexts)
    except Exception as e:
        print(e)
        eval_res = [{'target': {'prob': 0}}] * len(uncleaned_contexts)
    encoded_rxn_dict[key]['cond'] = {}
    encoded_rxn_dict[key]['score'] = {}
    for i in range(len(eval_res)):
        eval_res[i]['context'] = uncleaned_contexts[i][:5]
        encoded_rxn_dict[key]['cond'][i] = eval_res[i]['context']
        encoded_rxn_dict[key]['score'][i] = eval_res[i]['target']['prob']
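# For reference, a hedged sketch of what the loop above stores for a single
# reaction key. The 5-tuple order follows the (T, solvent, reagents, catalyst,
# time) convention noted in the views above; all values here are invented for
# illustration.
example_entry = {
    'cond': {
        0: (100.0, 'C1CCOC1', 'CC(=O)O', '', 12.0),   # top recommended context
        1: (25.0, 'O', '', '', 1.0),
    },
    'score': {
        0: 0.87,   # forward-evaluator plausibility of the recorded product under context 0
        1: 0.45,
    },
}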
def evaluate_tree(self, tree, context_recommender='', context_scoring_method='',
                  forward_scoring_method='', tree_scoring_method='',
                  rank_threshold=5, prob_threshold=0.2, mincount=25, nproc=1,
                  batch_size=500, n=10, is_target=False, reset=False,
                  worker_no=0, template_count=10000):
    if is_target and reset:
        self.reset()
    self.get_context_prioritizer(context_scoring_method)
    self.rank_threshold = rank_threshold
    self.prob_threshold = prob_threshold
    self.mincount = mincount
    self.recommender = context_recommender
    self.nproc = nproc
    self.batch_size = batch_size
    self.forward_scorer = forward_scoring_method
    self.tree_scorer = tree_scoring_method
    self.template_count = template_count

    if not tree['children']:
        # Reached the end of the synthesis tree -> stop
        if is_target:
            return {'tree': tree, 'plausible': True, 'score': 1.0}
        else:
            return True, 1.0
    else:
        if self.celery:
            from celery.result import allow_join_result
        else:
            from makeit.utilities.with_dummy import with_dummy as allow_join_result

        with allow_join_result():
            target = tree['smiles']
            reaction = tree['children'][0]
            reactants = [child['smiles'] for child in reaction['children']]
            reaction_smiles = reaction['smiles']
            necessary_reagent = reaction['necessary_reagent']

            ###################################################################
            # If reaction encountered before: get data from dict.
            ###################################################################
            if reaction_smiles in self.evaluation_dict:
                evaluation = self.evaluation_dict[reaction_smiles]
            ###################################################################
            # Otherwise create data
            ###################################################################
            else:
                # TODO: better way of deciding if context recommendation is needed
                contexts = None
                if gc.forward_scoring_needs_context_necessary_reagent[forward_scoring_method]:
                    if not self._loaded_context_recommender:
                        self.load_context_recommender()
                    if necessary_reagent:
                        contexts = self.get_contexts(reaction_smiles, 1)
                        if (contexts is not None and len(contexts) > 0
                                and len(contexts[0]) >= 3 and contexts[0][2]):
                            reactants.extend(contexts[0][2].split('.'))  # add reagents
                elif gc.forward_scoring_needs_context[forward_scoring_method]:
                    contexts = self.get_contexts(reaction_smiles, n)
                elif self.recommender != gc.nearest_neighbor:
                    # Not using the nearest-neighbor model
                    contexts = self.get_contexts(reaction_smiles, 1)

                contexts = [context_cleaner.clean_context(context) for context in contexts]
                if not contexts:
                    contexts = ['n/a']

                # remove contexts without a parsable smiles string
                evaluation = self.evaluate_reaction('.'.join(reactants), target, contexts,
                                                    worker_no=worker_no)
                self.evaluation_dict[reaction_smiles] = evaluation

            ###################################################################
            # Process data
            ###################################################################
            if len(evaluation) == 1:
                top_result = evaluation[0]
            else:
                top_result = self.get_top_context(evaluation)

            # Add evaluation information to the reaction
            MyLogger.print_and_log(
                'Evaluated reaction: {} - ranked {} with a {}% probability.'
                .format(reaction_smiles, top_result['target']['rank'],
                        top_result['target']['prob'] * 100.0),
                treeEvaluator_loc)

            score = self.score_step(reaction['template_score'],
                                    top_result['target']['prob'])
            plausible = self.is_plausible(top_result)
            print((reaction_smiles, plausible))

            all_children_plausible = True
            for child in reaction['children']:
                # TODO: properly pass arguments to the next evaluate_tree call
                child_plausible, score_child = self.evaluate_tree(
                    child,
                    context_recommender=context_recommender,
                    context_scoring_method=context_scoring_method,
                    forward_scoring_method=forward_scoring_method,
                    tree_scoring_method=tree_scoring_method,
                    rank_threshold=rank_threshold,
                    prob_threshold=prob_threshold,
                    mincount=mincount,
                    nproc=nproc,
                    batch_size=batch_size,
                    n=n,
                    is_target=False,
                    reset=False,
                    worker_no=worker_no,
                    template_count=template_count)
                score *= score_child
                if not child_plausible:
                    all_children_plausible = False

            if all_children_plausible and plausible and is_target:
                MyLogger.print_and_log('Found a fully plausible tree!',
                                       treeEvaluator_loc)
            elif is_target:
                MyLogger.print_and_log('Evaluated tree has unfeasible children.',
                                       treeEvaluator_loc)

            reaction['top_product'] = {
                'smiles': top_result['top_product']['smiles'],
                'score': top_result['top_product']['score'],
                'prob': top_result['top_product']['prob'],
            }
            reaction['forward_score'] = top_result['target']['prob']
            reaction['cumul_score'] = score
            reaction['rank'] = top_result['target']['rank']
            reaction['templates'] = top_result['target']['template_ids']
            reaction['context'] = top_result['context']

            # overwrite
            tree['children'] = [reaction]

            if is_target:
                return {
                    'tree': tree,
                    'plausible': plausible and all_children_plausible,
                    'score': score,
                }
            else:
                return plausible and all_children_plausible, score
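# A minimal sketch (assumed, not taken verbatim from the codebase) of the tree
# structure evaluate_tree expects: chemical nodes alternate with reaction nodes,
# chemical leaves have empty 'children', and each reaction node carries the fields
# read above ('smiles', 'necessary_reagent', 'template_score', 'children').
example_tree = {
    'smiles': 'CC(=O)OCC',                  # target chemical node
    'children': [{                          # single reaction node
        'smiles': 'CCO.CC(=O)O>>CC(=O)OCC',
        'necessary_reagent': '',
        'template_score': 0.05,
        'children': [                       # reactant chemical nodes (leaves here)
            {'smiles': 'CCO', 'children': []},
            {'smiles': 'CC(=O)O', 'children': []},
        ],
    }],
}
# Called with is_target=True, evaluate_tree returns a dict of the form
# {'tree': ..., 'plausible': bool, 'score': float}; recursive calls on child
# chemicals return the (plausible, score) tuple instead.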