# Evaluate the best individual found by the optimisation, push its fitted
# parameters onto the cell model, and compare the result against the
# experimental target.
best_ind_dict = cell_evaluator.param_dict(best_ind)
print(cell_evaluator.evaluate_with_dicts(best_ind_dict))


model = cell_evaluator.cell_model
# Cast keys/values to plain str/float so downstream code gets a simple,
# serialisable dict.  Reuse best_ind_dict instead of re-deriving it (the
# original code called cell_evaluator.param_dict(best_ind) three times,
# once discarding the result).
best_params = {str(k): float(v) for k, v in best_ind_dict.items()}
model.attrs = best_params

# Convert the fitted model to a data-transfer container carrying the same
# attributes (fresh copy so opt.attrs and model.attrs stay independent).
opt = model.model_to_dtc()
opt.attrs = dict(best_params)

check_binary_match(target, opt)
inject_and_plot_passive_model(opt, second=target)


# As you can see the evaluation returns the same values as the fitness values provided by the optimisation output. 
# We can have a look at the responses now.

# In[ ]:


#plot_responses(twostep_protocol.run(cell_model=simple_cell, param_values=best_ind_dict, sim=nrn))
 


# Let's have a look at the optimisation statistics.
# We can plot the minimal score (sum of all objective scores) found in every optimisation. 
# The optimisation algorithm uses negative fitness scores, so we actually have to look at the maximum values log.
def instance_opt(experimental_constraints,MODEL_PARAMS,test_key,model_value,MU,NGEN):
  """Run a DEAP optimisation for one model/experiment pair and report results.

  Builds an evaluator from the experimental constraints, runs the genetic
  optimisation, then renders the best-fit model's behaviour and scores in
  the Streamlit app.

  Parameters
  ----------
  experimental_constraints : tests/observations used to score candidate models
  MODEL_PARAMS : model parameter boundaries passed to the evaluator factory
  test_key : identifier of the experiment being fitted (used in headings)
  model_value : backend/model identifier passed to the evaluator factory
  MU : offspring population size per generation
  NGEN : maximum number of generations to run
  """
  cell_evaluator, simple_cell, score_calc, test_names = utils.make_evaluator(
                                                        experimental_constraints,
                                                        MODEL_PARAMS,
                                                        test_key,
                                                        model=model_value)

  mut = 0.05  # mutation probability
  cxp = 0.1   # crossover probability
  # NOTE(review): `diversity` is a free (module-level) name — confirm it is
  # defined before this function is called.
  optimisation = bpop.optimisations.DEAPOptimisation(
          evaluator=cell_evaluator,
          offspring_size = MU,
          map_function = utils.dask_map_function,
          selector_name=diversity,
          mutpb=mut,
          cxpb=cxp)

  final_pop, hall_of_fame, logs, hist = optimisation.run(max_ngen=NGEN)
  # hall_of_fame is sorted best-first; index 0 is the fittest individual.
  best_ind = hall_of_fame[0]


  st.markdown('---')
  st.success("Model best fit to experiment {0}".format(test_key))
  st.markdown("Would you like to pickle the optimal model? (Note not implemented yet, but trivial)")

  st.markdown('---')
  st.markdown('\n\n\n\n')

  # Build the fitted-parameter dict once and reuse it (the original code
  # recomputed cell_evaluator.param_dict(best_ind) three times, once
  # discarding the result entirely).
  best_params = {str(k): float(v) for k, v in cell_evaluator.param_dict(best_ind).items()}
  model = cell_evaluator.cell_model
  model.attrs = best_params
  opt = model.model_to_dtc()
  opt.attrs = dict(best_params)
  opt.tests = experimental_constraints
  obs_preds = opt.make_pretty(experimental_constraints)

  frame = opt.SA.to_frame()
  st.write(frame.T)

  # make_pretty's return value above is superseded by the attribute here;
  # presumably make_pretty populates opt.obs_preds — TODO confirm.
  obs_preds = opt.obs_preds
  st.write(obs_preds.T)
  st.markdown("----")

  st.markdown("Model behavior at rheobase current injection")

  vm,fig = inject_and_plot_model(opt,plotly=True)
  st.write(fig)
  st.markdown("----")

  st.markdown("Model behavior at -10pA current injection")
  fig = inject_and_plot_passive_model(opt,opt,plotly=True)
  st.write(fig)

  # 30 is the assumed worst-case score contribution per constraint
  # (presumably an upper bound on each objective's score — TODO confirm).
  worst_per_test = 30
  worst_total = worst_per_test * len(experimental_constraints)
  achieved = sum(best_ind.fitness.values)

  st.markdown("""
  -----
  The optimal model parameterization is {0}    
  -----
  """.format(opt.attrs))

  st.markdown("""
  -----
  Model Performance Relative to fitting data {0}    
  -----
  """.format(achieved / worst_total))
  # radio_value = st.sidebar.radio("Target Number of Samples",[10,20,30])
  st.markdown("""
  -----
  This score is {0} worst score is {1}  
  -----
  """.format(achieved, worst_total))
# Example #3 — stray export artifact; the cell below duplicates the
# post-optimisation code at the top of this file.
# Duplicate of the earlier cell: push the best-fit parameters onto the cell
# model, convert it to a data-transfer container, and compare against the
# experimental target.
best_params = {
    str(k): float(v)
    for k, v in cell_evaluator.param_dict(best_ind).items()
}
# Build the dict once and reuse it (the original recomputed param_dict
# three times, once discarding the result).
model = cell_evaluator.cell_model
model.attrs = best_params

opt = model.model_to_dtc()
opt.attrs = dict(best_params)

check_binary_match(target, opt)
inject_and_plot_passive_model(opt, second=target)

# As you can see the evaluation returns the same values as the fitness values provided by the optimisation output.
# We can have a look at the responses now.

# In[ ]:

#plot_responses(twostep_protocol.run(cell_model=simple_cell, param_values=best_ind_dict, sim=nrn))

# Let's have a look at the optimisation statistics.
# We can plot the minimal score (sum of all objective scores) found in every optimisation.
# The optimisation algorithm uses negative fitness scores, so we actually have to look at the maximum values log.

# Pull the per-generation statistics out of the optimisation log book:
# generation index plus the minimum and maximum fitness seen each generation.
gen_numbers, min_fitness, max_fitness = (
    logs.select(key) for key in ('gen', 'min', 'max')
)