Пример #1
0
 def test_lmoma(self):
     """lmoma with an unmodified model must reproduce the pfba reference fluxes."""
     reference = pfba(self.model)
     lmoma_solution = lmoma(self.model, reference=reference)
     # Sum of absolute per-reaction deviations from the reference distribution.
     distance = sum(abs(lmoma_solution[rid] - reference[rid]) for rid in reference.keys())
     self.assertAlmostEqual(0, distance,
                            delta=1e-6,
                            msg="lmoma distance without knockouts must be 0 (was %f)" % distance)
Пример #2
0
 def test_pfba_with_reaction_filter(self, core_model):
     """A reaction filter restricts the pfba flux report to exactly those reactions."""
     objective_before = core_model.objective
     exchanges = ['EX_o2_LPAREN_e_RPAREN_', 'EX_glc_LPAREN_e_RPAREN_']
     filtered_solution = pfba(core_model, reactions=exchanges)
     assert len(filtered_solution.fluxes) == 2
     # pfba must leave the model's objective untouched.
     assert core_model.objective is objective_before
Пример #3
0
 def test_lmoma(self, core_model):
     """lmoma with no knockouts should sit exactly on the pfba reference."""
     objective_before = core_model.objective
     reference = pfba(core_model)
     lmoma_solution = lmoma(core_model, reference=reference)
     deviations = [abs(lmoma_solution[rid] - reference[rid]) for rid in reference.keys()]
     distance = sum(deviations)
     assert abs(0 - distance) < 1e-6, "lmoma distance without knockouts must be 0 (was %f)" % distance
     assert core_model.objective.expression == objective_before.expression
Пример #4
0
 def test_room(self, core_model):
     """room with no knockouts must have objective value 0 and restore the objective."""
     objective_before = core_model.objective
     reference = pfba(core_model)
     room_solution = room(core_model, reference=reference)
     assert abs(0 - room_solution.objective_value) < 1e-6, \
         "room objective without knockouts must be 0 (was %f)" % room_solution.objective_value
     assert core_model.objective is objective_before
Пример #5
0
 def test_room_with_reaction_filter(self):
     """room with a reaction filter reports fluxes only for the listed reactions."""
     objective_before = self.model.objective
     reference = pfba(self.model)
     exchanges = ['EX_o2_LPAREN_e_RPAREN_', 'EX_glc_LPAREN_e_RPAREN_']
     room_solution = room(self.model, reference=reference, reactions=exchanges)
     self.assertEqual(len(room_solution.fluxes), 2)
     self.assertIs(self.model.objective, objective_before)
Пример #6
0
 def test_lmoma(self, core_model):
     """Without knockouts, lmoma must not move away from the pfba reference."""
     objective_before = core_model.objective
     reference = pfba(core_model)
     lmoma_solution = lmoma(core_model, reference=reference)
     distance = 0.0
     for rid in reference.keys():
         distance += abs(lmoma_solution[rid] - reference[rid])
     assert abs(0 - distance) < 1e-6, "lmoma distance without knockouts must be 0 (was %f)" % distance
     assert core_model.objective.expression == objective_before.expression
Пример #7
0
 def test_room_with_reaction_filter(self):
     """Only the two filtered exchange reactions may appear in room's flux report."""
     objective_before = self.model.objective
     filtered = ['EX_o2_LPAREN_e_RPAREN_', 'EX_glc_LPAREN_e_RPAREN_']
     room_solution = room(self.model, reference=pfba(self.model), reactions=filtered)
     self.assertEqual(len(room_solution.fluxes), 2)
     self.assertIs(self.model.objective, objective_before)
Пример #8
0
 def test_room_with_reaction_filter(self, core_model):
     """Filtered room returns two fluxes, restores the objective, and cleans up its y_ variables."""
     objective_before = core_model.objective
     reference = pfba(core_model)
     room_solution = room(core_model, reference=reference,
                          reactions=['EX_o2_LPAREN_e_RPAREN_', 'EX_glc_LPAREN_e_RPAREN_'])
     assert len(room_solution.fluxes) == 2
     assert core_model.objective.expression == objective_before.expression
     # room's auxiliary binary variables must be removed from the solver again.
     assert all(not v.name.startswith("y_") for v in core_model.solver.variables)
Пример #9
0
 def test_room_with_reaction_filter(self, core_model):
     """Filtered room: two fluxes back, objective restored, no leftover y_ variables."""
     objective_before = core_model.objective
     filtered = ['EX_o2_LPAREN_e_RPAREN_', 'EX_glc_LPAREN_e_RPAREN_']
     room_solution = room(core_model, reference=pfba(core_model), reactions=filtered)
     assert len(room_solution.fluxes) == 2
     assert core_model.objective.expression == objective_before.expression
     leftover = [v for v in core_model.solver.variables if v.name.startswith("y_")]
     assert not leftover
Пример #10
0
 def simulation_kwargs(self, simulation_kwargs):
     """Store the simulation kwargs; for (l)moma/room, fill in a pfba-based WT reference if missing."""
     needs_reference = (self.simulation_method in (lmoma, moma, room)
                        and simulation_kwargs.get("reference", None) is None)
     if needs_reference:
         logger.warning("No WT reference found, generating using pfba.")
         simulation_kwargs['reference'] = pfba(self.model).fluxes
         logger.warning("Reference successfully computed.")
     self._simulation_kwargs = simulation_kwargs
Пример #11
0
 def test_pfba(self):
     """pfba's total absolute flux must not exceed the plain FBA flux sum.

     Fix: the original failure message claimed "FBA sum is suppose to be lower
     than PFBA", the opposite of the asserted relation (pFBA <= FBA); the
     message now matches the check. Redundant list() wrappers removed.
     """
     fba_solution = fba(self.model)
     fba_flux_sum = sum(abs(val) for val in fba_solution.fluxes.values())
     pfba_solution = pfba(self.model)
     pfba_flux_sum = sum(abs(val) for val in pfba_solution.fluxes.values())
     # looks like GLPK finds a parsimonious solution without the flux minimization objective
     self.assertTrue((pfba_flux_sum - fba_flux_sum) < 1e-6,
                     msg="pFBA flux sum is supposed to be lower than or equal to the FBA flux sum "
                         "(difference was %f)" % (pfba_flux_sum - fba_flux_sum))
Пример #12
0
 def test_room(self):
     """With nothing knocked out, room's objective (number of changed fluxes) is 0."""
     objective_before = self.model.objective
     reference = pfba(self.model)
     room_solution = room(self.model, reference=reference)
     self.assertAlmostEqual(0, room_solution.objective_value,
                            delta=1e-6,
                            msg="room objective without knockouts must be 0 (was %f)" % room_solution.objective_value)
     self.assertIs(self.model.objective, objective_before)
Пример #13
0
 def test_room(self):
     """room against the pfba reference must need zero flux changes on the wild type."""
     objective_before = self.model.objective
     room_solution = room(self.model, reference=pfba(self.model))
     self.assertAlmostEqual(0, room_solution.objective_value,
                            delta=1e-6,
                            msg="room objective without knockouts must be 0 (was %f)" % room_solution.objective_value)
     self.assertIs(self.model.objective, objective_before)
Пример #14
0
 def test_pfba_iJO(self):
     """On iJO1366, pfba's total absolute flux must not exceed the FBA flux sum.

     Fixes: removed a leftover debug print(), and corrected the failure message,
     which stated the inverse of the asserted relation (pFBA <= FBA).
     """
     fba_solution = fba(iJO_MODEL)
     fba_flux_sum = sum(abs(val) for val in fba_solution.fluxes.values())
     pfba_solution = pfba(iJO_MODEL)
     pfba_flux_sum = sum(abs(val) for val in pfba_solution.fluxes.values())
     self.assertTrue((pfba_flux_sum - fba_flux_sum) < 1e-6,
                     msg="pFBA flux sum is supposed to be lower than or equal to the FBA flux sum "
                         "(difference was %f)" % (pfba_flux_sum - fba_flux_sum))
Пример #15
0
 def test_pfba_ijo1366(self, ijo1366):
     """pfba on iJO1366 must not use more total flux than FBA and must restore the objective."""
     original_objective = ijo1366.objective
     fba_solution = fba(ijo1366)
     # NOTE(review): `.values` without a call only iterates if `fluxes` is a pandas
     # Series (array attribute); if it were a dict this would raise — confirm.
     fba_flux_sum = sum((abs(val) for val in fba_solution.fluxes.values))
     pfba_solution = pfba(ijo1366)
     pfba_flux_sum = sum((abs(val) for val in pfba_solution.fluxes.values))
     # NOTE(review): the message reads backwards relative to the assertion, which
     # requires the pFBA sum to be <= the FBA sum.
     assert (pfba_flux_sum - fba_flux_sum) < 1e-6, \
         "FBA sum is suppose to be lower than PFBA (was %f)" % (pfba_flux_sum - fba_flux_sum)
     assert ijo1366.objective.expression == original_objective.expression
Пример #16
0
 def test_moma(self):
     """moma without knockouts should reproduce the pfba reference exactly."""
     objective_before = self.model.objective
     reference = pfba(self.model)
     moma_solution = moma(self.model, reference=reference)
     deviations = [abs(moma_solution[rid] - reference[rid]) for rid in reference.keys()]
     distance = sum(deviations)
     self.assertAlmostEqual(0, distance,
                            delta=1e-6,
                            msg="moma distance without knockouts must be 0 (was %f)" % distance)
     self.assertIs(self.model.objective, objective_before)
Пример #17
0
 def test_pfba_iJO(self):
     """pfba on iJO1366 must not exceed the FBA flux sum and must restore the model objective.

     Fixes: the original captured self.model.objective although the test only
     exercises iJO_MODEL, making the restoration check vacuous — it now guards
     iJO_MODEL's objective. The failure message also stated the inverse of the
     asserted relation and has been corrected.
     """
     original_objective = iJO_MODEL.objective
     fba_solution = fba(iJO_MODEL)
     fba_flux_sum = sum(abs(val) for val in fba_solution.fluxes.values())
     pfba_solution = pfba(iJO_MODEL)
     pfba_flux_sum = sum(abs(val) for val in pfba_solution.fluxes.values())
     self.assertTrue((pfba_flux_sum - fba_flux_sum) < 1e-6,
                     msg="pFBA flux sum is supposed to be lower than or equal to the FBA flux sum "
                         "(difference was %f)" % (pfba_flux_sum - fba_flux_sum))
     self.assertIs(iJO_MODEL.objective, original_objective)
Пример #18
0
 def test_pfba_ijo1366(self, ijo1366):
     """pfba on iJO1366 should not use more total flux than FBA."""
     objective_before = ijo1366.objective
     fba_total = sum(abs(val) for val in fba(ijo1366).fluxes.values)
     pfba_total = sum(abs(val) for val in pfba(ijo1366).fluxes.values)
     assert (pfba_total - fba_total) < 1e-6, \
         "FBA sum is suppose to be lower than PFBA (was %f)" % (pfba_total - fba_total)
     assert ijo1366.objective.expression == objective_before.expression
Пример #19
0
 def test_room(self, core_model):
     """room with no knockouts: zero objective, objective restored, solver left clean."""
     objective_before = core_model.objective
     reference = pfba(core_model)
     room_solution = room(core_model, reference=reference)
     assert abs(0 - room_solution.objective_value) < 1e-6, \
         "room objective without knockouts must be 0 (was %f)" % room_solution.objective_value
     assert core_model.objective.expression == objective_before.expression
     # room must remove its auxiliary variables and constraints again.
     assert all(not v.name.startswith("y_") for v in core_model.solver.variables)
     assert all(not c.name.startswith("moma_const_") for c in core_model.solver.constraints)
Пример #20
0
 def test_moma(self):
     """Without knockouts, the moma solution must coincide with the pfba reference."""
     objective_before = self.model.objective
     reference = pfba(self.model)
     moma_solution = moma(self.model, reference=reference)
     distance = 0.0
     for rid in reference.keys():
         distance += abs(moma_solution[rid] - reference[rid])
     self.assertAlmostEqual(0, distance,
                            delta=1e-6,
                            msg="moma distance without knockouts must be 0 (was %f)" % distance)
     self.assertIs(self.model.objective, objective_before)
Пример #21
0
 def test_room(self, core_model):
     """room on the untouched model: zero objective and a clean solver afterwards."""
     objective_before = core_model.objective
     room_solution = room(core_model, reference=pfba(core_model))
     assert abs(0 - room_solution.objective_value) < 1e-6, \
         "room objective without knockouts must be 0 (was %f)" % room_solution.objective_value
     assert core_model.objective.expression == objective_before.expression
     assert not [v for v in core_model.solver.variables if v.name.startswith("y_")]
     assert not [c for c in core_model.solver.constraints if c.name.startswith("moma_const_")]
Пример #22
0
 def test_pfba_iJO(self):
     """pfba on iJO1366 should not use more total flux than plain FBA."""
     objective_before = self.model.objective
     fba_total = sum(abs(v) for v in fba(iJO_MODEL).fluxes.values())
     pfba_total = sum(abs(v) for v in pfba(iJO_MODEL).fluxes.values())
     self.assertTrue((pfba_total - fba_total) < 1e-6,
                     msg="FBA sum is suppose to be lower than PFBA (was %f)" % (pfba_total - fba_total))
     self.assertIs(self.model.objective, objective_before)
Пример #23
0
 def test_pfba(self):
     """pfba must not use more total flux than FBA and must restore the objective."""
     objective_before = self.model.objective
     fba_total = sum(abs(val) for val in list(fba(self.model).fluxes.values()))
     pfba_total = sum(abs(val) for val in list(pfba(self.model).fluxes.values()))
     # looks like GLPK finds a parsimonious solution without the flux minimization objective
     self.assertTrue((pfba_total - fba_total) < 1e-6,
                     msg="FBA sum is suppose to be lower than PFBA (was %f)" % (pfba_total - fba_total))
     self.assertIs(self.model.objective, objective_before)
Пример #24
0
 def test_lmoma_change_ref(self, core_model):
     """Scaling the reference by 10 must pull the lmoma solution away from the pfba fluxes.

     Fix: the original failure message claimed the distance "must be 0" although
     the assertion requires it to be non-zero; the message now matches the check.
     """
     original_objective = core_model.objective
     pfba_solution = pfba(core_model)
     scaled_reference = {rid: 10 * flux for rid, flux in pfba_solution.items()}
     solution = lmoma(core_model, reference=scaled_reference)
     distance = sum(abs(solution[rid] - pfba_solution[rid]) for rid in pfba_solution.keys())
     assert abs(0 - distance) > 1e-6, \
         "lmoma distance with a scaled reference must be non-zero (was %f)" % distance
     assert core_model.objective.expression == original_objective.expression
     assert not any(v.name.startswith("u_") for v in core_model.solver.variables)
     assert not any(c.name.startswith("lmoma_const_") for c in core_model.solver.constraints)
Пример #25
0
 def test_lmoma_change_ref(self):
     """lmoma against a 10x-scaled reference must end up away from the pfba fluxes.

     Fix: assertNotAlmostEqual checks the distance is NOT ~0, but the original
     message said the opposite ("must be 0"); the message now matches the check.
     The continuation-line indentation is also corrected.
     """
     original_objective = self.model.objective
     pfba_solution = pfba(self.model)
     scaled_reference = {rid: 10 * flux for rid, flux in pfba_solution.items()}
     solution = lmoma(self.model, reference=scaled_reference)
     distance = sum(abs(solution[rid] - pfba_solution[rid]) for rid in pfba_solution.keys())
     self.assertNotAlmostEqual(0, distance,
                               delta=1e-6,
                               msg="lmoma distance with a scaled reference must be non-zero (was %f)" % distance)
     self.assertIs(self.model.objective, original_objective)
Пример #26
0
 def test_pfba(self, core_model):
     """pfba should not need more total flux than plain FBA on the core model."""
     objective_before = core_model.objective
     fba_total = sum(abs(val) for val in list(fba(core_model).fluxes.values))
     pfba_total = sum(abs(val) for val in list(pfba(core_model).fluxes.values))
     # looks like GLPK finds a parsimonious solution without the flux minimization objective
     assert (pfba_total - fba_total) < 1e-6, \
         "FBA sum is suppose to be lower than PFBA (was %f)" % (pfba_total - fba_total)
     assert core_model.objective.expression == objective_before.expression
Пример #27
0
 def test_pfba(self, core_model):
     """Total absolute pfba flux should not exceed the FBA total on the core model."""
     objective_before = core_model.objective
     fba_fluxes = list(fba(core_model).fluxes.values)
     pfba_fluxes = list(pfba(core_model).fluxes.values)
     fba_total = sum(abs(val) for val in fba_fluxes)
     pfba_total = sum(abs(val) for val in pfba_fluxes)
     # looks like GLPK finds a parsimonious solution without the flux minimization objective
     assert (pfba_total - fba_total) < 1e-6, \
         "FBA sum is suppose to be lower than PFBA (was %f)" % (pfba_total - fba_total)
     assert core_model.objective.expression == objective_before.expression
Пример #28
0
 def test_lmoma_change_ref(self, core_model):
     """A 10x-scaled reference must move the lmoma solution off the pfba fluxes.

     Fix: the original failure message said the distance "must be 0" although
     the assertion demands it be non-zero; the message now matches the check.
     """
     original_objective = core_model.objective
     pfba_solution = pfba(core_model)
     scaled_reference = {rid: 10 * flux for rid, flux in pfba_solution.items()}
     solution = lmoma(core_model, reference=scaled_reference)
     distance = sum(abs(solution[rid] - pfba_solution[rid]) for rid in pfba_solution.keys())
     assert abs(0 - distance) > 1e-6, \
         "lmoma distance with a scaled reference must be non-zero (was %f)" % distance
     assert core_model.objective.expression == original_objective.expression
     assert not any(v.name.startswith("u_") for v in core_model.solver.variables)
     assert not any(c.name.startswith("lmoma_const_") for c in core_model.solver.constraints)
Пример #29
0
 def test_lmoma_change_ref(self, core_model):
     """A 10x-scaled reference must move the lmoma solution away from the pfba fluxes.

     Fix: the original failure message claimed the distance "must be 0" although
     the assertion requires it to be non-zero; the message now matches the check.
     """
     original_objective = core_model.objective
     pfba_solution = pfba(core_model)
     scaled_reference = {rid: 10 * flux for rid, flux in pfba_solution.items()}
     solution = lmoma(core_model, reference=scaled_reference)
     distance = sum(abs(solution[rid] - pfba_solution[rid])
                    for rid in pfba_solution.keys())
     assert abs(0 - distance) > 1e-6, \
         "lmoma distance with a scaled reference must be non-zero (was %f)" % distance
     assert core_model.objective is original_objective
Пример #30
0
 def test_moma(self, core_model):
     """moma with no knockouts stays on the pfba reference and cleans up after itself."""
     if current_solver_name(core_model) == 'glpk':
         pytest.skip('glpk does not support qp')
     objective_before = core_model.objective
     reference = pfba(core_model)
     moma_solution = moma(core_model, reference=reference)
     distance = sum(abs(moma_solution[rid] - reference[rid]) for rid in reference.keys())
     assert abs(0 - distance) < 1e-6, "moma distance without knockouts must be 0 (was %f)" % distance
     assert core_model.objective.expression == objective_before.expression
     assert all(not v.name.startswith("moma_aux_") for v in core_model.solver.variables)
     assert all(not c.name.startswith("moma_const_") for c in core_model.solver.constraints)
Пример #31
0
 def test_moma(self, core_model):
     """moma without knockouts must land on the pfba reference and leave no aux machinery."""
     if current_solver_name(core_model) == 'glpk':
         pytest.skip('glpk does not support qp')
     objective_before = core_model.objective
     reference = pfba(core_model)
     moma_solution = moma(core_model, reference=reference)
     deviations = [abs(moma_solution[rid] - reference[rid]) for rid in reference.keys()]
     distance = sum(deviations)
     assert abs(0 - distance) < 1e-6, "moma distance without knockouts must be 0 (was %f)" % distance
     assert core_model.objective.expression == objective_before.expression
     assert not [v for v in core_model.solver.variables if v.name.startswith("moma_aux_")]
     assert not [c for c in core_model.solver.constraints if c.name.startswith("moma_const_")]
Пример #32
0
 def test_pfba_iJO(self):
     """pfba on iJO1366 should not use more total flux than plain FBA."""
     fba_total = sum(abs(val) for val in fba(iJO_MODEL).fluxes.values())
     pfba_total = sum(abs(val) for val in pfba(iJO_MODEL).fluxes.values())
     print(pfba_total)
     self.assertTrue(
         (pfba_total - fba_total) < 1e-6,
         msg="FBA sum is suppose to be lower than PFBA (was %f)" %
         (pfba_total - fba_total))
Пример #33
0
 def test_moma(self, core_model):
     """Without knockouts moma must stay at the pfba reference distribution."""
     if current_solver_name(core_model) == 'glpk':
         pytest.skip('glpk does not support qp')
     objective_before = core_model.objective
     reference = pfba(core_model)
     moma_solution = moma(core_model, reference=reference)
     distance = sum(abs(moma_solution[rid] - reference[rid])
                    for rid in reference.keys())
     assert abs(0 - distance) < 1e-6, \
         "moma distance without knockouts must be 0 (was %f)" % distance
     assert core_model.objective is objective_before
Пример #34
0
 def test_lmoma(self):
     """lmoma with no knockouts must reproduce the pfba reference fluxes."""
     reference = pfba(self.model)
     lmoma_solution = lmoma(self.model, reference=reference)
     # Total absolute deviation of the lmoma fluxes from the reference.
     distance = sum(abs(lmoma_solution[rid] - reference[rid])
                    for rid in reference.keys())
     self.assertAlmostEqual(
         0,
         distance,
         delta=1e-6,
         msg="lmoma distance without knockouts must be 0 (was %f)" %
         distance)
Пример #35
0
 def test_lmoma_with_cache(self, core_model):
     """lmoma with a ProblemCache adds its machinery per knockout; reset() removes it."""
     objective_before = core_model.objective
     reference = pfba(core_model)
     essential = find_essential_reactions(core_model)
     cache = ProblemCache(core_model)
     for reaction in core_model.reactions:
         if reaction in essential:
             continue
         with core_model:
             reaction.knock_out()
             lmoma(core_model, reference=reference, cache=cache)
             # While cached, the lmoma machinery must be present in the solver.
             assert any(v.name.startswith("u_") for v in core_model.solver.variables)
             assert any(c.name.startswith("lmoma_const_") for c in core_model.solver.constraints)
     cache.reset()
     assert core_model.objective.expression == objective_before.expression
     assert not any(v.name.startswith("u_") for v in core_model.solver.variables)
     assert not any(c.name.startswith("lmoma_const_") for c in core_model.solver.constraints)
Пример #36
0
 def test_lmoma_with_cache(self, core_model):
     """Cached lmoma keeps u_ variables/lmoma_const_ constraints alive until reset()."""
     objective_before = core_model.objective
     reference = pfba(core_model)
     essential = find_essential_reactions(core_model)
     cache = ProblemCache(core_model)
     non_essential = (rxn for rxn in core_model.reactions if rxn not in essential)
     for rxn in non_essential:
         with core_model:
             rxn.knock_out()
             lmoma(core_model, reference=reference, cache=cache)
             assert any(v.name.startswith("u_") for v in core_model.solver.variables)
             assert any(c.name.startswith("lmoma_const_") for c in core_model.solver.constraints)
     cache.reset()
     # After reset the solver must be back to its pristine state.
     assert core_model.objective.expression == objective_before.expression
     assert not any(v.name.startswith("u_") for v in core_model.solver.variables)
     assert not any(c.name.startswith("lmoma_const_") for c in core_model.solver.constraints)
Пример #37
0
 def test_moma_with_cache(self, core_model):
     """Cached moma installs moma_aux_/moma_const_ per knockout; reset() removes them."""
     if current_solver_name(core_model) == 'glpk':
         pytest.skip('glpk does not support qp')
     objective_before = core_model.objective
     reference = pfba(core_model)
     essential = find_essential_reactions(core_model)
     cache = ProblemCache(core_model)
     for reaction in core_model.reactions:
         if reaction in essential:
             continue
         with core_model:
             reaction.knock_out()
             moma(core_model, reference=reference, cache=cache)
             assert any(v.name.startswith("moma_aux_") for v in core_model.solver.variables)
             assert any(c.name.startswith("moma_const_") for c in core_model.solver.constraints)
     cache.reset()
     assert core_model.objective.expression == objective_before.expression
     assert not any(v.name.startswith("moma_aux_") for v in core_model.solver.variables)
     assert not any(c.name.startswith("moma_const_") for c in core_model.solver.constraints)
Пример #38
0
 def test_moma_with_cache(self, core_model):
     """moma + ProblemCache: per-knockout machinery present, gone again after reset()."""
     if current_solver_name(core_model) == 'glpk':
         pytest.skip('glpk does not support qp')
     objective_before = core_model.objective
     reference = pfba(core_model)
     essential = find_essential_reactions(core_model)
     cache = ProblemCache(core_model)
     non_essential = (rxn for rxn in core_model.reactions if rxn not in essential)
     for rxn in non_essential:
         with core_model:
             rxn.knock_out()
             moma(core_model, reference=reference, cache=cache)
             assert any(v.name.startswith("moma_aux_") for v in core_model.solver.variables)
             assert any(c.name.startswith("moma_const_") for c in core_model.solver.constraints)
     cache.reset()
     assert core_model.objective.expression == objective_before.expression
     assert not any(v.name.startswith("moma_aux_") for v in core_model.solver.variables)
     assert not any(c.name.startswith("moma_const_") for c in core_model.solver.constraints)
Пример #39
0
 def test_room_with_cache(self, core_model):
     """room with a ProblemCache keeps its machinery per knockout; reset() cleans up."""
     objective_before = core_model.objective
     reference = pfba(core_model)
     essential = find_essential_reactions(core_model)
     cache = ProblemCache(core_model)
     infeasible_count = 0
     for reaction in core_model.reactions:
         if reaction in essential:
             continue
         with core_model:
             reaction.knock_out()
             try:
                 room(core_model, reference=reference, cache=cache)
             except OptimizationError:  # TODO: room shouldn't return infeasible for non-essential reactions
                 infeasible_count += 1
                 continue
             assert any(v.name.startswith("y_") for v in core_model.solver.variables)
             assert any(c.name.startswith("room_const_") for c in core_model.solver.constraints)
     assert infeasible_count < len(core_model.reactions)
     cache.reset()
     assert core_model.objective.expression == objective_before.expression
     assert not any(v.name.startswith("y_") for v in core_model.solver.variables)
     assert not any(c.name.startswith("room_const_") for c in core_model.solver.constraints)
Пример #40
0
 def test_room_with_cache(self, core_model):
     """room + ProblemCache: per-knockout machinery present, removed after reset()."""
     objective_before = core_model.objective
     reference = pfba(core_model)
     essential = find_essential_reactions(core_model)
     cache = ProblemCache(core_model)
     infeasible_count = 0
     non_essential = (rxn for rxn in core_model.reactions if rxn not in essential)
     for rxn in non_essential:
         with core_model:
             rxn.knock_out()
             try:
                 room(core_model, reference=reference, cache=cache)
                 assert any(v.name.startswith("y_") for v in core_model.solver.variables)
                 assert any(c.name.startswith("room_const_") for c in core_model.solver.constraints)
             except OptimizationError:  # TODO: room shouldn't return infeasible for non-essential reactions
                 infeasible_count += 1
                 continue
     assert infeasible_count < len(core_model.reactions)
     cache.reset()
     assert core_model.objective.expression == objective_before.expression
     assert not any(v.name.startswith("y_") for v in core_model.solver.variables)
     assert not any(c.name.startswith("room_const_") for c in core_model.solver.constraints)
Пример #41
0
    def run(self, surface_only=True, improvements_only=True, view=None):
        """Run the differential flux variability analysis.

        Parameters
        ----------
        surface_only : bool, optional
            If only the surface of the n-dimensional production envelope should be scanned (defaults to True).
        improvements_only : bool, optional
            If only grid points should be scanned that constitute an improvement in production
            over the reference state (defaults to True).
        view : SequentialView or MultiprocessingView or ipython.cluster.DirectView, optional
            A parallelization view (defaults to SequentialView).

        Returns
        -------
        pandas.Panel
            A pandas Panel containing a results DataFrame for every grid point scanned.
        """
        with TimeMachine() as tm:
            # Make sure that the design_space_model is initialized to its original state later
            # (do=int is a no-op callable; only the undo actions matter here).
            for variable in self.variables:
                reaction = self.design_space_model.reactions.get_by_id(
                    variable)
                tm(do=int,
                   undo=partial(setattr, reaction, 'lower_bound',
                                reaction.lower_bound))
                tm(do=int,
                   undo=partial(setattr, reaction, 'upper_bound',
                                reaction.upper_bound))
            target_reaction = self.design_space_model.reactions.get_by_id(
                self.objective)
            tm(do=int,
               undo=partial(setattr, target_reaction, 'lower_bound',
                            target_reaction.lower_bound))
            tm(do=int,
               undo=partial(setattr, target_reaction, 'upper_bound',
                            target_reaction.upper_bound))

            if view is None:
                view = config.default_view
            else:
                view = view

            # Every non-excluded reference reaction plus the design variables and target.
            included_reactions = [
                reaction.id for reaction in self.reference_model.reactions
                if reaction.id not in self.exclude
            ] + self.variables + [self.objective]

            self.reference_flux_dist = pfba(self.reference_model,
                                            fraction_of_optimum=0.99)

            self.reference_flux_ranges = flux_variability_analysis(
                self.reference_model,
                reactions=included_reactions,
                view=view,
                remove_cycles=False,
                fraction_of_optimum=0.75).data_frame

            self._init_search_grid(surface_only=surface_only,
                                   improvements_only=improvements_only)

            # Evaluate an FVA at every grid point, possibly in parallel.
            progress = ProgressBar(len(self.grid))
            func_obj = _DifferentialFvaEvaluator(self.design_space_model,
                                                 self.variables,
                                                 self.objective,
                                                 included_reactions)
            results = list(progress(view.imap(func_obj, self.grid.iterrows())))

        # Key each FVA result by its grid point coordinates.
        solutions = dict((tuple(point.iteritems()), fva_result)
                         for (point, fva_result) in results)
        reference_intervals = self.reference_flux_ranges[[
            'lower_bound', 'upper_bound'
        ]].values
        # Per grid point: gap between each reaction's flux interval and the reference interval.
        for sol in six.itervalues(solutions):
            intervals = sol[['lower_bound', 'upper_bound']].values
            gaps = [
                self._interval_gap(interval1, interval2) for interval1,
                interval2 in my_zip(reference_intervals, intervals)
            ]
            sol['gaps'] = gaps
            if self.normalize_ranges_by is not None:
                # Scale intervals by the lower bound of the chosen reaction before gap computation.
                normalizer = sol.lower_bound[self.normalize_ranges_by]
                normalized_intervals = sol[['lower_bound', 'upper_bound'
                                            ]].values / normalizer

                normalized_gaps = [
                    self._interval_gap(interval1, interval2)
                    for interval1, interval2 in my_zip(reference_intervals,
                                                       normalized_intervals)
                ]
                sol['normalized_gaps'] = normalized_gaps
            else:
                sol['normalized_gaps'] = gaps

        # Snap near-zero reference bounds to exactly 0 before classification.
        ref_upper_bound = self.reference_flux_ranges.upper_bound.apply(
            lambda v: 0 if abs(v) < non_zero_flux_threshold else v)
        ref_lower_bound = self.reference_flux_ranges.lower_bound.apply(
            lambda v: 0 if abs(v) < non_zero_flux_threshold else v)

        # Annotate each grid point's DataFrame with per-reaction classification flags.
        for df in six.itervalues(solutions):
            df['KO'] = False
            df['flux_reversal'] = False
            df['suddenly_essential'] = False
            df['free_flux'] = False

            # NOTE(review): requiring BOTH reference bounds to be non-zero (`&`) only
            # marks a KO when the reference interval excludes zero entirely — confirm
            # this is intended rather than `|`.
            ko_selection = df[(df.lower_bound == 0) & (df.upper_bound == 0) &
                              (ref_upper_bound != 0) & (ref_lower_bound != 0)]

            flux_reversal_selection = df[((ref_upper_bound < 0) &
                                          (df.lower_bound > 0) |
                                          ((ref_lower_bound > 0) &
                                           (df.upper_bound < 0)))]

            # NOTE(review): `(df.lower_bound <= 0) & (df.lower_bound > 0)` is always
            # empty, so only the second clause can ever select rows — presumably one
            # operand was meant to be a reference bound; confirm against upstream.
            suddenly_essential_selection = df[((df.lower_bound <= 0) &
                                               (df.lower_bound > 0)) |
                                              ((ref_lower_bound >= 0) &
                                               (df.upper_bound <= 0))]

            is_reversible = [
                self.design_space_model.reactions.get_by_id(i).reversibility
                for i in df.index
            ]
            not_reversible = [not v for v in is_reversible]
            # "Free" flux: the reaction sits at the default +/-1000 bounds consistent
            # with its reversibility (NOTE(review): the 1000 limits are hard-coded).
            free_flux_selection = df[((df.lower_bound == -1000) &
                                      (df.upper_bound == 1000) & is_reversible)
                                     | ((df.lower_bound == 0) &
                                        (df.upper_bound == 1000)
                                        & not_reversible) |
                                     ((df.lower_bound == -1000) &
                                      (df.upper_bound == 0) & not_reversible)]

            df.loc[suddenly_essential_selection.index,
                   'suddenly_essential'] = True
            df.loc[ko_selection.index, 'KO'] = True
            df.loc[flux_reversal_selection.index, 'flux_reversal'] = True
            df.loc[free_flux_selection.index, 'free_flux'] = True

            df['excluded'] = [index in self.exclude for index in df.index]

        return DifferentialFVAResult(pandas.Panel(solutions), self.envelope,
                                     self.reference_flux_ranges,
                                     self.reference_flux_dist)
Пример #42
0
 def test_pfba_with_reaction_filter(self, core_model):
     """Filtered pfba reports exactly the requested fluxes and restores the objective."""
     objective_before = core_model.objective
     exchanges = ['EX_o2_LPAREN_e_RPAREN_', 'EX_glc_LPAREN_e_RPAREN_']
     filtered_solution = pfba(core_model, reactions=exchanges)
     assert len(filtered_solution.fluxes) == 2
     assert core_model.objective.expression == objective_before.expression
Пример #43
0
 def simulation_method(self, simulation_method):
     """Set the simulation method; (l)moma/room need a WT reference, so build one via pfba if missing."""
     requires_reference = simulation_method in (lmoma, moma, room)
     if requires_reference and self.wt_reference is None:
         logger.info("No WT reference found, generating using pfba.")
         self.wt_reference = pfba(self.model).fluxes
     self._simulation_method = simulation_method
Пример #44
0
    def run(self, surface_only=True, improvements_only=True, progress=True, view=None):
        """Run the differential flux variability analysis.

        Parameters
        ----------
        surface_only : bool, optional
            If only the surface of the n-dimensional production envelope should be scanned (defaults to True).
        improvements_only : bool, optional
            If only grid points should be scanned that constitute an improvement in production
            over the reference state (defaults to True).
        progress : bool, optional
            If a progress bar should be shown.
        view : SequentialView or MultiprocessingView or ipython.cluster.DirectView, optional
            A parallelization view (defaults to SequentialView).

        Returns
        -------
        pandas.Panel
            A pandas Panel containing a results DataFrame for every grid point scanned.
        """
        with TimeMachine() as tm:
            # Make sure that the design_space_model is initialized to its original state later
            # (do=int is a no-op callable; only the registered undo actions matter).
            for variable in self.variables:
                reaction = self.design_space_model.reactions.get_by_id(variable)
                tm(do=int, undo=partial(setattr, reaction, 'lower_bound', reaction.lower_bound))
                tm(do=int, undo=partial(setattr, reaction, 'upper_bound', reaction.upper_bound))
            target_reaction = self.design_space_model.reactions.get_by_id(self.objective)
            tm(do=int, undo=partial(setattr, target_reaction, 'lower_bound', target_reaction.lower_bound))
            tm(do=int, undo=partial(setattr, target_reaction, 'upper_bound', target_reaction.upper_bound))

            if view is None:
                view = config.default_view
            else:
                view = view  # no-op branch, kept for symmetry

            # Reactions to report on: everything not explicitly excluded, plus
            # the design variables and the production objective itself.
            included_reactions = [reaction.id for reaction in self.reference_model.reactions if
                                  reaction.id not in self.exclude] + self.variables + [self.objective]

            # Reference (wild-type-like) flux distribution and flux ranges.
            self.reference_flux_dist = pfba(self.reference_model, fraction_of_optimum=0.99)

            self.reference_flux_ranges = flux_variability_analysis(self.reference_model, reactions=included_reactions,
                                                                   view=view, remove_cycles=False,
                                                                   fraction_of_optimum=0.75).data_frame

            self._init_search_grid(surface_only=surface_only, improvements_only=improvements_only)

            # Evaluate FVA at every (biomass, production) grid point.
            func_obj = _DifferentialFvaEvaluator(self.design_space_model, self.variables, self.objective,
                                                 included_reactions)
            if progress:
                progress = ProgressBar(len(self.grid))
                results = list(progress(view.imap(func_obj, self.grid.iterrows())))
            else:
                results = list(view.map(func_obj, self.grid.iterrows()))

        # Key each FVA result by its grid point, e.g. (('biomass', b), ('production', p)).
        solutions = dict((tuple(point.iteritems()), fva_result) for (point, fva_result) in results)
        reference_intervals = self.reference_flux_ranges[['lower_bound', 'upper_bound']].values
        for sol in six.itervalues(solutions):
            # Per-reaction gap between the reference flux range and this grid
            # point's flux range (see _interval_gap for the exact definition).
            intervals = sol[['lower_bound', 'upper_bound']].values
            gaps = [self._interval_gap(interval1, interval2) for interval1, interval2 in
                    my_zip(reference_intervals, intervals)]
            sol['gaps'] = gaps
            if self.normalize_ranges_by is not None:
                # NOTE(review): the normalizer is the *signed* lower bound, so a
                # negative normalizer always fails the threshold test below —
                # confirm this is intended (a later revision applies abs()).
                normalizer = sol.lower_bound[self.normalize_ranges_by]
                if normalizer > non_zero_flux_threshold:
                    normalized_intervals = sol[['lower_bound', 'upper_bound']].values / normalizer

                    sol['normalized_gaps'] = [self._interval_gap(interval1, interval2) for interval1, interval2 in
                                              my_zip(reference_intervals, normalized_intervals)]
                else:
                    sol['normalized_gaps'] = [numpy.nan] * len(sol.lower_bound)
            else:
                sol['normalized_gaps'] = gaps

        # Treat near-zero reference bounds as exactly zero for classification.
        ref_upper_bound = self.reference_flux_ranges.upper_bound.apply(
            lambda v: 0 if abs(v) < non_zero_flux_threshold else v)
        ref_lower_bound = self.reference_flux_ranges.lower_bound.apply(
            lambda v: 0 if abs(v) < non_zero_flux_threshold else v)

        collection = list()
        for key, df in six.iteritems(solutions):
            # key is (('biomass', b), ('production', p)); unpack the values.
            df['biomass'] = key[0][1]
            df['production'] = key[1][1]

            df['KO'] = False
            df['flux_reversal'] = False
            df['suddenly_essential'] = False
            df['free_flux'] = False

            # Knocked out: flux is forced to zero here while the reference carried flux.
            df.loc[
                (df.lower_bound == 0) & (
                    df.upper_bound == 0) & (
                        ref_upper_bound != 0) & (
                            ref_lower_bound != 0),
                'KO'
            ] = True

            # Flux direction flipped relative to the reference range.
            df.loc[
                ((ref_upper_bound < 0) & (df.lower_bound > 0) | (
                    (ref_lower_bound > 0) & (df.upper_bound < 0))),
                'flux_reversal'
            ] = True

            # NOTE(review): the first clause below,
            # (df.lower_bound <= 0) & (df.lower_bound > 0), is always False —
            # this looks like a typo (a later revision compares the *reference*
            # range against zero instead). Confirm the intended condition.
            df.loc[
                ((df.lower_bound <= 0) & (df.lower_bound > 0)) | (
                    (ref_lower_bound >= 0) & (df.upper_bound <= 0)),
                'suddenly_essential'
            ] = True

            is_reversible = numpy.asarray([
                self.design_space_model.reactions.get_by_id(i).reversibility
                for i in df.index], dtype=bool)
            not_reversible = numpy.logical_not(is_reversible)

            # Free flux: the reaction still sits at its full bounds
            # (assumes the model's default bounds are +/-1000 — TODO confirm).
            df.loc[
                ((df.lower_bound == -1000) & (df.upper_bound == 1000) & is_reversible) | (
                    (df.lower_bound == 0) & (df.upper_bound == 1000) & not_reversible) | (
                        (df.lower_bound == -1000) & (df.upper_bound == 0) & not_reversible),
                'free_flux'
            ] = True

            df['reaction'] = df.index
            df['excluded'] = df['reaction'].isin(self.exclude)

            collection.append(df)

#        multi_index = [(key[0][1], key[1][1]) for key in solutions]
#        solutions_multi_index = pandas.concat(list(solutions.values()),
        # axis=0, keys=multi_index)#
#        solutions_multi_index.index.set_names(['biomass', 'production',
        # 'reaction'], inplace=True)
        total = pandas.concat(collection, ignore_index=True, copy=False)
        total.sort_values(['biomass', 'production', 'reaction'], inplace=True)
        total.index = total['reaction']
        return DifferentialFVAResult(total, self.envelope, self.reference_flux_ranges, self.reference_flux_dist)
Пример #45
0
    def run(self,
            surface_only=True,
            improvements_only=True,
            progress=True,
            view=None):
        """Run the differential flux variability analysis.

        Parameters
        ----------
        surface_only : bool, optional
            If only the surface of the n-dimensional production envelope should be scanned (defaults to True).
        improvements_only : bool, optional
            If only grid points should be scanned that constitute an improvement in production
            over the reference state (defaults to True).
        progress : bool, optional
            If a progress bar should be shown.
        view : SequentialView or MultiprocessingView or ipython.cluster.DirectView, optional
            A parallelization view (defaults to SequentialView).

        Returns
        -------
        pandas.Panel
            A pandas Panel containing a results DataFrame for every grid point scanned.
        """
        with TimeMachine() as tm:
            # Make sure that the design_space_model is initialized to its original state later
            # (do=int is a no-op callable; only the registered undo actions matter).
            for variable in self.variables:
                reaction = self.design_space_model.reactions.get_by_id(
                    variable)
                tm(do=int,
                   undo=partial(setattr, reaction, 'lower_bound',
                                reaction.lower_bound))
                tm(do=int,
                   undo=partial(setattr, reaction, 'upper_bound',
                                reaction.upper_bound))
            target_reaction = self.design_space_model.reactions.get_by_id(
                self.objective)
            tm(do=int,
               undo=partial(setattr, target_reaction, 'lower_bound',
                            target_reaction.lower_bound))
            tm(do=int,
               undo=partial(setattr, target_reaction, 'upper_bound',
                            target_reaction.upper_bound))

            if view is None:
                view = config.default_view
            else:
                view = view  # no-op branch, kept for symmetry

            # Reactions to report on: everything not explicitly excluded, plus
            # the design variables and the production objective itself.
            included_reactions = [
                reaction.id for reaction in self.reference_model.reactions
                if reaction.id not in self.exclude
            ] + self.variables + [self.objective]

            # Reference (wild-type-like) flux distribution and flux ranges.
            self.reference_flux_dist = pfba(self.reference_model,
                                            fraction_of_optimum=0.99)

            self.reference_flux_ranges = flux_variability_analysis(
                self.reference_model,
                reactions=included_reactions,
                view=view,
                remove_cycles=False,
                fraction_of_optimum=0.75).data_frame

            self._init_search_grid(surface_only=surface_only,
                                   improvements_only=improvements_only)

            # Evaluate FVA at every (biomass, production) grid point.
            func_obj = _DifferentialFvaEvaluator(self.design_space_model,
                                                 self.variables,
                                                 self.objective,
                                                 included_reactions)
            if progress:
                progress = ProgressBar(len(self.grid))
                results = list(
                    progress(view.imap(func_obj, self.grid.iterrows())))
            else:
                results = list(view.map(func_obj, self.grid.iterrows()))

        # Key each FVA result by its grid point, e.g. (('biomass', b), ('production', p)).
        solutions = dict((tuple(point.iteritems()), fva_result)
                         for (point, fva_result) in results)
        reference_intervals = self.reference_flux_ranges[[
            'lower_bound', 'upper_bound'
        ]].values
        for sol in six.itervalues(solutions):
            # Per-reaction gap between the reference flux range and this grid
            # point's flux range (see _interval_gap for the exact definition).
            intervals = sol[['lower_bound', 'upper_bound']].values
            gaps = [
                self._interval_gap(interval1, interval2) for interval1,
                interval2 in my_zip(reference_intervals, intervals)
            ]
            sol['gaps'] = gaps
            if self.normalize_ranges_by is not None:
                # NOTE(review): the normalizer is the *signed* lower bound, so a
                # negative normalizer always fails the threshold test below —
                # confirm this is intended (a later revision applies abs()).
                normalizer = sol.lower_bound[self.normalize_ranges_by]
                if normalizer > non_zero_flux_threshold:
                    normalized_intervals = sol[['lower_bound', 'upper_bound'
                                                ]].values / normalizer

                    sol['normalized_gaps'] = [
                        self._interval_gap(interval1, interval2)
                        for interval1, interval2 in my_zip(
                            reference_intervals, normalized_intervals)
                    ]
                else:
                    sol['normalized_gaps'] = [numpy.nan] * len(sol.lower_bound)
            else:
                sol['normalized_gaps'] = gaps

        # Treat near-zero reference bounds as exactly zero for classification.
        ref_upper_bound = self.reference_flux_ranges.upper_bound.apply(
            lambda v: 0 if abs(v) < non_zero_flux_threshold else v)
        ref_lower_bound = self.reference_flux_ranges.lower_bound.apply(
            lambda v: 0 if abs(v) < non_zero_flux_threshold else v)

        collection = list()
        for key, df in six.iteritems(solutions):
            # key is (('biomass', b), ('production', p)); unpack the values.
            df['biomass'] = key[0][1]
            df['production'] = key[1][1]

            df['KO'] = False
            df['flux_reversal'] = False
            df['suddenly_essential'] = False
            df['free_flux'] = False

            # Knocked out: flux is forced to zero here while the reference carried flux.
            df.loc[(df.lower_bound == 0) & (df.upper_bound == 0) &
                   (ref_upper_bound != 0) & (ref_lower_bound != 0),
                   'KO'] = True

            # Flux direction flipped relative to the reference range.
            df.loc[((ref_upper_bound < 0) & (df.lower_bound > 0) |
                    ((ref_lower_bound > 0) & (df.upper_bound < 0))),
                   'flux_reversal'] = True

            # NOTE(review): the first clause below,
            # (df.lower_bound <= 0) & (df.lower_bound > 0), is always False —
            # this looks like a typo (a later revision compares the *reference*
            # range against zero instead). Confirm the intended condition.
            df.loc[((df.lower_bound <= 0) & (df.lower_bound > 0)) |
                   ((ref_lower_bound >= 0) & (df.upper_bound <= 0)),
                   'suddenly_essential'] = True

            is_reversible = numpy.asarray([
                self.design_space_model.reactions.get_by_id(i).reversibility
                for i in df.index
            ],
                                          dtype=bool)
            not_reversible = numpy.logical_not(is_reversible)

            # Free flux: the reaction still sits at its full bounds
            # (assumes the model's default bounds are +/-1000 — TODO confirm).
            df.loc[((df.lower_bound == -1000) &
                    (df.upper_bound == 1000) & is_reversible) |
                   ((df.lower_bound == 0) &
                    (df.upper_bound == 1000) & not_reversible) |
                   ((df.lower_bound == -1000) &
                    (df.upper_bound == 0) & not_reversible),
                   'free_flux'] = True

            df['reaction'] = df.index
            df['excluded'] = df['reaction'].isin(self.exclude)

            collection.append(df)


#        multi_index = [(key[0][1], key[1][1]) for key in solutions]
#        solutions_multi_index = pandas.concat(list(solutions.values()),
# axis=0, keys=multi_index)#
#        solutions_multi_index.index.set_names(['biomass', 'production',
# 'reaction'], inplace=True)
        total = pandas.concat(collection, ignore_index=True, copy=False)
        total.sort_values(['biomass', 'production', 'reaction'], inplace=True)
        total.index = total['reaction']
        return DifferentialFVAResult(total, self.envelope,
                                     self.reference_flux_ranges,
                                     self.reference_flux_dist)
Пример #46
0
    def run(self,
            target=None,
            max_enforced_flux=0.9,
            number_of_results=10,
            exclude=(),
            simulation_method=fba,
            simulation_kwargs=None):
        """
        Performs a Flux Scanning based on Enforced Objective Flux (FSEOF) analysis.

        Parameters
        ----------
        target: str, Reaction, Metabolite
            The target for optimization.
        max_enforced_flux : float, optional
            The maximal flux of secondary_objective that will be enforced, relative to the theoretical maximum (
            defaults to 0.9).
        number_of_results : int, optional
            The number of enforced flux levels (defaults to 10).
        exclude : Iterable of reactions or reaction ids that will not be included in the output.
        simulation_method : function, optional
            The simulation method run at each enforced flux level (defaults to fba).
        simulation_kwargs : dict, optional
            Keyword arguments forwarded to *simulation_method*.

        Returns
        -------
        FseofResult
            An object containing the identified reactions and the used parameters.

        References
        ----------
        .. [1] H. S. Choi, S. Y. Lee, T. Y. Kim, and H. M. Woo, 'In silico identification of gene amplification targets
        for improvement of lycopene production.,' Appl Environ Microbiol, vol. 76, no. 10, pp. 3097–3105, May 2010.

        """
        model = self.model
        target = get_reaction_for(model, target)

        simulation_kwargs = simulation_kwargs if simulation_kwargs is not None else {}
        simulation_kwargs['objective'] = self.primary_objective

        # Use the caller-provided reference flux distribution, or compute one
        # with pFBA under the primary objective.
        if 'reference' not in simulation_kwargs:
            reference = simulation_kwargs['reference'] = pfba(
                model, **simulation_kwargs)
        else:
            reference = simulation_kwargs['reference']

        ndecimals = config.ndecimals

        # Exclude list
        # Exchange reactions and the target itself are never reported.
        exclude = list(exclude) + model.exchanges
        exclude_ids = [target.id]
        for reaction in exclude:
            if isinstance(reaction, Reaction):
                exclude_ids.append(reaction.id)
            else:
                exclude_ids.append(reaction)

        with TimeMachine() as tm:

            # do=int is a no-op; only the undo actions (restoring the model's
            # objective and the target's bounds) matter.
            tm(do=int,
               undo=partial(setattr, model, "objective", model.objective))
            tm(do=int,
               undo=partial(setattr, target, "lower_bound",
                            target.lower_bound))
            tm(do=int,
               undo=partial(setattr, target, "upper_bound",
                            target.upper_bound))

            # Find initial flux of enforced reaction
            initial_fluxes = reference.fluxes
            initial_flux = round(initial_fluxes[target.id], ndecimals)

            # Find theoretical maximum of enforced reaction
            max_theoretical_flux = round(
                fba(model, objective=target.id,
                    reactions=[target.id]).fluxes[target.id], ndecimals)

            max_flux = max_theoretical_flux * max_enforced_flux

            # Calculate enforcement levels
            # (evenly spaced from just above the initial flux up to max_flux).
            levels = [
                initial_flux + (i + 1) *
                (max_flux - initial_flux) / number_of_results
                for i in range(number_of_results)
            ]

            # FSEOF results
            results = {reaction.id: [] for reaction in model.reactions}

            # Fix the target flux at each level and record every reaction's flux.
            for level in levels:
                target.lower_bound = level
                target.upper_bound = level
                solution = simulation_method(model, **simulation_kwargs)
                for reaction_id, flux in solution.fluxes.iteritems():
                    results[reaction_id].append(round(flux, ndecimals))

        # Test each reaction
        # Keep reactions whose flux magnitude grows beyond the reference value
        # and whose flux never changes sign across the enforcement levels.
        fseof_reactions = []
        for reaction_id, fluxes in results.items():
            if reaction_id not in exclude_ids \
                    and max(abs(max(fluxes)), abs(min(fluxes))) > abs(reference[reaction_id]) \
                    and min(fluxes) * max(fluxes) >= 0:
                fseof_reactions.append(model.reactions.get_by_id(reaction_id))

        results = {rea.id: results[rea.id] for rea in fseof_reactions}
        run_args = dict(max_enforced_flux=max_enforced_flux,
                        number_of_results=number_of_results,
                        solution_method=simulation_method,
                        simulation_kwargs=simulation_kwargs,
                        exclude=exclude)

        return FSEOFResult(fseof_reactions, target, model,
                           self.primary_objective, levels, results, run_args,
                           reference)
Пример #47
0
 def simulation_kwargs(self, simulation_kwargs):
     """Store the simulation kwargs, filling in a pFBA wild-type reference
     for methods (lmoma/moma/room) that require one."""
     method_needs_reference = self.simulation_method in [lmoma, moma, room]
     if method_needs_reference and simulation_kwargs.get("reference", None) is None:
         logger.warning("No WT reference found, generating using pfba.")
         simulation_kwargs['reference'] = pfba(self.model).fluxes
         logger.warning("Reference successfully computed.")
     self._simulation_kwargs = simulation_kwargs
Пример #48
0
 def simulation_method(self, simulation_method):
     """Set the simulation method; compute a pFBA wild-type reference first
     if the chosen method needs one and none is available yet."""
     if simulation_method in [lmoma, moma, room]:
         if self.wt_reference is None:
             logger.info("No WT reference found, generating using pfba.")
             self.wt_reference = pfba(self.model).fluxes
     self._simulation_method = simulation_method
Пример #49
0
    def run(self, target=None, max_enforced_flux=0.9, number_of_results=10, exclude=(), simulation_method=fba,
            simulation_kwargs=None):
        """
        Performs a Flux Scanning based on Enforced Objective Flux (FSEOF) analysis.

        Parameters
        ----------
        target: str, Reaction, Metabolite
            The target for optimization.
        max_enforced_flux : float, optional
            The maximal flux of secondary_objective that will be enforced, relative to the theoretical maximum (
            defaults to 0.9).
        number_of_results : int, optional
            The number of enforced flux levels (defaults to 10).
        exclude : Iterable of reactions or reaction ids that will not be included in the output.
        simulation_method : function, optional
            The simulation method run at each enforced flux level (defaults to fba).
        simulation_kwargs : dict, optional
            Keyword arguments forwarded to *simulation_method*.

        Returns
        -------
        FseofResult
            An object containing the identified reactions and the used parameters.

        References
        ----------
        .. [1] H. S. Choi, S. Y. Lee, T. Y. Kim, and H. M. Woo, 'In silico identification of gene amplification targets
        for improvement of lycopene production.,' Appl Environ Microbiol, vol. 76, no. 10, pp. 3097–3105, May 2010.

        """
        model = self.model
        target = get_reaction_for(model, target)

        simulation_kwargs = simulation_kwargs if simulation_kwargs is not None else {}
        simulation_kwargs['objective'] = self.primary_objective

        # Use the caller-provided reference flux distribution, or compute one
        # with pFBA under the primary objective.
        if 'reference' not in simulation_kwargs:
            reference = simulation_kwargs['reference'] = pfba(model, **simulation_kwargs)
        else:
            reference = simulation_kwargs['reference']

        ndecimals = config.ndecimals

        # Exclude list
        # Boundary reactions and the target itself are never reported.
        exclude = list(exclude) + model.boundary
        exclude_ids = [target.id]
        for reaction in exclude:
            if isinstance(reaction, Reaction):
                exclude_ids.append(reaction.id)
            else:
                exclude_ids.append(reaction)

        with TimeMachine() as tm:

            # do=int is a no-op; only the undo actions (restoring the model's
            # objective and the target's bounds) matter.
            tm(do=int, undo=partial(setattr, model, "objective", model.objective))
            tm(do=int, undo=partial(setattr, target, "lower_bound", target.lower_bound))
            tm(do=int, undo=partial(setattr, target, "upper_bound", target.upper_bound))

            # Find initial flux of enforced reaction
            initial_fluxes = reference.fluxes
            initial_flux = round(initial_fluxes[target.id], ndecimals)

            # Find theoretical maximum of enforced reaction
            max_theoretical_flux = round(fba(model, objective=target.id, reactions=[target.id]).fluxes[target.id],
                                         ndecimals)

            max_flux = max_theoretical_flux * max_enforced_flux

            # Calculate enforcement levels
            # (evenly spaced from just above the initial flux up to max_flux).
            levels = [initial_flux + (i + 1) * (max_flux - initial_flux) / number_of_results for i in
                      range(number_of_results)]

            # FSEOF results
            results = {reaction.id: [] for reaction in model.reactions}

            # Fix the target flux at each level and record every reaction's flux.
            for level in levels:
                target.lower_bound = level
                target.upper_bound = level
                solution = simulation_method(model, **simulation_kwargs)
                for reaction_id, flux in solution.fluxes.iteritems():
                    results[reaction_id].append(round(flux, ndecimals))

        # Test each reaction
        # Keep reactions whose flux magnitude grows beyond the reference value
        # and whose flux never changes sign across the enforcement levels.
        fseof_reactions = []
        for reaction_id, fluxes in results.items():
            if reaction_id not in exclude_ids \
                    and max(abs(max(fluxes)), abs(min(fluxes))) > abs(reference[reaction_id]) \
                    and min(fluxes) * max(fluxes) >= 0:
                fseof_reactions.append(model.reactions.get_by_id(reaction_id))

        results = {rea.id: results[rea.id] for rea in fseof_reactions}
        run_args = dict(max_enforced_flux=max_enforced_flux,
                        number_of_results=number_of_results,
                        solution_method=simulation_method,
                        simulation_kwargs=simulation_kwargs,
                        exclude=exclude)

        return FSEOFResult(fseof_reactions, target, model, self.primary_objective, levels, results, run_args, reference)
Пример #50
0
    def run(self,
            surface_only=True,
            improvements_only=True,
            progress=True,
            view=None,
            fraction_of_optimum=1.0):
        """Run the differential flux variability analysis.

        Parameters
        ----------
        surface_only : bool, optional
            If only the surface of the n-dimensional production envelope should be scanned (defaults to True).
        improvements_only : bool, optional
            If only grid points should be scanned that constitute an improvement in production
            over the reference state (defaults to True).
        progress : bool, optional
            If a progress bar should be shown.
        view : SequentialView or MultiprocessingView or ipython.cluster.DirectView, optional
            A parallelization view (defaults to SequentialView).
        fraction_of_optimum : float, optional
            A value between zero and one that determines the width of the
            flux ranges of the reference solution. The lower the value,
            the larger the ranges.

        Returns
        -------
        pandas.Panel
            A pandas Panel containing a results DataFrame for every grid point scanned.
        """
        # Calculate the reference state.
        self.reference_flux_dist = pfba(
            self.reference_model, fraction_of_optimum=fraction_of_optimum)

        self.reference_flux_ranges = flux_variability_analysis(
            self.reference_model,
            reactions=self.included_reactions,
            view=view,
            remove_cycles=False,
            fraction_of_optimum=fraction_of_optimum).data_frame
        # Clamp near-zero reference bounds to exactly zero before comparing.
        self.reference_flux_ranges[
            self.reference_flux_ranges.abs() < non_zero_flux_threshold] = 0.0
        reference_intervals = self.reference_flux_ranges.loc[
            self.included_reactions, ['lower_bound', 'upper_bound']].values

        if self.normalize_ranges_by is not None:
            logger.debug(
                self.reference_flux_ranges.loc[self.normalize_ranges_by, ])
            # The most obvious flux to normalize by is the biomass reaction
            # flux. This is probably always greater than zero. Just in case
            # the model is defined differently or some other normalizing
            # reaction is chosen, we use the absolute value.
            norm = abs(self.reference_flux_ranges.at[self.normalize_ranges_by,
                                                     "lower_bound"])
            if norm > non_zero_flux_threshold:
                normalized_reference_intervals = reference_intervals / norm
            else:
                raise ValueError(
                    "The reaction that you have chosen for normalization '{}' "
                    "has zero flux in the reference state. Please choose another "
                    "one.".format(self.normalize_ranges_by))

        with TimeMachine() as tm:
            # Make sure that the design_space_model is initialized to its original state later
            # (do=int is a no-op callable; only the registered undo actions matter).
            for variable in self.variables:
                reaction = self.design_space_model.reactions.get_by_id(
                    variable)
                tm(do=int,
                   undo=partial(setattr, reaction, 'lower_bound',
                                reaction.lower_bound))
                tm(do=int,
                   undo=partial(setattr, reaction, 'upper_bound',
                                reaction.upper_bound))
            target_reaction = self.design_space_model.reactions.get_by_id(
                self.objective)
            tm(do=int,
               undo=partial(setattr, target_reaction, 'lower_bound',
                            target_reaction.lower_bound))
            tm(do=int,
               undo=partial(setattr, target_reaction, 'upper_bound',
                            target_reaction.upper_bound))

            if view is None:
                view = config.default_view
            else:
                view = view  # no-op branch, kept for symmetry

            self._init_search_grid(surface_only=surface_only,
                                   improvements_only=improvements_only)

            # Evaluate FVA at every (biomass, production) grid point.
            func_obj = _DifferentialFvaEvaluator(self.design_space_model,
                                                 self.variables,
                                                 self.objective,
                                                 self.included_reactions)
            if progress:
                progress = ProgressBar(len(self.grid))
                results = list(
                    progress(view.imap(func_obj, self.grid.iterrows())))
            else:
                results = list(view.map(func_obj, self.grid.iterrows()))

        # Key each FVA result by its grid point, e.g. (('biomass', b), ('production', p)).
        solutions = dict((tuple(point.iteritems()), fva_result)
                         for (point, fva_result) in results)

        for sol in solutions.values():
            # Clamp near-zero fluxes, then compute the per-reaction gap between
            # the reference range and this grid point's range.
            sol[sol.abs() < non_zero_flux_threshold] = 0.0
            intervals = sol.loc[self.included_reactions,
                                ['lower_bound', 'upper_bound']].values
            gaps = [
                self._interval_gap(interval1, interval2)
                for interval1, interval2 in zip(reference_intervals, intervals)
            ]
            sol['gaps'] = gaps
            if self.normalize_ranges_by is not None:
                # See comment above regarding normalization.
                normalizer = abs(sol.lower_bound[self.normalize_ranges_by])
                if normalizer > non_zero_flux_threshold:
                    normalized_intervals = sol.loc[
                        self.included_reactions,
                        ['lower_bound', 'upper_bound']].values / normalizer

                    sol['normalized_gaps'] = [
                        self._interval_gap(interval1, interval2)
                        for interval1, interval2 in zip(
                            normalized_reference_intervals,
                            normalized_intervals)
                    ]
                else:
                    sol['normalized_gaps'] = numpy.nan
            else:
                sol['normalized_gaps'] = gaps

        # Determine where the reference flux range overlaps with zero.
        zero_overlap_mask = numpy.asarray([
            self._interval_overlap(interval1, (0, 0)) > 0
            for interval1 in reference_intervals
        ],
                                          dtype=bool)
        collection = list()
        for key, df in solutions.items():
            # key is (('biomass', b), ('production', p)); unpack the values.
            df['biomass'] = key[0][1]
            df['production'] = key[1][1]

            df['KO'] = False
            df['flux_reversal'] = False
            df['suddenly_essential'] = False
            df['free_flux'] = False

            # Knocked out: flux forced to zero here although the reference
            # range did not include zero.
            df.loc[(df.lower_bound == 0) & (df.upper_bound == 0) &
                   (~zero_overlap_mask), 'KO'] = True

            # Flux direction flipped relative to the reference range.
            df.loc[((self.reference_flux_ranges.upper_bound < 0) &
                    (df.lower_bound > 0) |
                    ((self.reference_flux_ranges.lower_bound > 0) &
                     (df.upper_bound < 0))), 'flux_reversal'] = True

            # Suddenly essential: the reference range allowed zero flux but
            # this grid point forces a non-zero flux in either direction.
            df.loc[(zero_overlap_mask & (df.lower_bound > 0)) |
                   (zero_overlap_mask & (df.upper_bound < 0)),
                   'suddenly_essential'] = True

            is_reversible = numpy.asarray([
                self.design_space_model.reactions.get_by_id(i).reversibility
                for i in df.index
            ],
                                          dtype=bool)
            not_reversible = ~is_reversible

            # Free flux: the reaction still sits at its full bounds
            # (assumes the model's default bounds are +/-1000 — TODO confirm).
            df.loc[((df.lower_bound == -1000) &
                    (df.upper_bound == 1000) & is_reversible) |
                   ((df.lower_bound == 0) &
                    (df.upper_bound == 1000) & not_reversible) |
                   ((df.lower_bound == -1000) &
                    (df.upper_bound == 0) & not_reversible),
                   'free_flux'] = True

            df['reaction'] = df.index
            df['excluded'] = df['reaction'].isin(self.exclude)

            collection.append(df)


#        multi_index = [(key[0][1], key[1][1]) for key in solutions]
#        solutions_multi_index = pandas.concat(list(solutions.values()),
# axis=0, keys=multi_index)#
#        solutions_multi_index.index.set_names(['biomass', 'production',
# 'reaction'], inplace=True)
        total = pandas.concat(collection, ignore_index=True, copy=False)
        total.sort_values(['biomass', 'production', 'reaction'], inplace=True)
        total.index = total['reaction']
        # NOTE(review): unlike the other revisions, this constructor call omits
        # the reference flux distribution — confirm the 3-argument signature.
        return DifferentialFVAResult(total, self.envelope,
                                     self.reference_flux_ranges)