Mixed_Model.default_parameters.update(default_parameters)

    # Produce plots
    times = [60]
    # No Negative Feedback
    Mixed_Model.set_parameters({'kSOCSon': 0, 'kIntBasal_r1': 0, 'kIntBasal_r2': 0, 'kint_a': 0, 'kint_b': 0})

    dradf = Mixed_Model.doseresponse(times, 'TotalpSTAT', 'Ia', list(logspace(-2, 8)),
                                     parameters={'Ib': 0}, return_type='dataframe', dataframe_labels='Alpha',
                                     scale_factor=scale_factor)
    drbdf = Mixed_Model.doseresponse(times, 'TotalpSTAT', 'Ib', list(logspace(-2, 8)),
                                     parameters={'Ia': 0}, return_type='dataframe', dataframe_labels='Beta',
                                     scale_factor=scale_factor)

    # Show internalization effects
    Mixed_Model.reset_parameters()
    Mixed_Model.set_parameters({'kSOCSon': 0})
    dradf_int = Mixed_Model.doseresponse(times, 'TotalpSTAT', 'Ia', list(logspace(-2, 8)),
                                         parameters={'Ib': 0}, return_type='dataframe', dataframe_labels='Alpha',
                                         scale_factor=scale_factor)
    drbdf_int = Mixed_Model.doseresponse(times, 'TotalpSTAT', 'Ib', list(logspace(-2, 8)),
                                         parameters={'Ia': 0}, return_type='dataframe', dataframe_labels='Beta',
                                         scale_factor=scale_factor)

    # Show SOCS effects
    Mixed_Model.reset_parameters()
    Mixed_Model.set_parameters({'kIntBasal_r1': 0, 'kIntBasal_r2': 0, 'kint_a': 0, 'kint_b': 0})
    # Uncomment this line to tune IFN alpha internalization to match SOCS
    #Mixed_Model.set_parameters({'kSOCS': Mixed_Model.parameters['kSOCS'] * 2.5})
    dradf_rec = Mixed_Model.doseresponse(times, 'TotalpSTAT', 'Ia', list(logspace(-2, 8)),
                                         parameters={'Ib': 0}, return_type='dataframe', dataframe_labels='Alpha',
# Example #2
# 0
class DualMixedPopulation:
    """
        Documentation - A DualMixedPopulation instance contains two IfnModels which describe two subpopulations.

        Attributes
        ----------
        name = name of model (for import)
        model_1 = IfnModel(name)
        model_2 = IfnModel(name)
        w1 = float in [0,1] reflecting fraction of total population described by model_1
        w2 = float in [0,1] reflecting fraction of total population described by model_2

        Methods
        -------
        mixed_dose_response(): perform a dose response as per any IfnModel, but weighted by each subpopulation
        stepwise_fit(): perform a stepwise fit of the mixed population model to given data
        """
    def __init__(self, name, pop1_weight, pop2_weight):
        """
        Build a two-subpopulation model, importing the same model file for each.
        :param name: name of the model to import for both subpopulations
        :param pop1_weight: float in [0,1], fraction of the population described by model_1
        :param pop2_weight: float in [0,1], fraction of the population described by model_2
        """
        self.name = name
        self.w1 = pop1_weight
        self.w2 = pop2_weight
        self.model_1 = IfnModel(name)
        self.model_2 = IfnModel(name)

    def set_parameters(self, param_dict):
        self.model_1.set_parameters(param_dict)
        self.model_2.set_parameters(param_dict)

    def reset_global_parameters(self):
        """
        This method is not safe for maintaining detailed balance (ie. no D.B. check)
        :return: None
        """
        self.model_1.reset_parameters()
        self.model_2.reset_parameters()

    def update_parameters(self, param_dict):
        """
        This method will act like set_parameters for any parameters that do not end in '_1' or '_2'.
        Parameters with names ending in '_1' or '_2' will be updated only in Model 1 or Model 2 respectively.
        :param param_dict: dictionary of parameter names and the values to use
        :return: 0
        """
        shared_parameters = {
            key: value
            for key, value in param_dict.items() if key[-2] != '_'
        }
        model1_parameters = {
            key[:-2]: value
            for key, value in param_dict.items() if key[-2:] == '_1'
        }
        model2_parameters = {
            key[:-2]: value
            for key, value in param_dict.items() if key[-2:] == '_2'
        }
        self.model_1.set_parameters(shared_parameters)
        self.model_2.set_parameters(shared_parameters)
        self.model_1.set_parameters(model1_parameters)
        self.model_2.set_parameters(model2_parameters)
        return 0

    def get_parameters(self):
        """
        This method will retrieve all parameters from each of model_1 and model_2, and return a parameter dictionary
        of the form {pname: pvalue} where the pname will have '_1' if its value is unique to model_1 and '_2' if it is
        unique to model_2.
        :return: dict
        """
        all_parameters = {}
        for key, value in self.model_1.parameters.items():
            if self.model_1.parameters[key] != self.model_2.parameters[key]:
                all_parameters[key + '_1'] = self.model_1.parameters[key]
                all_parameters[key + '_2'] = self.model_2.parameters[key]
            else:
                all_parameters[key] = self.model_1.parameters[key]
        return all_parameters

    def mixed_dose_response(self,
                            times,
                            observable,
                            dose_species,
                            doses,
                            parameters={},
                            sf=1,
                            **kwargs):
        return_type = kwargs.get('return_type', 'DataFrame')
        if return_type not in ['DataFrame', 'IfnData']:
            raise TypeError('Invalid return type requested')

        response_1 = self.model_1.doseresponse(
            times, observable, dose_species, doses,
            parameters=parameters)[observable]
        response_2 = self.model_2.doseresponse(
            times, observable, dose_species, doses,
            parameters=parameters)[observable]

        weighted_sum_response = np.add(np.multiply(response_1, self.w1),
                                       np.multiply(response_2, self.w2))
        if sf != 1:
            weighted_sum_response = [[el * sf for el in row]
                                     for row in weighted_sum_response]
        if dose_species == 'Ia':
            labelled_data = [[
                'Alpha', doses[row], *[(el, nan)
                                       for el in weighted_sum_response[row]]
            ] for row in range(0, len(weighted_sum_response))]
        elif dose_species == 'Ib':
            labelled_data = [[
                'Beta', doses[row], *[(el, nan)
                                      for el in weighted_sum_response[row]]
            ] for row in range(0, len(weighted_sum_response))]
        else:
            labelled_data = [[
                'Cytokine', doses[row], *[(el, nan)
                                          for el in weighted_sum_response[row]]
            ] for row in range(0, len(weighted_sum_response))]

        column_labels = ['Dose_Species', 'Dose (pM)'
                         ] + [str(el) for el in times]

        drdf = pd.DataFrame.from_records(labelled_data, columns=column_labels)
        drdf.set_index(['Dose_Species', 'Dose (pM)'], inplace=True)

        if return_type == 'DataFrame':
            return drdf
        if return_type == 'IfnData':
            return IfnData(name='custom', df=drdf, conditions=parameters)

    def __score_mixed_models__(self, shared_parameters, mixed_parameters,
                               data):
        # ------------------------------
        # Initialize variables
        # ------------------------------
        times = data.get_times(species='Alpha')
        alpha_doses = data.get_doses(species='Alpha')
        beta_doses = data.get_doses(species='Beta')

        model_1_old_parameters = self.model_1.parameters
        model_2_old_parameters = self.model_2.parameters

        # Set parameters for each population
        self.model_1.set_parameters(shared_parameters)
        self.model_1.set_parameters(mixed_parameters[0])

        self.model_2.set_parameters(shared_parameters)
        self.model_1.set_parameters(mixed_parameters[1])

        # -------------------------
        # Make predictions
        # -------------------------
        alpha_response = self.mixed_dose_response(times,
                                                  'TotalpSTAT',
                                                  'Ia',
                                                  alpha_doses,
                                                  parameters={'Ib': 0})
        beta_response = self.mixed_dose_response(times,
                                                 'TotalpSTAT',
                                                 'Ib',
                                                 beta_doses,
                                                 parameters={'Ia': 0})
        total_response = pd.concat([alpha_response, beta_response])

        # -------------------------
        # Score predictions vs data
        # -------------------------
        def __score_target__(scf, data, sim):
            diff_table = np.zeros((len(data), len(data[0])))
            for r in range(len(data)):
                for c in range(len(data[r])):
                    if not np.isnan(data[r][c][1]):
                        diff_table[r][c] = (sim[r][c][0] * scf -
                                            data[r][c][0]) / data[r][c][1]
                    else:
                        diff_table[r][c] = (sim[r][c][0] * scf - data[r][c][0])
            return np.sum(np.square(diff_table))

        opt = minimize(__score_target__, [0.1],
                       args=(data.data_set.values, total_response.values))
        sf = opt['x'][0]
        score = opt['fun']

        self.model_1.set_parameters(model_1_old_parameters)
        self.model_2.set_parameters(model_2_old_parameters)
        return score, sf

    def stepwise_fit(self, data, parameters_to_test, ntest_per_param, mixed_p):
        number_of_parameters = len(parameters_to_test.keys())
        final_fit = OrderedDict({})

        # Local scope function
        def separate_parameters(p_to_test, mixed_p_list):
            shared_variables = {}
            mixed_variables = [{}, {}]
            for key, value in p_to_test.items():
                if key[-3:] == '__1':
                    if key[0:-3] in mixed_p_list:
                        mixed_variables[0].update({key[0:-3]: value})
                elif key[-3:] == '__2':
                    if key[0:-3] in mixed_p_list:
                        mixed_variables[1].update({key[0:-3]: value})
                else:
                    shared_variables.update({key: value})
            return shared_variables, mixed_variables,

        # Fit each parameter, ordered from most important to least
        initial_score, _ = self.__score_mixed_models__({}, [{}, {}], data)
        for i in range(number_of_parameters):
            print("{}% of the way done".format(i * 100 / number_of_parameters))
            reference_score = 0
            best_scale_factor = 1
            best_parameter = []
            # Test all remaining parameters, using previously fit values
            for p, (min_test_val, max_test_val) in parameters_to_test.items():
                residuals = []
                scale_factor_list = []
                # Try all test values for current parameter
                for j in np.linspace(min_test_val, max_test_val,
                                     ntest_per_param):
                    test_parameters = {
                        p: j,
                        **final_fit
                    }  # Includes previously fit parameters
                    base_parameters, subpopulation_parameters = separate_parameters(
                        test_parameters, mixed_p)
                    score, scale_factor = self.__score_mixed_models__(
                        base_parameters, subpopulation_parameters, data)
                    residuals.append(score)
                    scale_factor_list.append(scale_factor)
                # Choose best value for current parameter
                best_parameter_value = np.linspace(
                    min_test_val, max_test_val,
                    ntest_per_param)[residuals.index(min(residuals))]
                # Decide if this is the best parameter so far in this round of 'i' loop
                if min(residuals) < reference_score or reference_score == 0:
                    reference_score = min(residuals)
                    best_scale_factor = scale_factor_list[residuals.index(
                        min(residuals))]
                    best_parameter = [p, best_parameter_value]
            # Record the next best parameter and remove it from parameters left to test
            final_fit.update({best_parameter[0]: best_parameter[1]})
            final_scale_factor = best_scale_factor
            del parameters_to_test[best_parameter[0]]
        print("Score improved from {} to {} after {} iterations".format(
            initial_score, reference_score, number_of_parameters))
        final_shared_parameters, final_mixed_parameters = separate_parameters(
            final_fit, mixed_p)
        return final_shared_parameters, final_mixed_parameters, final_scale_factor