Example No. 1
 def test_agent_times_choice(self):
     expression = 'agent_x_choice.agent_times_choice(attr)'
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='agents', 
         table_data={'id': array([1, 2, 3, 4, 5]), 'attr_2': array([3,   2,   4,   10, 20]), 
                                                   'attr_3': array([10, 100, 1000, 500, 0]),
                                                   'attr_4': array([100, 500, 0, 20, -30])
                     }
         )
     storage.write_table(table_name='choices', 
         table_data={'id': array([1, 2, 3, 4])}
         )
     agents = Dataset(in_storage=storage, in_table_name='agents', dataset_name='agent', id_name='id')
     choices = Dataset(in_storage=storage, in_table_name='choices', dataset_name='choice', id_name='id')
     ids = InteractionDataset(dataset1=agents, dataset2=choices, index1=array([0,1,3,4]), index2=array([1,2,3])) 
     result = ids.compute_variables(expression)
     should_be = array([[3, 10, 100], [2,100,500], [10,500, 20], [20, 0, -30]])
     self.assertEqual(ma.allequal(result, should_be), True)
     
     agents.touch_attribute('attr_2') # in order to recompute the expression
     choices.add_primary_attribute(name='name', data=array(['bus', 'car', 'tran', 'walk']))
     agents.add_primary_attribute(name='attr_tran', data=array([100, 1000, 10000, 5000,10]))
     result = ids.compute_variables(expression)
     should_be = array([[3, 100, 100], [2,1000,500], [10,5000, 20], [20, 10, -30]])
     self.assertEqual(ma.allequal(result, should_be), True)
Example No. 2
    def test_agent_times_choice(self):
        expression = "agent_x_choice.agent_times_choice(attr)"
        storage = StorageFactory().get_storage("dict_storage")
        storage.write_table(
            table_name="agents",
            table_data={
                "id": array([1, 2, 3, 4, 5]),
                "attr_2": array([3, 2, 4, 10, 20]),
                "attr_3": array([10, 100, 1000, 500, 0]),
                "attr_4": array([100, 500, 0, 20, -30]),
            },
        )
        storage.write_table(table_name="choices", table_data={"id": array([1, 2, 3, 4])})
        agents = Dataset(in_storage=storage, in_table_name="agents", dataset_name="agent", id_name="id")
        choices = Dataset(in_storage=storage, in_table_name="choices", dataset_name="choice", id_name="id")
        ids = InteractionDataset(dataset1=agents, dataset2=choices, index1=array([0, 1, 3, 4]), index2=array([1, 2, 3]))
        result = ids.compute_variables(expression)
        should_be = array([[3, 10, 100], [2, 100, 500], [10, 500, 20], [20, 0, -30]])
        self.assertEqual(ma.allequal(result, should_be), True)

        agents.touch_attribute("attr_2")  # in order to recompute the expression
        choices.add_primary_attribute(name="name", data=array(["bus", "car", "tran", "walk"]))
        agents.add_primary_attribute(name="attr_tran", data=array([100, 1000, 10000, 5000, 10]))
        result = ids.compute_variables(expression)
        should_be = array([[3, 100, 100], [2, 1000, 500], [10, 5000, 20], [20, 10, -30]])
        self.assertEqual(ma.allequal(result, should_be), True)
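Note: the two runs of the test above hinge on how agent_times_choice(attr) resolves the agent attribute for each sampled choice. Judging from the expected arrays, it falls back to the id-based column attr_<choice id>, and once the choice set has a "name" attribute and the agent set a matching column such as attr_tran, the name-based column takes precedence. The following self-contained numpy sketch reproduces the expected values under that assumption; it is an illustration inferred from the test, not the opus_core implementation.

# Illustrative sketch (assumed lookup rule, not opus_core code): resolve, per
# sampled choice, the agent column "attr_<choice name>" if it exists, otherwise
# "attr_<choice id>", and gather the rows of the sampled agents.
from numpy import array

agents = {
    "attr_2": array([3, 2, 4, 10, 20]),
    "attr_3": array([10, 100, 1000, 500, 0]),
    "attr_4": array([100, 500, 0, 20, -30]),
    "attr_tran": array([100, 1000, 10000, 5000, 10]),  # only present in the second run
}
choice_ids = array([1, 2, 3, 4])
choice_names = array(["bus", "car", "tran", "walk"])    # only present in the second run
index1, index2 = array([0, 1, 3, 4]), array([1, 2, 3])

def agent_times_choice(use_names):
    columns = []
    for j in index2:
        by_name = "attr_%s" % choice_names[j]
        by_id = "attr_%d" % choice_ids[j]
        key = by_name if use_names and by_name in agents else by_id
        columns.append(agents[key][index1])
    return array(columns).T

print(agent_times_choice(use_names=False))  # matches the first should_be above
print(agent_times_choice(use_names=True))   # matches the second should_be above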
Example No. 3
 def __init__(self, resources=None, dataset1=None, dataset2=None, index1 = None, index2 = None, 
             debuglevel=0):
     debug = DebugPrinter(debuglevel)
     debug.print_debug("Creating object %s.%s" % (self.__class__.__module__, self.__class__.__name__), 2)
     
     local_resources = Resources(resources)
     local_resources.merge_if_not_None({"dataset1":dataset1, 
         "dataset2":dataset2, "debug":debug, 
         "index1":index1, "index2":index2})
     CoreInteractionDataset.__init__(self, resources = local_resources)
     
     
Example No. 4
 def create_interaction_dataset(self,
                                dataset1,
                                dataset2,
                                index1=None,
                                index2=None):
     return InteractionDataset(dataset1=dataset1,
                               dataset2=dataset2,
                               index1=index1,
                               index2=index2)
Example No. 5
    def __init__(self,
                 resources=None,
                 dataset1=None,
                 dataset2=None,
                 index1=None,
                 index2=None,
                 debuglevel=0):
        debug = DebugPrinter(debuglevel)
        debug.print_debug(
            "Creating object %s.%s" %
            (self.__class__.__module__, self.__class__.__name__), 2)

        local_resources = Resources(resources)
        local_resources.merge_if_not_None({
            "dataset1": dataset1,
            "dataset2": dataset2,
            "debug": debug,
            "index1": index1,
            "index2": index2
        })
        CoreInteractionDataset.__init__(self, resources=local_resources)
Example No. 6
                            data=[85,20,0,90,35,51,0,10,5])

from opus_core.datasets.dataset_pool import DatasetPool
dataset_pool = DatasetPool(package_order=['urbansim', 'opus_core'], storage=storage)
dataset_pool.datasets_in_pool()
hs = dataset_pool.get_dataset("household")
dataset_pool.datasets_in_pool()
hs.size()
constant = dataset_pool.get_dataset("urbansim_constant")
constant["percent_coverage_threshold"] = 50
locations.compute_variables(["urbansim.gridcell.is_in_wetland"], dataset_pool=dataset_pool)
locations.get_attribute("is_in_wetland")

# Interaction variables
from opus_core.datasets.interaction_dataset import InteractionDataset
interactions = InteractionDataset(dataset1 = households, dataset2 = locations)
interactions.get_dataset_name()

from numpy import arange
interactions = InteractionDataset(dataset1 = households, dataset2 = locations, index1 = arange(5), index2 = arange(3))
interactions.compute_variables(["urbansim.household_x_gridcell.cost_times_income"])

specification = EquationSpecification(
                     variables=array([
                          "gridcell.cost",
                          "urbansim.household_x_gridcell.cost_times_income"]),
                     coefficients=array(["costcoef", "cti_coef"]))
households.add_primary_attribute(data=[2,8,3,1,5,4,9,7,3,6], name="location")
coef, other_results = hlcm.estimate(specification, households)

# Versioning
Example No. 7
    def run(self,
            dataset1,
            dataset2,
            index1=None,
            index2=None,
            sample_size=10,
            weight=None,
            include_chosen_choice=False,
            with_replacement=False,
            resources=None,
            dataset_pool=None):
        """this function samples number of sample_size (scalar value) alternatives from dataset2
        for agent set specified by dataset1.
        If index1 is not None, only samples alterantives for agents with indices in index1;
        if index2 is not None, only samples alternatives from indices in index2.
        sample_size specifies number of alternatives to be sampled for each agent.
        weight, to be used as sampling weight, is either an attribute name of dataset2, or a 1d
        array of the same length as index2 or 2d array of shape (index1.size, index2.size).

        Also refer to document of interaction_dataset"""

        if dataset_pool is None:
            try:
                sc = SessionConfiguration()
                dataset_pool = sc.get_dataset_pool()
            except:
                dataset_pool = DatasetPool()

        local_resources = Resources(resources)
        local_resources.merge_if_not_None({
            "dataset1":
            dataset1,
            "dataset2":
            dataset2,
            "index1":
            index1,
            "index2":
            index2,
            "sample_size":
            sample_size,
            "weight":
            weight,
            "with_replacement":
            with_replacement,
            "include_chosen_choice":
            include_chosen_choice
        })

        local_resources.check_obligatory_keys(
            ['dataset1', 'dataset2', 'sample_size'])
        agent = local_resources["dataset1"]
        index1 = local_resources.get("index1", None)
        if index1 is None:
            index1 = arange(agent.size())
        choice = local_resources["dataset2"]
        index2 = local_resources.get("index2", None)
        if index2 is None:
            index2 = arange(choice.size())

        if index1.size == 0 or index2.size == 0:
            err_msg = "either choice size or agent size is zero, return None"
            logger.log_warning(err_msg)
            return None

        include_chosen_choice = local_resources.get("include_chosen_choice",
                                                    False)
        J = local_resources["sample_size"]
        if include_chosen_choice:
            J = J - 1

        with_replacement = local_resources.get("with_replacement")

        weight = local_resources.get("weight", None)
        if isinstance(weight, str):
            if weight in choice.get_known_attribute_names():
                weight = choice.get_attribute(weight)
                rank_of_weight = 1
            else:
                varname = VariableName(weight)
                if varname.get_dataset_name() == choice.get_dataset_name():
                    weight = choice.compute_variables(
                        weight, dataset_pool=dataset_pool)
                    rank_of_weight = 1
                elif varname.get_interaction_set_names() is not None:
                    ## weights can be an interaction variable
                    interaction_dataset = InteractionDataset(local_resources)
                    weight = interaction_dataset.compute_variables(
                        weight, dataset_pool=dataset_pool)
                    rank_of_weight = 2
                    assert (len(weight.shape) >= rank_of_weight)
                else:
                    err_msg = ("weight is neither a known attribute name "
                               "nor a simple variable from the choice dataset "
                               "nor an interaction variable: '%s'" % weight)
                    logger.log_error(err_msg)
                    raise ValueError(err_msg)
        elif isinstance(weight, ndarray):
            rank_of_weight = weight.ndim
        elif not weight:  ## weight is None or empty string
            weight = ones(index2.size)
            rank_of_weight = 1
        else:
            err_msg = "unkown weight type"
            logger.log_error(err_msg)
            raise TypeError, err_msg

        if (weight.size != index2.size) and (weight.shape[rank_of_weight - 1]
                                             != index2.size):
            if weight.shape[rank_of_weight - 1] == choice.size():
                if rank_of_weight == 1:
                    weight = take(weight, index2)
                if rank_of_weight == 2:
                    weight = take(weight, index2, axis=1)
            else:
                err_msg = "weight array size doesn't match to size of dataset2 or its index"
                logger.log_error(err_msg)
                raise ValueError, err_msg

        prob = normalize(weight)

        #chosen_choice = ones(index1.size) * UNPLACED_ID
        chosen_choice_id = agent.get_attribute(choice.get_id_name()[0])[index1]
        #index_of_placed_agent = where(greater(chosen_choice_id, UNPLACED_ID))[0]
        chosen_choice_index = choice.try_get_id_index(
            chosen_choice_id, return_value_if_not_found=UNPLACED_ID)
        chosen_choice_index_to_index2 = lookup(chosen_choice_index,
                                               index2,
                                               index_if_not_found=UNPLACED_ID)

        if rank_of_weight == 1:  # if weight_array is 1d, then each agent shares the same weight for choices
            replace = with_replacement  # start from the requested replacement setting
            non_zero_counts = nonzerocounts(weight)
            if non_zero_counts < J:
                logger.log_warning(
                    "weight array doesn't have enough non-zero counts; sampling with replacement instead"
                )
                replace = True
            if non_zero_counts > 0:
                sampled_index = prob2dsample(
                    index2,
                    sample_size=(index1.size, J),
                    prob_array=prob,
                    exclude_index=chosen_choice_index_to_index2,
                    replace=replace,
                    return_index=True)
            else:
                # all alternatives have a zero weight
                sampled_index = zeros((index1.size, 0), dtype=DTYPE)
            #return index2[sampled_index]

        if rank_of_weight == 2:
            sampled_index = zeros((index1.size, J), dtype=DTYPE) - 1

            for i in range(index1.size):
                replace = with_replacement  # sampling with/without replacement
                i_prob = prob[i, :]
                if nonzerocounts(i_prob) < J:
                    logger.log_warning(
                        "weight array doesn't have enough non-zero counts; sampling with replacement instead"
                    )
                    replace = True

                #exclude_index passed to probsample_noreplace needs to be indexed to index2
                sampled_index[i, :] = probsample_noreplace(
                    index2,
                    sample_size=J,
                    prob_array=i_prob,
                    exclude_index=chosen_choice_index_to_index2[i],
                    return_index=True)
        sampling_prob = take(prob, sampled_index)
        sampled_index_within_prob = sampled_index.copy()
        sampled_index = index2[sampled_index]
        is_chosen_choice = zeros(sampled_index.shape, dtype="bool")
        #chosen_choice = -1 * ones(chosen_choice_index.size, dtype="int32")
        if include_chosen_choice:
            sampled_index = column_stack(
                (chosen_choice_index[:, newaxis], sampled_index))
            is_chosen_choice = zeros(sampled_index.shape, dtype="bool")
            is_chosen_choice[chosen_choice_index != UNPLACED_ID, 0] = 1
            #chosen_choice[where(is_chosen_choice)[0]] = where(is_chosen_choice)[1]
            ## this is necessary because prob is indexed to index2, not to the choice set (as is chosen_choice_index)
            sampling_prob_for_chosen_choices = take(
                prob, chosen_choice_index_to_index2[:, newaxis])
            ## if chosen choice chosen equals unplaced_id then the sampling prob is 0
            sampling_prob_for_chosen_choices[where(
                chosen_choice_index == UNPLACED_ID)[0], ] = 0.0
            sampling_prob = column_stack(
                [sampling_prob_for_chosen_choices, sampling_prob])

        interaction_dataset = self.create_interaction_dataset(
            dataset1, dataset2, index1, sampled_index)
        interaction_dataset.add_attribute(sampling_prob,
                                          '__sampling_probability')
        interaction_dataset.add_attribute(is_chosen_choice, 'chosen_choice')

        if local_resources.get("include_mnl_bias_correction_term", False):
            if include_chosen_choice:
                sampled_index_within_prob = column_stack(
                    (chosen_choice_index_to_index2[:, newaxis],
                     sampled_index_within_prob))
            interaction_dataset.add_mnl_bias_correction_term(
                prob, sampled_index_within_prob)

        ## to get the older returns
        #sampled_index = interaction_dataset.get_2d_index()
        #chosen_choices = UNPLACED_ID * ones(index1.size, dtype="int32")
        #where_chosen = where(interaction_dataset.get_attribute("chosen_choice"))
        #chosen_choices[where_chosen[0]]=where_chosen[1]
        #return (sampled_index, chosen_choice)

        return interaction_dataset
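Note: most of run() above is bookkeeping, normalizing the weight argument, keeping the sampled indices aligned with index2, and optionally stacking the chosen choice into the first column. The core operation is a per-agent weighted draw of J alternatives that excludes the agent's currently chosen alternative. Below is a minimal, self-contained numpy sketch of that idea under simplified assumptions (1d weights, always sampling without replacement); the helper name and the rng argument are illustrative and not part of opus_core, whose prob2dsample and probsample_noreplace also handle the degenerate cases checked above.

# Illustrative sketch, not the opus_core sampler: for each agent, draw J
# alternatives (values taken from index2) without replacement, with probability
# proportional to a 1d weight over index2, zeroing out the chosen alternative.
import numpy as np

def sample_alternatives(index2, weight, chosen_position, J, rng=None):
    # chosen_position[i] is the position within index2 of agent i's chosen
    # alternative, or -1 if the agent's choice is not in index2.
    rng = rng if rng is not None else np.random.default_rng()
    base_prob = weight / weight.sum()
    sampled = np.empty((chosen_position.size, J), dtype=int)
    for i in range(chosen_position.size):
        p = base_prob.copy()
        if chosen_position[i] >= 0:
            p[chosen_position[i]] = 0.0   # exclude the chosen alternative
            p = p / p.sum()
        sampled[i, :] = rng.choice(index2, size=J, replace=False, p=p)
    return sampled

# Example: 3 agents, 5 alternatives with uniform weight; agent i has chosen the
# alternative at position i, so it never appears in agent i's sample.
print(sample_alternatives(np.arange(5), np.ones(5), np.array([0, 1, 2]), J=2))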
Example No. 8
    choice_filter_index = None #where(datasets[choice_set_name].get_attribute('zone_id') == 742)[0]

    for year in years:
        SimulationState().set_current_time(year)
        SessionConfiguration().get_dataset_pool().remove_all_datasets()
        dataset_pool = DatasetPool(
            package_order=['psrc','urbansim','opus_core'],
            storage=AttributeCache())

        choice_set = dataset_pool.get_dataset(choice_set_name)
        if choice_filter_index is None:
            choice_filter_index = arange(choice_set.size())
        specified_coefficients = SpecifiedCoefficients().create(coefficients, specification, neqs=choice_filter_index.size)
        interaction_dataset = InteractionDataset(dataset1=agent_set,
                                                 dataset2=dataset_pool.get_dataset(choice_set_name),
                                                 index1=None,
                                                 index2=choice_filter_index)

        interaction_dataset.compute_variables(variables,
                                              dataset_pool = dataset_pool)

        for index in range(agent_set.size()):
            submodel = agent_set.get_attribute('submodel')[index]
            if specified_coefficients.nsubmodels == 1:
                submodel = -2
            coef = SpecifiedCoefficientsFor1Submodel(specified_coefficients,submodel)
            data = interaction_dataset.create_logit_data(coef, index=array(index))
            coef_names = coef.get_coefficient_names_from_alt().tolist()
            ids = reshape(choice_set.get_id_attribute()[choice_filter_index], (1,choice_filter_index.size,1))
            write_2d_data(concatenate((ids, data),axis=2), ['id'] + coef_names, filename=os.path.join(file_name_root,
                                                                  '%s_data_submodel%sa%s_year%s.txt' % (model, submodel, index+1, year)))
Example No. 9
    def run(self, dataset1, dataset2, index1=None, index2=None, sample_size=10, weight=None,
            include_chosen_choice=False, with_replacement=False, resources=None, dataset_pool=None):
        
        """this function samples number of sample_size (scalar value) alternatives from dataset2
        for agent set specified by dataset1.
        If index1 is not None, only samples alterantives for agents with indices in index1;
        if index2 is not None, only samples alternatives from indices in index2.
        sample_size specifies number of alternatives to be sampled for each agent.
        weight, to be used as sampling weight, is either an attribute name of dataset2, or a 1d
        array of the same length as index2 or 2d array of shape (index1.size, index2.size).

        Also refer to document of interaction_dataset"""

        if dataset_pool is None:
            try:
                sc = SessionConfiguration()
                dataset_pool=sc.get_dataset_pool()
            except:
                dataset_pool = DatasetPool()
        
        local_resources = Resources(resources)
        local_resources.merge_if_not_None(
                {"dataset1": dataset1, "dataset2": dataset2,
                "index1":index1, "index2": index2,
                "sample_size": sample_size, "weight": weight,
                "with_replacement": with_replacement,
                "include_chosen_choice": include_chosen_choice})

        local_resources.check_obligatory_keys(['dataset1', 'dataset2', 'sample_size'])
        agent = local_resources["dataset1"]
        index1 = local_resources.get("index1", None)
        if index1 is None:
            index1 = arange(agent.size())
        choice = local_resources["dataset2"]
        index2 = local_resources.get("index2", None)
        if index2 is None:
            index2 = arange(choice.size())
            
        if index1.size == 0 or index2.size == 0:
            err_msg = "either choice size or agent size is zero, return None"
            logger.log_warning(err_msg)
            return None
        
        include_chosen_choice = local_resources.get("include_chosen_choice",  False)
        J = local_resources["sample_size"]
        if include_chosen_choice:
            J = J - 1
            
        with_replacement = local_resources.get("with_replacement")
            
        weight = local_resources.get("weight", None)
        if isinstance(weight, str):
            if weight in choice.get_known_attribute_names():
                weight=choice.get_attribute(weight)
                rank_of_weight = 1 
            elif VariableName(weight).get_dataset_name() == choice.get_dataset_name():
                weight=choice.compute_variables(weight, dataset_pool=dataset_pool)
                rank_of_weight = 1
            else:
                ## weights can be an interaction variable
                interaction_dataset = InteractionDataset(local_resources)
                weight=interaction_dataset.compute_variables(weight, dataset_pool=dataset_pool)
                rank_of_weight = 2
        elif isinstance(weight, ndarray):
            rank_of_weight = weight.ndim
        elif not weight:  ## weight is None or empty string
            weight = ones(index2.size)
            rank_of_weight = 1
        else:
            err_msg = "unkown weight type"
            logger.log_error(err_msg)
            raise TypeError, err_msg

        if (weight.size != index2.size) and (weight.shape[rank_of_weight-1] != index2.size):
            if weight.shape[rank_of_weight-1] == choice.size():
                if rank_of_weight == 1:
                    weight = take(weight, index2)
                if rank_of_weight == 2:
                    weight = take(weight, index2, axis=1)
            else:
                err_msg = "weight array size doesn't match to size of dataset2 or its index"
                logger.log_error(err_msg)
                raise ValueError, err_msg

        prob = normalize(weight)

        #chosen_choice = ones(index1.size) * UNPLACED_ID
        chosen_choice_id = agent.get_attribute(choice.get_id_name()[0])[index1]
        #index_of_placed_agent = where(greater(chosen_choice_id, UNPLACED_ID))[0]
        chosen_choice_index = choice.try_get_id_index(chosen_choice_id, return_value_if_not_found=UNPLACED_ID)
        chosen_choice_index_to_index2 = lookup(chosen_choice_index, index2, index_if_not_found=UNPLACED_ID)
        
        if rank_of_weight == 1: # if weight_array is 1d, then each agent shares the same weight for choices
            replace = with_replacement           # start from the requested replacement setting
            if nonzerocounts(weight) < J:
                logger.log_warning("weight array dosen't have enough non-zero counts, use sample with replacement")
                replace = True
            sampled_index = prob2dsample( index2, sample_size=(index1.size, J),
                                        prob_array=prob, exclude_index=chosen_choice_index_to_index2,
                                        replace=replace, return_index=True )
            #return index2[sampled_index]

        if rank_of_weight == 2:
            sampled_index = zeros((index1.size,J), dtype="int32") - 1
                
            for i in range(index1.size):
                replace = with_replacement          # sampling with/without replacement
                i_prob = prob[i,:]
                if nonzerocounts(i_prob) < J:
                    logger.log_warning("weight array dosen't have enough non-zero counts, use sample with replacement")
                    replace = True

                #exclude_index passed to probsample_noreplace needs to be indexed to index2
                sampled_index[i,:] = probsample_noreplace( index2, sample_size=J, prob_array=i_prob,
                                                     exclude_index=chosen_choice_index_to_index2[i],
                                                     return_index=True )
        sampling_prob = take(prob, sampled_index)
        sampled_index = index2[sampled_index]
        is_chosen_choice = zeros(sampled_index.shape, dtype="bool")
        #chosen_choice = -1 * ones(chosen_choice_index.size, dtype="int32")
        if include_chosen_choice:
            sampled_index = column_stack((chosen_choice_index[:,newaxis],sampled_index))
            is_chosen_choice = zeros(sampled_index.shape, dtype="bool")
            is_chosen_choice[chosen_choice_index!=UNPLACED_ID, 0] = 1
            #chosen_choice[where(is_chosen_choice)[0]] = where(is_chosen_choice)[1]
            ## this is necessary because prob is indexed to index2, not to the choice set (as is chosen_choice_index)
            sampling_prob_for_chosen_choices = take(prob, chosen_choice_index_to_index2[:, newaxis])
            ## if chosen choice chosen equals unplaced_id then the sampling prob is 0
            sampling_prob_for_chosen_choices[where(chosen_choice_index==UNPLACED_ID)[0],] = 0.0
            sampling_prob = column_stack([sampling_prob_for_chosen_choices, sampling_prob])
        
        interaction_dataset = self.create_interaction_dataset(dataset1, dataset2, index1, sampled_index)
        interaction_dataset.add_attribute(sampling_prob, '__sampling_probability')
        interaction_dataset.add_attribute(is_chosen_choice, 'chosen_choice')
        
        ## to get the older returns
        #sampled_index = interaction_dataset.get_2d_index()
        #chosen_choices = UNPLACED_ID * ones(index1.size, dtype="int32") 
        #where_chosen = where(interaction_dataset.get_attribute("chosen_choice"))
        #chosen_choices[where_chosen[0]]=where_chosen[1]
        #return (sampled_index, chosen_choice)
        
        return interaction_dataset
Example No. 10
    for year in years:
        SimulationState().set_current_time(year)
        SessionConfiguration().get_dataset_pool().remove_all_datasets()
        dataset_pool = DatasetPool(
            package_order=['psrc', 'urbansim', 'opus_core'],
            storage=AttributeCache())

        choice_set = dataset_pool.get_dataset(choice_set_name)
        if choice_filter_index is None:
            choice_filter_index = arange(choice_set.size())
        specified_coefficients = SpecifiedCoefficients().create(
            coefficients, specification, neqs=choice_filter_index.size)
        interaction_dataset = InteractionDataset(
            dataset1=agent_set,
            dataset2=dataset_pool.get_dataset(choice_set_name),
            index1=None,
            index2=choice_filter_index)

        interaction_dataset.compute_variables(variables,
                                              dataset_pool=dataset_pool)

        for index in range(agent_set.size()):
            submodel = agent_set.get_attribute('submodel')[index]
            if specified_coefficients.nsubmodels == 1:
                submodel = -2
            coef = SpecifiedCoefficientsFor1Submodel(specified_coefficients,
                                                     submodel)
            data = interaction_dataset.create_logit_data(coef,
                                                         index=array(index))
            coef_names = coef.get_coefficient_names_from_alt().tolist()