Example #1
 def plot_coefficients(self, submodel=-2, exclude_constant=True, eqidx=0, plot=True, 
                       store_values_to_file=None):
     """ Plot a barchart of coefficient values. This can be used in a regression model, 
     when coefficients are standardized 
     (i.e. using the estimation module opus_core.estimate_linear_regression_standardized).
     store_values_to_file can be a file name where the values are stored.
     """
     coef = self.get_coefficients(submodel)
     values = coef.get_coefficient_values()
     names = coef.get_coefficient_names()
     sd = coef.get_standard_errors()
     idx=ones(names.shape[1], dtype="bool")
     if exclude_constant:
         pos = coef.get_constants_positions()
         if pos.size > 0:               
             idx[pos]=0
     if store_values_to_file is not None:
         n = idx.sum()
         result = concatenate((reshape(names[eqidx, idx], (n,1)), 
                              reshape(values[eqidx, idx], (n,1)),
                              reshape(sd[eqidx, idx], (n,1))), axis=1)
         write_to_text_file(store_values_to_file, array(['coefficient_name', 'estimate', 'standard_error']), 
                            delimiter='\t')
         write_table_to_text_file(store_values_to_file, result, delimiter='\t', mode='a')
     if plot:
         plot_barchart(values[eqidx, idx], labels = names[eqidx, idx], errors=sd[eqidx, idx])
     else:
         return {'names': names[eqidx, idx], 'values': values[eqidx, idx], 'errors': sd[eqidx, idx]}
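A minimal usage sketch (not from the source): model is assumed to be an already-estimated regression model object exposing this method, and the output file name is hypothetical.

# Hypothetical usage: write the standardized coefficients of the default submodel
# to a tab-delimited file and return the arrays instead of plotting them.
out = model.plot_coefficients(exclude_constant=True, plot=False,
                              store_values_to_file='coefficients.txt')
# out['names'], out['values'] and out['errors'] hold what would have been plotted.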
Example #3
 def export_weights_posterior_mean_and_variance(
         self,
         years,
         quantity_of_interest,
         directory,
         filename=None,
         use_bias_and_variance_from=None,
         ids=None):
     for year in years:
         self.set_posterior(year, quantity_of_interest,
                            use_bias_and_variance_from)
         if filename is None:
             filename = quantity_of_interest
         file = os.path.join(directory, str(year) + '_' + filename)
         write_to_text_file(file,
                            concatenate((array([0.]), self.get_weights())),
                            delimiter=' ')
         write_to_text_file(file,
                            concatenate(
                                (array([0.]),
                                 self.get_posterior_component_variance())),
                            mode='a',
                            delimiter=' ')
         variable_list = self.get_variable_names()
         quantity_index = variable_list.index(quantity_of_interest)
         if ids is None:
             ids = self.m_ids
         means = zeros((ids.size, self.number_of_runs + 1))
         means[:, 0] = ids
         means[self.observed_data.get_quantity_objects(
         )[quantity_index].get_dataset().get_id_index(ids),
               1:means.shape[1]] = self.get_posterior_component_mean()
         write_table_to_text_file(file, means, mode='a')
Example #4
 def _do_export_values(self, variable, values, directory, prefix='', variable_prefix=''):
     filename = os.path.join(directory, "%s%s.%s%s" % (prefix, VariableName(variable).get_dataset_name(), variable_prefix,
                                                       VariableName(variable).get_alias()))
     if self.ids_matching_values_from_mr[variable].ndim < values.ndim:
         data = concatenate((self.ids_matching_values_from_mr[variable][:,newaxis], values), axis=1)
     else:
         data = concatenate((self.ids_matching_values_from_mr[variable], values), axis=1)
     write_table_to_text_file(filename, data)
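The [:, newaxis] step above only turns the 1-D id vector into a column so it can be concatenated with a 2-D value matrix; a self-contained numpy illustration of that step (values are made up):

from numpy import array, concatenate, newaxis

ids = array([10, 20, 30])                               # 1-D ids, shape (3,)
values = array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])    # shape (3, 2)
table = concatenate((ids[:, newaxis], values), axis=1)  # shape (3, 3), id column first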
Example #5
 def write_posterior_mean_and_variance(self,
                                       mean_filename=None,
                                       variance_filename=None):
     if mean_filename is not None:
         write_table_to_text_file(mean_filename,
                                  self.get_posterior_component_mean())
     if variance_filename is not None:
         write_to_text_file(variance_filename,
                            self.get_posterior_component_variance(),
                            delimiter=' ')
Example #6
 def export_weights_posterior_mean_and_variance(self, years, quantity_of_interest, directory, filename=None,
                                                  use_bias_and_variance_from=None, ids = None, **kwargs):
     for year in years:
         self.set_posterior(year, quantity_of_interest, use_bias_and_variance_from, **kwargs)
         if filename is None:
             filename = quantity_of_interest
         file = os.path.join(directory, str(year) + '_' + filename)
         write_to_text_file(file, concatenate((array([0.]), self.get_weights())), delimiter=' ')
         write_to_text_file(file, concatenate((array([0.]), self.get_posterior_component_variance())), mode='a', delimiter=' ')
         variable_list = self.get_variable_names()
         quantity_index = variable_list.index(quantity_of_interest)
         if ids is None:
             ids = self.m_ids
         means = zeros((ids.size, self.number_of_runs+1))
         means[:,0] = ids
         means[:,1:means.shape[1]] = self.get_posterior_component_mean()
         write_table_to_text_file(file, means, mode='a')            
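The file written above therefore contains two space-delimited rows (a leading 0. followed by the run weights, then the same for the posterior component variances) and, appended below them, a table with the ids in the first column and one posterior component mean per run in the remaining columns. A sketch of reading such a file back, assuming exactly that layout and whitespace delimiters:

import numpy as np

def read_weights_file(path):
    # Sketch only: assumes the layout produced by export_weights_posterior_mean_and_variance.
    with open(path) as f:
        lines = f.readlines()
    weights = np.array(lines[0].split(), dtype=float)[1:]    # drop the leading 0.
    variances = np.array(lines[1].split(), dtype=float)[1:]  # drop the leading 0.
    means = np.loadtxt(lines[2:])                            # id column + one column per run
    return weights, variances, means[:, 0].astype(int), means[:, 1:]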
Example #7
 def _do_export_values(self,
                       variable,
                       values,
                       directory,
                       prefix='',
                       variable_prefix=''):
     filename = os.path.join(
         directory, "%s%s.%s%s" %
         (prefix, VariableName(variable).get_dataset_name(),
          variable_prefix, VariableName(variable).get_alias()))
     if self.ids_matching_values_from_mr[variable].ndim < values.ndim:
         data = concatenate(
             (self.ids_matching_values_from_mr[variable][:,
                                                         newaxis], values),
             axis=1)
     else:
         data = concatenate(
             (self.ids_matching_values_from_mr[variable], values), axis=1)
     write_table_to_text_file(filename, data)
Example #8
 def test_get_node_data_into_node_travel_data_set(self):
     temp_dir = tempfile.mkdtemp(prefix='opus_tmp')
     file1 = 'report1'
     temp_file1 = os.path.join(temp_dir, file1)
     write_to_text_file(temp_file1, array(['inode', 'jnode', 'timau', '@corr', 'len', 'result']), delimiter=' ')
     write_table_to_text_file(temp_file1, array([[1,2, 35.6, 4, 1.2, 0], 
                                                 [2,1, 23.5, 3, 0.3,100], 
                                                 [4,10, 2.1, 3, 0.5, 10],
                                                 [3,1, 15.8, 4, 1.1, 5] ]), delimiter = ' ', mode='a')
     file2 = 'report2'
     temp_file2 = os.path.join(temp_dir, file2)
     write_to_text_file(temp_file2, array(['inode', 'jnode', 'volau', 'result']), delimiter=' ')
     write_table_to_text_file(temp_file2, array([[1,2, 110, 0], 
                                                [3,1, 350, 400], 
                                                [5,4, 200, 200]]), delimiter = ' ', mode='a')
     
     node_matrix_attribute_map = {file1: {
                                          'timau':'travel_time',
                                          'len':'distance',
                                          '@corr': 'corridor'
                                          },
                                  file2: {
                                          'volau': 'travel_volume'
                                          }
                                  }
     tm_output = TravelModelOutput()
     node_travel_data_set = tm_output.get_node_travel_data_set(node_matrix_attribute_map, temp_dir)
     # size should be 5, since there are 5 unique combinations of from_node, to_node
     self.assertEqual(node_travel_data_set.size(), 5)
     # the dataset should have 6 attributes
     self.assertEqual(len(node_travel_data_set.get_known_attribute_names()), 6)
     self.assertEqual('travel_time' in node_travel_data_set.get_known_attribute_names(), True)
     self.assertEqual('distance' in node_travel_data_set.get_known_attribute_names(), True)
     self.assertEqual('corridor' in node_travel_data_set.get_known_attribute_names(), True)
     self.assertEqual('travel_volume' in node_travel_data_set.get_known_attribute_names(), True)
     self.assertEqual('from_node_id' in node_travel_data_set.get_known_attribute_names(), True)
     self.assertEqual('to_node_id' in node_travel_data_set.get_known_attribute_names(), True)
     # check values of one node
     node = node_travel_data_set.get_data_element_by_id((3,1))
     self.assertEqual(node.corridor, 4)
     self.assertEqual(node.travel_volume, 350)
     shutil.rmtree(temp_dir) 
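The test exercises the pattern used throughout these examples: write_to_text_file writes one delimited row (here the column names) and write_table_to_text_file appends a 2-D array below it with mode='a'. For readers without the opus_core helpers, a self-contained sketch of the same header-then-rows layout with plain numpy (file name is made up):

import numpy as np

header = ['inode', 'jnode', 'timau']
rows = np.array([[1, 2, 35.6],
                 [2, 1, 23.5]])
with open('report_sketch.txt', 'w') as f:
    f.write(' '.join(header) + '\n')               # one space-delimited header row
    np.savetxt(f, rows, fmt='%g', delimiter=' ')   # the data table below it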
Example #9
    def run(self, data, coefficients, resources=None):
        """
        Like linear_utilities, but in addition it runs linear utilities for
        modified data and stores utilities when each variable is set to its 5%, 95% quantiles,
        keeping the other variables at their median. Last row in the resulting file is the difference in
        utilities between these two.
        The file name can be passed in resources - entry 'utilities_diagnose_file'.
        """
        if data.ndim < 3:
            raise StandardError, "Argument 'data' must be a 3D numpy array."

        if not isinstance(resources, Resources):
            resources= Resources(resources)
        nobs, neqs, nvar = data.shape
        medians = zeros(nvar, dtype=float32)
        quant = zeros((2,nvar), dtype=float32)
        data_with_medians = array(data[0,:,:])
        for ivar in range(nvar): # compute the median and quantiles for each variable
            medians[ivar], quant[0,ivar], quant[1,ivar] = quantile(data[:,:,ivar].ravel(), array([0.5, 0.05, 0.95]))
            data_with_medians[:,ivar] = medians[ivar]


        file_name = resources.get("utilities_diagnose_file", "util")
        if resources.get("submodel", None) is not None:
            file_name = "%s_submodel_%s" % (file_name, resources.get("submodel", 1))
        diagnose_utilities = zeros((3, nvar), dtype=float32)
        argcor = ()
        for ivar in range(nvar): # iterate over variables
            for iquant in [0,1]: # 0 for 5% quantile, 1 for 95% quantile
                mod_data = array(data_with_medians).reshape(1,neqs, nvar) # copy of the data filled with medians
                mod_data[0,:,ivar] = quant[iquant, ivar]
                utility = linear_utilities.run(self, mod_data, coefficients, resources)
                diagnose_utilities[iquant, ivar] = utility[0,0]
            argcor = argcor + (data[:,:,ivar].ravel(),)
        diagnose_utilities[2,:] = diagnose_utilities[1,:] - diagnose_utilities[0,:]
        coef_names = resources.get("coefficient_names", map(lambda x: 'x%s' % x, arange(nvar)+1))
        write_to_text_file(file_name, coef_names, delimiter=' ')
        write_table_to_text_file( file_name, diagnose_utilities, mode='ab')
        logger.log_status("Diagnosed utilities written into %s." % file_name)
        return linear_utilities.run(self, data, coefficients, resources)
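The quantile() call above is an opus_core helper that returns the median and the 5% and 95% quantiles in a single call. A rough numpy equivalent of that step, on the assumption that plain percentiles are an acceptable stand-in:

import numpy as np

data = np.random.rand(100, 4, 3)   # (observations, equations, variables), made-up shape
nvar = data.shape[2]
medians = np.zeros(nvar)
quant = np.zeros((2, nvar))
for ivar in range(nvar):
    flat = data[:, :, ivar].ravel()
    medians[ivar], quant[0, ivar], quant[1, ivar] = np.percentile(flat, [50, 5, 95])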
Example #10
 def export_confidence_intervals(self, confidence_levels, filename, delimiter='\t'):
     """Export confidence intervals into a file. 
     confidence_levels is a list of desired confidence levels. 
     The resulting file has a id column, 'mean' column and for each level 
     its lower bound and upper bound columns.
     The method generate_posterior_distribution should be called prior to this method.
     """
     if not isinstance(confidence_levels, list):
         confidence_levels = [confidence_levels]
     lcl = len(confidence_levels)
     result = zeros((self.simulated_values_ids.size, 2+2*lcl), dtype='float32')
     result[:,0] = self.simulated_values_ids
     result[:,1] = self.get_quantity_from_simulated_values("mean")
     clheader = []
     for cl in confidence_levels:
         clheader = clheader + ['lower_%s' % cl, 'upper_%s' % cl]
     write_to_text_file(filename, ['id', 'mean'] + clheader, delimiter=delimiter)
     i = 2
     for cl in confidence_levels:
         ci = self.get_probability_interval(cl/100.0)
         result[:,i:(i+2)] = transpose(ci)
         i = i+2
     write_table_to_text_file(filename, result, mode='a', delimiter=delimiter)
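A hypothetical call, assuming bm is an instance of this class on which generate_posterior_distribution has already been run; the output path is made up.

# Writes columns: id, mean, lower_80, upper_80, lower_95, upper_95
bm.export_confidence_intervals([80, 95], 'confidence_intervals.txt')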
Example #11
 def write_into_run_id_file(self):
     result = array(
         map(lambda (x, y): [x, y[0], y[1]], self.run_ids_dict.iteritems()))
     write_table_to_text_file(self.run_id_file, result)
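The lambda above relies on Python 2 tuple-parameter unpacking and dict.iteritems(). A self-contained sketch of the same construction in Python 3 syntax, with made-up dictionary contents:

from numpy import array

run_ids_dict = {1: (2005, 2010), 2: (2005, 2015)}   # hypothetical contents
result = array([[run_id, years[0], years[1]]
                for run_id, years in run_ids_dict.items()])
# result is a 2-D array with one row per run: run id, first value, second value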
Example #12
 def write_values_from_multiple_runs(self, filename, transformed_back=True):
     write_table_to_text_file(filename, self.get_predicted_values(transformed_back=transformed_back))
Example #13
 def write_expected_values(self, filename, index, transformed_back=True):
     write_table_to_text_file(filename, self.get_expected_values_by_index(index, transformed_back))
Example #14
 def write_simulated_values(self, filename):
     write_table_to_text_file(filename, self.simulated_values)
Example #15
    def test_get_node_data_into_node_travel_data_set(self):
        temp_dir = tempfile.mkdtemp(prefix='opus_tmp')
        file1 = 'report1'
        temp_file1 = os.path.join(temp_dir, file1)
        write_to_text_file(
            temp_file1,
            array(['inode', 'jnode', 'timau', '@corr', 'len', 'result']),
            delimiter=' ')
        write_table_to_text_file(temp_file1,
                                 array([[1, 2, 35.6, 4, 1.2, 0],
                                        [2, 1, 23.5, 3, 0.3, 100],
                                        [4, 10, 2.1, 3, 0.5, 10],
                                        [3, 1, 15.8, 4, 1.1, 5]]),
                                 delimiter=' ',
                                 mode='a')
        file2 = 'report2'
        temp_file2 = os.path.join(temp_dir, file2)
        write_to_text_file(temp_file2,
                           array(['inode', 'jnode', 'volau', 'result']),
                           delimiter=' ')
        write_table_to_text_file(temp_file2,
                                 array([[1, 2, 110, 0], [3, 1, 350, 400],
                                        [5, 4, 200, 200]]),
                                 delimiter=' ',
                                 mode='a')

        node_matrix_attribute_map = {
            file1: {
                'timau': 'travel_time',
                'len': 'distance',
                '@corr': 'corridor'
            },
            file2: {
                'volau': 'travel_volume'
            }
        }
        tm_output = TravelModelOutput()
        node_travel_data_set = tm_output.get_node_travel_data_set(
            node_matrix_attribute_map, temp_dir)
        # size should be 5, since there are 5 unique combinations of from_node, to_node
        self.assertEqual(node_travel_data_set.size(), 5)
        # the dataset should have 6 attributes
        self.assertEqual(len(node_travel_data_set.get_known_attribute_names()),
                         6)
        self.assertEqual(
            'travel_time' in node_travel_data_set.get_known_attribute_names(),
            True)
        self.assertEqual(
            'distance' in node_travel_data_set.get_known_attribute_names(),
            True)
        self.assertEqual(
            'corridor' in node_travel_data_set.get_known_attribute_names(),
            True)
        self.assertEqual(
            'travel_volume'
            in node_travel_data_set.get_known_attribute_names(), True)
        self.assertEqual(
            'from_node_id' in node_travel_data_set.get_known_attribute_names(),
            True)
        self.assertEqual(
            'to_node_id' in node_travel_data_set.get_known_attribute_names(),
            True)
        # check values of one node
        node = node_travel_data_set.get_data_element_by_id((3, 1))
        self.assertEqual(node.corridor, 4)
        self.assertEqual(node.travel_volume, 350)
        shutil.rmtree(temp_dir)
Example #16
def export_quantiles(bm, outdir, years=[2010, 2040], repl=10000, validation_year=2010, validation_geography='faz', 
                        propfac_hh=0.95, propfac_jobs=3.5, propfac_pop=3.9, no_propagation=[False], aggregate_to=None, 
                        store_simulated=False, **kwargs):
    header_base = ['mean', 'median', 'lower_50', 'upper_50', 'lower_80', 'upper_80', 'lower_90', 'upper_90', 'lower_95', 'upper_95']
    idcolname = {'zone': 'zone_id', 'faz': 'faz_id', 'large_area': 'large_area_id', 'city': 'city_id', 'tract10': 'tract10_id', 'reggeo': 'reggeo_id'}
    header = {}
    for g in idcolname.keys():
        header[g] = array([idcolname[g]] + header_base)[newaxis,:]
    if aggregate_to is not None:
        header[validation_geography] = header[aggregate_to]
    vars = {'household':hhs_vars, 'job':jobs_vars, 'population':pop_vars}
    for year in years:
        if year < validation_year:
            continue
        #for nopropag in [True, False]:
        for nopropag in no_propagation:
            if not nopropag and year == validation_year:
                continue
            #for transform in [True, False]:
            transform = True
            propfac = {}
            for additive_prop in [True, False]:
            #for additive_prop in [True]:
                if nopropag and not additive_prop:
                    continue
                if additive_prop:
                    if nopropag:
                        propfac['household'] = 0
                        propfac['job'] = 0
                        propfac['population'] = 0
                    else:
                        propfac['household'] = propfac_hh
                        propfac['job'] = propfac_jobs
                        propfac['population'] = propfac_pop
                else:
                    propfac['household'] = 1
                    propfac['job'] = 1
                    propfac['population'] = 1
                if not nopropag:
                    suffix = "_propf"
                else:
                    suffix = ""
                if not transform:
                    suffix = suffix + "_raw"
                if additive_prop and not nopropag:
                    suffix = suffix + "_add"
                seed(1)
                #for bias in [False, True]:
                for bias in [False]:
                    if bias:
                        suffix = suffix + "_bias"
                    addpropbias = not bias
                    #bmf = deepcopy(bm)
                    
                    for indicator in vars.keys():                        
                        if aggregate_to is None:
                            exact = True # calibration geography is the same as simulation target
                            bm.set_posterior(year=year, quantity_of_interest=vars[indicator][validation_geography], 
                                     additive_propagation=[addpropbias, additive_prop], 
                                     propagation_factor=[0, propfac[indicator]])
                            mean_hhs = bm.get_posterior_component_mean().mean(axis=1)
                            median = bm.get_exact_quantile(0.5, transformed_back=transform)
                            hhids = bm.get_m_ids()
                            if transform:
                                mean_hhs = mean_hhs**2
                            file_geo = validation_geography                         
                        else: # aggregation
                            exact = False
                            posterior_hhs = bm.generate_posterior_distribution(year=year, quantity_of_interest=vars[indicator][validation_geography], aggregate_to=aggregate_to,
                                                   replicates=repl, omit_bias=not bias, no_propagation=nopropag, additive_propagation=[addpropbias, additive_prop], 
                                                   propagation_factor=[0, propfac[indicator]], transformed_back=transform, **kwargs)
                            mean_hhs = bm.get_quantity_from_simulated_values("mean")
                            median = bm.get_quantity_from_simulated_values("median")
                            hhids = bm.simulated_values_ids
                            file_geo = aggregate_to
                            if store_simulated:
                                bm.write_simulated_values(os.path.join(outdir, "%s_simulated_values_%s" % (aggregate_to, indicator)))
                                #write_table_to_text_file(os.path.join(outdir, '%s_ids_%s' % (aggregate_to, indicator)), bm.simulated_values_ids[:,newaxis], delimiter='\t')

                        prob80_hhs = bm.get_probability_interval(80, exact = exact, transformed_back=transform)
                        prob50_hhs = bm.get_probability_interval(50, exact = exact, transformed_back=transform)
                        prob90_hhs = bm.get_probability_interval(90, exact = exact, transformed_back=transform)
                        prob95_hhs = bm.get_probability_interval(95, exact = exact, transformed_back=transform)
                
                        write_table_to_text_file(os.path.join(outdir, '%s_%s_ci_%s%s' % (file_geo, indicator, year, suffix)), header[validation_geography])
                        write_table_to_text_file(os.path.join(outdir, '%s_%s_ci_%s%s' % (file_geo, indicator, year, suffix)), round(concatenate((hhids[:,newaxis], mean_hhs[:,newaxis], median[:,newaxis], prob50_hhs, prob80_hhs, prob90_hhs, prob95_hhs), axis=1)).astype('int32'), delimiter='\t', mode='a')
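A hypothetical invocation, assuming bm is an object providing the posterior methods used above (set_posterior, generate_posterior_distribution, get_probability_interval, etc.) and that the variable dictionaries hhs_vars, jobs_vars and pop_vars are defined elsewhere in the module; the output directory is made up.

# Export 2010 and 2040 quantiles, calibrated on FAZ geography and aggregated to cities.
export_quantiles(bm, '/tmp/quantile_output', years=[2010, 2040],
                 validation_geography='faz', aggregate_to='city')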
Example #17
 def write_posterior_mean_and_variance(self, mean_filename=None, variance_filename=None):
     if mean_filename is not None:
         write_table_to_text_file(mean_filename, self.get_posterior_component_mean())
     if variance_filename is not None:
         write_to_text_file(variance_filename, self.get_posterior_component_variance(), delimiter=' ')
Example #18
 def write_weight_components(self, filename):
     l = len(self.get_weight_components().keys())
     weight_matrix = zeros((l, self.number_of_runs))
     for i in range(l):
         weight_matrix[i,:] = self.get_weight_components()[i]
     write_table_to_text_file(filename, weight_matrix)
Example #19
 def write_into_run_id_file(self):
     result = array(map(lambda(x,y): [x,y[0], y[1]], self.run_ids_dict.iteritems()))
     write_table_to_text_file(self.run_id_file, result)
Example #20
def match_parcels_to_constraints_and_templates(parcel_dataset,
                                                development_template_dataset,
                                                output_dir, log_scale=True, strict=True,
                                                output_points=False,
                                                parcel_index=None,
                                                template_index=None,
                                                consider_constraints_as_rules=True,
                                                template_opus_path="urbansim_parcel.development_template",
                                                dataset_pool=None,
                                                resources=None):
    """
    This function matches parcels to their constraints and templates and gives a summary about how many parcels have no match.
    It also creates a plot for each GLU and unit type of template ranges and densities.
    parcel_index - 1D array, indices of parcel_dataset (default is all parcels).
    template_index - index to templates that are available (default is all templates).
    If strict is True, parcels without templates are considered across GLU, otherwise only within each GLU. 
    """

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    resources = Resources(resources)
    debug = resources.get("debug",  0)
    if not isinstance(debug, DebugPrinter):
        debug = DebugPrinter(debug)

    if parcel_index is not None and parcel_index.size <= 0:
        return None
        
    if parcel_index is not None:
        index1 = parcel_index
    else:
        index1 = arange(parcel_dataset.size())

    if template_index is not None:
        index2 = template_index
    else:
        index2 = arange(development_template_dataset.size())

    has_constraint_dataset = True
    try:
        constraints = dataset_pool.get_dataset("development_constraint") 
        constraints.load_dataset_if_not_loaded()
    except:
        has_constraint_dataset = False

    parcels_glu = parcel_dataset.compute_variables(['parcel.disaggregate(land_use_type.generic_land_use_type_id)'], dataset_pool=dataset_pool)
    if has_constraint_dataset:
        constraint_types = unique(constraints.get_attribute("constraint_type"))  #unit_per_acre, far etc
        development_template_dataset.compute_variables(map(lambda x: "%s.%s" % (template_opus_path, x), constraint_types), dataset_pool)
            
        parcel_dataset.get_development_constraints(constraints, dataset_pool, 
                                                   index=index1, 
                                                   consider_constraints_as_rules=consider_constraints_as_rules)
        generic_land_use_type_ids = development_template_dataset.compute_variables("urbansim_parcel.development_template.generic_land_use_type_id",
                                                       dataset_pool=dataset_pool)

    parcel_ids = parcel_dataset.get_id_attribute()
    template_ids = development_template_dataset.get_id_attribute()
    
    
    has_template = zeros(index1.size, dtype="int32")
    vacant_land = parcel_dataset.compute_variables(['urbansim_parcel.parcel.vacant_land_area'],
                                                                dataset_pool=dataset_pool)[index1]
    is_vacant = vacant_land>0
    #vacant_land = vacant_land*logical_or(parcels_glu==1, parcels_glu==2)                                                            
    is_developable_parcel = zeros(index1.size, dtype="int32")
    accepted_by_constraints = zeros(index1.size, dtype="int32")
    
    #parcels_to_template = {} 
    parcels_to_template_acc_by_constr = {}
    density_types = development_template_dataset['density_type']
    parcels_acc_by_constr_wo_templ = {}
    parcels_acc_by_constr = {}
    #pidx = parcel_dataset.get_id_index(804461)
    logger.start_block("Combine parcels, templates and constraints")
    for i_template in index2:
        this_template_id = template_ids[i_template]
        
        fit_indicator = ones(index1.size, dtype="bool8")
        parcels_to_template_acc_by_constr[this_template_id] = []
        this_templ_accepted_by_constraints = zeros(index1.size, dtype="int32")
        has_this_template = zeros(index1.size, dtype="int32")
        if has_constraint_dataset:
            generic_land_use_type_id = generic_land_use_type_ids[i_template]
            if generic_land_use_type_id not in parcels_acc_by_constr_wo_templ.keys():
                parcels_acc_by_constr_wo_templ[generic_land_use_type_id] = zeros(index1.size, dtype="int32")
            if generic_land_use_type_id not in parcels_acc_by_constr.keys():
                parcels_acc_by_constr[generic_land_use_type_id] = zeros(index1.size, dtype="int32")
            #if generic_land_use_type_id not in [1,2]:
            #    continue
            units_proposed = parcel_dataset.compute_variables(['psrc_parcel.parcel.units_proposed_for_template_%s' % this_template_id],
                                                                dataset_pool=dataset_pool)[index1]
            is_size_fit = parcel_dataset.compute_variables(['psrc_parcel.parcel.is_size_fit_for_template_%s' % this_template_id],
                                                                dataset_pool=dataset_pool)[index1]
            for constraint_type, constraint in parcel_dataset.development_constraints[generic_land_use_type_id].iteritems():
                if density_types[i_template] <> constraint_type:
                    continue
                template_attribute = development_template_dataset.get_attribute(constraint_type)[i_template]  #density converted to constraint variable name
                if template_attribute == 0:
                    continue
                min_constraint = constraint[:, 0].copy()
                max_constraint = constraint[:, 1].copy()
                ## treat -1 as unconstrained
                w_unconstr = min_constraint == -1
                if w_unconstr.any():
                    min_constraint[w_unconstr] = template_attribute
                
                w_unconstr = max_constraint == -1
                if w_unconstr.any():
                    max_constraint[w_unconstr] = template_attribute

                this_accepted_by_constraints = logical_and(template_attribute >= min_constraint,
                                                        template_attribute <= max_constraint)
                fit_indicator = logical_and(fit_indicator, 
                                            logical_and(logical_and(this_accepted_by_constraints, units_proposed > 0), is_size_fit))
                
                is_developable_parcel = logical_or(is_developable_parcel, max_constraint > 0)
                this_templ_accepted_by_constraints = logical_or(this_templ_accepted_by_constraints, 
                                                                logical_and(is_developable_parcel, 
                                                                            logical_and(this_accepted_by_constraints, units_proposed > 0)))
                has_this_template = logical_or(has_this_template, fit_indicator)
            accepted_by_constraints = logical_or(accepted_by_constraints, this_templ_accepted_by_constraints)
            has_template = logical_or(has_template, has_this_template)
            #parcels_to_template[this_template_id] = where(logical_and(vacant_land>0, 
            #                    logical_and(logical_and(is_developable_parcel, this_accepted_by_constraints),
            #                                logical_not(fit_indicator))))[0]
            #parcels_to_template_acc_by_constr[this_template_id].append(where(accepted_by_constraints)[0].tolist())
            not_accepted = logical_and(this_templ_accepted_by_constraints, logical_and(logical_not(has_this_template), is_vacant))
            parcels_to_template_acc_by_constr[this_template_id].append(where(not_accepted)[0].tolist())
            parcels_acc_by_constr_wo_templ[generic_land_use_type_id] = logical_or(parcels_acc_by_constr_wo_templ[generic_land_use_type_id], 
                                            not_accepted)
            parcels_acc_by_constr[generic_land_use_type_id] = logical_or(parcels_acc_by_constr[generic_land_use_type_id], 
                                                            logical_and(this_templ_accepted_by_constraints, is_vacant))
            #if fit_indicator[pidx]:
            #    print 'Parcel 804461: template %s accepted.' %  this_template_id
            
    logger.end_block()
    ### Print summary
    ##################
    unique_glu = parcels_acc_by_constr_wo_templ.keys()
    #parcels_wo_templ = zeros(index1.size, dtype="int32")
    
    #parcels_wo_templ = where(logical_and(vacant_land>0, logical_and(is_developable_parcel, logical_not(has_template))))[0]
    #nr_parcels_wo_templ = parcels_wo_templ.size
    #is_vacant = vacant_land>0
    #logger.log_status("\nGLU\tvacant land\tconstraint out\tno template")
    logger.log_status("\nGLU\tconsidered\tno template")
    no_glu_templ = []
    parcels_wo_temp_by_glu = {}
    sum1 = 0
    sum2 = 0
    parcels_wo_templ = logical_not(has_template)
    for glu in unique_glu:
        if strict:
            parcels_acc_by_constr_wo_templ[glu] = logical_and(parcels_acc_by_constr_wo_templ[glu], parcels_wo_templ)
        #if glu == 3:
        #parcels_wo_templ = logical_or(parcels_wo_templ, parcels_acc_by_constr_wo_templ[glu])
#        if glu not in generic_land_use_type_ids:
#            no_glu_templ.append(glu)
        #idx = parcels_glu==glu
#        if idx.sum() > 0:
#            logger.log_status("%s\t%7i\t\t%7i\t\t%7i" % (glu, is_vacant[idx].sum(), 
#                        is_vacant[idx].sum() - logical_and(is_vacant[idx], is_developable_parcel[idx]).sum(),
#                        logical_and(is_vacant[idx], logical_and(is_developable_parcel[idx], logical_not(has_template[idx]))).sum()))
#            parcels_wo_temp_by_glu[glu] = where(logical_and(idx, logical_and(is_vacant, 
#                                    logical_and(is_developable_parcel, logical_not(has_template)))))[0]
        logger.log_status("%s\t%7i\t\t%7i" % (glu, parcels_acc_by_constr[glu].sum(), parcels_acc_by_constr_wo_templ[glu].sum()))
        sum1 = sum1 + parcels_acc_by_constr[glu].sum()
        sum2 = sum2 + parcels_acc_by_constr_wo_templ[glu].sum()      
    logger.log_status("\nall\t%7i\t\t%7i" % (sum1, sum2))
    #if len(no_glu_templ) > 0:
    #    logger.log_status("\nNo templates for GLUs: %s" % no_glu_templ)
        
    ### Create plots
    #################
    
    templ_min_max = {}
    for glu in unique_glu:
        gidx = where(parcels_acc_by_constr_wo_templ[glu])[0]
        logger.start_block("Creating figures for GLU %s using %s parcels" % (glu,gidx.size))
        templ_min_max[glu] = []
        max_land_sqft = {'far': 0, 'units_per_acre': 0}
        min_land_sqft = {'far': 9999999, 'units_per_acre': 9999999}
        max_templ_attr = {'far': 0, 'units_per_acre': 0}
        min_templ_attr = {'far': 999999, 'units_per_acre': 9999999}
        xy = {'far':[], 'units_per_acre':[]}
        points = {'far':zeros((0,3)), 'units_per_acre':zeros((0,3))}
        npoints = {'far': 0, 'units_per_acre': 0}
        for i_template in index2:
            if glu <> generic_land_use_type_ids[i_template]:
                continue
            this_template_id = template_ids[i_template]
            #units_proposed = parcel_dataset['units_proposed_for_template_%s' % this_template_id]
            #is_size_fit = parcel_dataset['is_size_fit_for_template_%s' % this_template_id]
            #is_constraint = zeros(parcel_dataset.size(), dtype='bool8')
            #is_constraint[array(parcels_to_template_acc_by_constr[this_template_id])]=True
            #is_size_fit = logical_and(logical_and(logical_not(is_size_fit), 
            #                                      logical_and(is_vacant, units_proposed>0)), 
            #                          logical_and(is_constraint,
            #                                      is_developable_parcel))
            missed_to_match = zeros(parcel_dataset.size(), dtype='bool8')
            missed_to_match[(unique(array(parcels_to_template_acc_by_constr[this_template_id]).flatten())).astype('int32')] = True
            missed_to_match = where(logical_and(missed_to_match, parcels_acc_by_constr_wo_templ[glu]))[0]
            #missed_to_match = unique(array(parcels_to_template_acc_by_constr[this_template_id]).flatten())
            for constraint_type, constraint in parcel_dataset.development_constraints[glu].iteritems():
                if density_types[i_template] <> constraint_type:
                    continue
                template_attribute = development_template_dataset.get_attribute(constraint_type)[i_template]  #density converted to constraint variable name
                if template_attribute == 0:
                    continue
                templ_min_max[glu].append([development_template_dataset["land_sqft_min"][i_template], 
                                           development_template_dataset["land_sqft_max"][i_template]])
                xy[constraint_type] = xy[constraint_type] + [[development_template_dataset["land_sqft_min"][i_template], 
                            development_template_dataset["land_sqft_max"][i_template]], 
                            [template_attribute, template_attribute]]
                #if is_size_fit[gidx].sum() > 0:
                if missed_to_match.size > 0:
                    npoints[constraint_type] = npoints[constraint_type] + missed_to_match.size #is_size_fit[gidx].sum()
                    #if is_size_fit[gidx].sum() > 100:
                    if missed_to_match.size > 100:
                        draw = sample_noreplace(missed_to_match, 100)
                        thisidx = draw
                    else:
                        thisidx = missed_to_match
                    points[constraint_type] = concatenate((points[constraint_type], 
                                      concatenate((parcel_dataset['vacant_land_area'][thisidx][:,newaxis], 
                                                   template_attribute*ones((thisidx.size,1)), 
                                                   parcel_ids[thisidx][:,newaxis]), axis=1)), axis=0)
                    max_land_sqft[constraint_type] = max(max_land_sqft[constraint_type], parcel_dataset['vacant_land_area'][thisidx].max())
                    min_land_sqft[constraint_type] = min(min_land_sqft[constraint_type], parcel_dataset['vacant_land_area'][thisidx].min())
                    max_templ_attr[constraint_type] = max(max_templ_attr[constraint_type], template_attribute)
                    min_templ_attr[constraint_type] = min(min_templ_attr[constraint_type], template_attribute)

        import matplotlib.ticker as ticker
        import matplotlib.pyplot as plt
        def myexp(x, pos):
            return '%i' % (round(exp(x)))
        def myexp2(x, pos):
            return '%.2f' % (round(exp(x), 2))

        for type in ['far', 'units_per_acre']:
            if points[type].size == 0:
                continue
            #print xy[type]
            lxy = array(xy[type])
            dots = points[type][:,0:2]
            minx = min_land_sqft[type]-100
            maxx = max_land_sqft[type]+100
            miny = min_templ_attr[type]-0.05
            maxy = max_templ_attr[type]+0.05
            if log_scale:
                lxy = log(lxy)
                dots = log(dots)
                minx = log(minx)
                maxx = log(maxx)
                miny = log(miny)
                maxy = log(maxy)
            fig = plt.figure()
            ax = fig.add_subplot(111)
            lines = ax.plot(*lxy) # template lines
            po = ax.plot(dots[:,0], dots[:,1]) # parcel points
            if log_scale:
                xformatter = ticker.FuncFormatter(myexp)
                yformatter = ticker.FuncFormatter(myexp2)
                ax.xaxis.set_major_formatter(xformatter)
                ax.yaxis.set_major_formatter(yformatter)
                # The following would be better but throws an error
                #locator = ticker.LogLocator(base=2.718282, subs=0.1)
                #ax.xaxis.set_major_locator(locator)
            plt.setp(lines, color='b', linewidth=1)
            plt.setp(po, marker='o', linestyle='None', linewidth=0)

            ax.axis([min(dots[:,0].min(), minx), 
                     max(dots[:,0].max(), maxx), 
                     min(dots[:,1].min(), miny), 
                     max(dots[:,1].max(), maxy)])
            plt.title('GLU: %s, units: %s, missing: %s' % (glu, type, npoints[type]))
            #ax.grid(True)
            plt.xlabel('land sqft range')
            plt.ylabel('density')
            log_suffix = ''
            if log_scale:
                log_suffix = '_log'
            plt.savefig(os.path.join(output_dir, 'match_templates%s_%s_%s.pdf' % (log_suffix, glu, type)))
            plt.close()
            #plt.show()
            if output_points:
            #if glu == 3:
                write_table_to_text_file(os.path.join(output_dir, 'points_%s_%s.txt' % (glu, type)), points[type], delimiter=', ')
        logger.end_block()

    logger.log_status('Resulting figures stored into %s' % output_dir)               
    return parcel_ids[index1][parcels_wo_templ]
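The log-scale axes in the plotting code above are produced by plotting log-transformed values and relabeling the ticks through matplotlib's FuncFormatter; a minimal self-contained sketch of that trick (data and output file name are made up):

import numpy as np
import matplotlib
matplotlib.use('Agg')                    # render without a display
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

x = np.log(np.array([100.0, 1000.0, 10000.0]))
y = np.array([1.0, 2.0, 3.0])
fig, ax = plt.subplots()
ax.plot(x, y, 'o')
# show the original (exp-transformed) values on the tick labels
ax.xaxis.set_major_formatter(ticker.FuncFormatter(lambda v, pos: '%i' % round(np.exp(v))))
fig.savefig('log_axis_sketch.pdf')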