def create_travel_model_input_file(self,
                                       config,
                                       year,
                                       zone_set,
                                       #dataset_pool,
                                       delimiter = ',',
                                       *args,**kwargs):
        """Write the travel-model input file for *year*.

        Computes the variables listed (in order) in
        ``config['travel_model_configuration']['tm_variable_order']`` on
        *zone_set*, writes them to
        ``[travel_model_base_directory]/[year_dir]/<urbansim_to_tm_variable_file>_<year_dir>.csv``,
        which the travel model will use as input, and then converts that CSV
        to a DBF table via ``dbf.from_csv``.

        Parameters
        ----------
        config : dict
            Must contain a 'travel_model_configuration' entry.
        year :
            Key into ``tm_config`` used to look up ``[year]['year_dir']``.
        zone_set :
            Dataset on which the variables are computed.
        delimiter : str
            CSV field separator (default ',').
        """
        tm_config    = config['travel_model_configuration']
        column_names = tm_config['tm_variable_order']

        # Full variable expressions to compute, in the same order as the
        # output columns.
        variable_list = [tm_config['urbansim_to_tm_variable_mapping'][name]
                         for name in column_names]

        # Compute our variables.
        # NOTE: this assignment was previously indented with a tab while the
        # rest of the method uses spaces, which raises TabError on Python 3.
        dataset_pool = SessionConfiguration().get_dataset_pool()
        zone_set.compute_variables(variable_list, dataset_pool=dataset_pool)
        # The short names are the aliases generated by the zone_set,
        # e.g. ['autogenvar1', 'autogenvar5', 'autogenvar7', ...]
        variable_short_name = [VariableName(x).get_alias() for x in variable_list]
        # logger.log_status(variable_short_name)

        # Prepare the data for writing.  Kept as a dict of per-column arrays
        # (colnum -> data array) rather than packed into one numpy array, so
        # each column keeps its own int/float dtype.
        data = {}
        for i, short_name in enumerate(variable_short_name):
            data[i] = zone_set.get_attribute(short_name)

        # Make the travel model directory if it does not exist yet.
        tm_input_data_dir = os.path.join(tm_config['travel_model_base_directory'],
                                         tm_config[year]['year_dir'])
        if not os.path.exists(tm_input_data_dir):
            os.makedirs(tm_input_data_dir)

        tm_file = tm_config['urbansim_to_tm_variable_file'] + '_' + tm_config[year]['year_dir'] + '.csv'
        self._update_travel_model_data_file(tm_config, data, column_names,
                os.path.join(tm_input_data_dir, tm_file), delimiter)
        # field_names=None lets dbf.from_csv take field names from the CSV
        # header row; to_disk=True writes the .dbf next to the .csv.
        dbf.from_csv(csvfile = os.path.join(tm_input_data_dir, tm_file), to_disk = True, field_names = None)
# Example 2 (snippet score: 0)
#https://pythonhosted.org/dbf/
# Build a DBF table on disk from the example CSV, with three named fields.
import dbf

csv_path = './csv/example.csv'
dbf_path = './dbf/example'
fields = 'test1,test2,id'.split(",")
some_table = dbf.from_csv(csvfile=csv_path, filename=dbf_path,
                          field_names=fields, to_disk=True)
# Example 3 (snippet score: 0)
#https://pythonhosted.org/dbf/
# Export the property-tax CSV to a DBF table on disk with an explicit
# field list.
import dbf

source_csv = './export/csv/property_tax.csv'
target_dbf = './export/dbf/property_tax'
field_list = 'zoneId,zone,wardNo,streetId,street,asstNo,nameAddr,category,hlfYrlyTax,penalty,total,typeOfBldg'.split(",")
some_table = dbf.from_csv(csvfile=source_csv,
                          filename=target_dbf,
                          field_names=field_list,
                          to_disk=True)
# coding: utf-8

# In[ ]:

# --- Generate a DBF file ---


# In[2]:

# Create a new DBF table on disk from a CSV file; fields are x, y and time.
import dbf

source_csv = 'C:/Users/titog/Desktop/city lab/knox test/files/prueba python/ca10y10ons.csv'
some_table = dbf.from_csv(csvfile=source_csv,
                          to_disk=True,
                          field_names='x y time'.split())


# In[ ]:

# --- Print DBF ---


# In[ ]:

# knox: this works.  Swapping in just a new DBF file does not, failing with
# "ValueError: total size of new array must be unchanged".


import numpy as np
import pysal
from datetime import timedelta
# coding: utf-8

# In[ ]:

# --- Generate a DBF file ---

# In[2]:

# Build a new DBF table from a CSV file (columns: x, y, time).
import dbf

_csv_path = 'C:/Users/titog/Desktop/city lab/knox test/files/prueba python/ca10y10ons.csv'
_fields = 'x y time'.split()
some_table = dbf.from_csv(csvfile=_csv_path, to_disk=True, field_names=_fields)

# In[ ]:

# --- Print DBF ---

# In[ ]:

# knox: this works.  Changing only the DBF file for a new one fails with
# "ValueError: total size of new array must be unchanged".

import numpy as np
import pysal
from datetime import timedelta
# Example 6 (snippet score: 0)
    #pandasDF = pd.DataFrame(db[:]) #Convert to Pandas DF
    pandasDF = pd.DataFrame(d) #Convert to Pandas DF
    if upper == True: #Make columns uppercase if wanted 
        pandasDF.columns = map(str.upper, db.header) 
    db.close() 
    return pandasDF
    

    
if __name__ == "__main__":
    
    '''
        Load .dbf into DataFrame
    '''
    # Read the DBF into a pandas DataFrame via the dbf2DF helper above.
    df = dbf2DF('test.dbf')
    
    '''
        Save DF into .CSV
    '''
    # Rename the three columns, then dump to CSV with no index and no header
    # row (assumes the DBF has exactly three columns -- TODO confirm).
    df.columns = ['UNO', 'DOS', 'TRES']
    df.to_csv('test.csv',index=None, header=False)
    
    '''
        Load CSV
    '''
    # Round-trip: rebuild a DBF table on disk ('mytable') from the CSV just
    # written, reusing the renamed columns as the DBF field names.
    columns = list(df.columns)
    dbf_table = dbf.from_csv(csvfile='test.csv',filename='mytable', 
                            field_names=columns, to_disk=True)