Example #1
## Assumptions.

# Use codes were classified manually because the assessor classifications
# are meant for property tax purposes. These classifications should be
# reviewed and revised.
res_codes = {
    'single': ['01', '51', '52', '53'],
    'multi': ['{:02d}'.format(i) for i in
              list(range(2, 6)) + list(range(7, 10)) + list(range(89, 99))],
    'mixed': []
}
exempt_codes = []
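
# Hedged sketch (not part of the original file): one way the manual
# classification above could be applied to a single assessor use code,
# e.g. classify_use_code('01') returns 'single'.
def classify_use_code(use_code):
    """Return 'single', 'multi', or 'mixed' for a use code, else None."""
    for res_type in ('single', 'multi', 'mixed'):
        if use_code in res_codes[res_type]:
            return res_type
    return None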

## Register input tables.

tf = TableFrame(staging.parcels_smt, index_col='apn')
sim.add_table('parcels_in', tf, copy_col=False)


@sim.table(cache=True)
def roll():
    mdb = loader.get_path(
        'built/parcel/2010/smt/Property Characteristics/ASSESSOR_ROLL.mdb')
    csv = os.path.splitext(mdb)[0] + '.csv'

    if not os.path.exists(csv):
        with open(csv, 'w') as f:
            # Export of MS Access database requires mdbtools.
            subprocess.check_call(['mdb-export', mdb, '2009_ROLL'], stdout=f)

    df = pd.read_csv(csv, dtype={'APN': str}, low_memory=False)
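

# Hedged sketch (not part of the original file): the mdb-export call above
# assumes mdbtools is installed. A guard like this could fail earlier with a
# clearer message when the tool is missing from the PATH.
import distutils.spawn


def require_mdbtools():
    if not distutils.spawn.find_executable('mdb-export'):
        raise RuntimeError('mdbtools (mdb-export) is required to export the '
                           'MS Access assessor roll.')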
Example #2
# Use codes were classified manually because the assessor classifications
# are meant for property tax purposes. These classifications should be
# reviewed and revised.
res_codes = {
    'single': ['RSFR'],
    'multi': [
        'RAPT', 'RCON', 'RDUP', 'RMFD', 'RMOB', 'RMSC', 'RCOO', 'RQUA', 'RTIM',
        'RTRI', 'VRES'
    ],
    'mixed': []
}
exempt_codes = []

## Register input tables.

tf = TableFrame(staging.parcels_scl, index_col='parcel')
sim.add_table('parcels_in', tf, copy_col=False)


@sim.table(cache=True)
def scvta():
    # Will need to group by the site address fields later to aggregate
    # condos with multiple parcels but only a single polygon. Currently,
    # only one condo will join to the geometries in the shapefile.
    df = loader.get_attributes('built/parcel/2010/scl/Scvta031210.dbf')

    # Strip non-numeric characters in parcel numbers. Affects three records.
    df['PARCEL_NUM'] = df.PARCEL_NUM.str.replace('[^0-9]', '')

    # There are only 11 duplicated parcel numbers. From manual inspection,
    # it appears that the "ASSESSED_V" field is zero for one of the entries
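
# Hedged sketch (not part of the original file): the condo aggregation
# anticipated in the comment at the top of scvta() could later group records
# sharing a site address and count one unit per condo parcel. The address
# column names below are assumptions, not confirmed fields in the source data.
def aggregate_condo_units(df, address_cols=('SITE_HOUSE_NUM', 'SITE_STREET')):
    units = df.groupby(list(address_cols)).size()
    units.name = 'residential_units'
    return units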
Example #3
import pandas as pd
import urbansim.sim.simulation as sim
from spandex import TableLoader, TableFrame

import utils

loader = TableLoader()
staging = loader.tables.staging

## Assumptions.

exempt_codes = []

## Register input tables.

# Alternate index column is "mapblklot", but it was not unique for
# parcels in Treasure Island.
tf = TableFrame(staging.parcels_sfr, index_col='blklot')
sim.add_table('parcels_in', tf, copy_col=False)

## Register output table.


@sim.table(cache=True)
def parcels_out(parcels_in):
    index = pd.Series(parcels_in.index).dropna().unique()
    df = pd.DataFrame(index=index)
    df.index.name = 'apn'
    return df


## Register output columns.
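
# Hedged sketch (not part of the original excerpt): an output column would
# typically be registered with the sim column decorator, assuming the legacy
# sim API behaves like the later orca API. The column value is illustrative.
@sim.column('parcels_out', cache=True)
def county_id(parcels_out):
    # Constant San Francisco county FIPS code for every output parcel.
    return pd.Series('06075', index=parcels_out.index)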
Example #4

# Use codes were classified manually because the assessor classifications
# are meant for property tax purposes. These classifications should be
# reviewed and revised.
res_codes = {'single': [str(i) for i in range(11, 17)] + ['19'],
             'multi': ['10', '17', '18', '61', '88'] +
                      [str(i) for i in range(20, 30)],
             'mixed': ['48', '89']}
exempt_codes = []


## Register input tables.


tf = TableFrame(staging.parcels_cnc_pt)
sim.add_table('parcels_in', tf, copy_col=False)


## Register intermediate table and columns.

# The purpose of this intermediate table is to compute certain fields,
# like non_residential_sqft and residential_units, before grouping together
# records with the same parc_py_id. Thus, single-family condominium units
# would each be assumed to have one residential unit, and this count would
# be summed when grouping later.


@sim.table()
def parcels_in2(parcels_in):
    return pd.DataFrame(index=parcels_in.index)
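
# Hedged sketch (not part of the original excerpt): once per-record fields such
# as residential_units are computed on the intermediate table, the grouping
# step described above could sum them for records sharing a parc_py_id.
def group_records_by_parcel(df):
    return df.groupby('parc_py_id').agg({'residential_units': 'sum',
                                         'non_residential_sqft': 'sum'})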
Example #5
# Use codes were classified manually because the assessor classifications
# are meant for property tax purposes. These classifications should be
# reviewed and revised.
res_codes = {
    'single': ([1100] + list(range(1120, 1151)) + list(range(1200, 1501)) +
               list(range(1900, 2000))),
    'multi': (list(range(600, 1100)) + [1700] + list(range(2000, 3000)) +
              list(range(5000, 5300)) + list(range(7000, 7701)) + [7800]),
    'mixed': (list(range(3900, 4000)) + [4101, 4191, 4240, 9401, 9491])
}
exempt_codes = list(range(1, 1000))
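
# Hedged sketch (not part of the original file): a quick consistency check that
# the manually classified groups above do not overlap.
for _a in res_codes:
    for _b in res_codes:
        if _a < _b:
            assert not set(res_codes[_a]) & set(res_codes[_b]), (_a, _b)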

## Register input tables.

tf = TableFrame(staging.parcels_ala, index_col='apn_sort')
sim.add_table('parcels_in', tf, copy_col=False)


@sim.table(cache=True)
def ie670():
    filepath = \
        loader.get_path('built/parcel/2010/ala/assessor_nov10/IE670c.txt')
    df = pd.read_table(filepath, sep='\t', index_col=False, low_memory=False)
    df.set_index("Assessor's Parcel Number (APN) sort format", inplace=True)
    assert df.index.is_unique
    assert not df.index.hasnans  # hasnans is a property, not a callable.
    return df


@sim.table(cache=True)
Example #6
from spandex import TableLoader, TableFrame
from spandex.utils import load_config
from spandex.io import exec_sql, df_to_db
import pandas as pd
import pandas.io.sql as sql


def db_to_df(query):
    """Executes SQL query and returns DataFrame."""
    conn = loader.database._connection
    return sql.read_frame(query, conn)
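
# Hedged usage example (not part of the original file): the query is
# illustrative, and the table name is interpolated without escaping.
def count_rows(table_name):
    """Return the number of rows in a database table via db_to_df."""
    return db_to_df('SELECT count(*) AS n FROM {}'.format(table_name))['n'][0]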


# Build parcels TableFrame.
loader = TableLoader()
table = loader.database.tables.public.parcels
tf = TableFrame(table, index_col='gid')

# Load TAZ residential unit control totals.
taz_controls_csv = loader.get_path('hh/taz2010_imputation.csv')
targetunits = pd.read_csv(taz_controls_csv, index_col='taz1454')['targetunits']

# Get CSV output file directory.
output_dir = loader.get_path('out/regeneration/summaries')

# Generate summary CSV by county and TAZ.
for grouper in ['county_id', 'taz']:
    df = tf[[grouper, 'non_residential_sqft', 'residential_units']]
    df.dropna(subset=[grouper], inplace=True)

    if grouper == 'taz':
        df[grouper] = df[grouper].astype(int)
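
# Hedged sketch (not part of the original excerpt): the rest of this loop is
# not shown; the per-grouper summary could be aggregated and written out
# roughly like this, with the file name pattern being an assumption.
def write_summary(df, grouper, output_dir):
    import os
    summary = df.groupby(grouper)[['non_residential_sqft',
                                   'residential_units']].sum()
    summary.to_csv(os.path.join(output_dir, 'summary_{}.csv'.format(grouper)))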
Example #7
## Assumptions.


# Use codes were classified manually because the assessor classifications
# are meant for property tax purposes. These classifications should be
# reviewed and revised.
res_codes = {'single': [],
             'multi': [],
             'mixed': []}
exempt_codes = []


## Register input tables.


tf = TableFrame(staging.FIXME, index_col='FIXME')
sim.add_table('parcels_in', tf, copy_col=False)


## Register output table.


@sim.table(cache=True)
def parcels_out(parcels_in):
    index = pd.Series(parcels_in.index).dropna().unique()
    df = pd.DataFrame(index=index)
    df.index.name = 'apn'
    return df
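
# Hedged illustration (not part of the original template): the dropna/unique
# pattern above collapses missing and repeated identifiers before they become
# the output index.
import pandas as pd

_demo = pd.Series(['001', '001', None, '002'])
assert list(_demo.dropna().unique()) == ['001', '002']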


## Register output columns.
Example #8

loader = TableLoader()
staging = loader.tables.staging


## Assumptions.


exempt_codes = []


## Register input tables.


tf = TableFrame(staging.parcels_nap, index_col='asmt')
sim.add_table('parcels_in', tf, copy_col=False)


@sim.table(cache=True)
def buildings():
    df = loader.get_attributes('built/parcel/2010/nap/Napa_buildings.dbf')

    # Usually duplicate records are similar, but sometimes the last record
    # appears to have more information.
    df.drop_duplicates('FeeParcel', take_last=True, inplace=True)

    df.set_index('FeeParcel', inplace=True)
    assert df.index.is_unique
    assert not df.index.hasnans  # hasnans is a property, not a callable.
    return df
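
# Hedged sketch (not part of the original file): a way to sanity-check the
# keep-the-last-record rule above. Applied to the raw attributes frame (before
# drop_duplicates), it compares how many non-null fields each duplicated
# FeeParcel record carries. Uses the newer keep= syntax of pandas.
def duplicate_completeness(df, key='FeeParcel'):
    dupes = df[df.duplicated(key, keep=False)]
    filled = dupes.notnull().sum(axis=1)
    return filled.groupby(dupes[key]).agg(['min', 'max'])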