def download_tiger_data(shp_path_only=False):
    """"""

    tiger_url = 'ftp://ftp2.census.gov/geo/tiger/TIGER{yr}'.format(
        yr=gv.tiger_year)

    gv.shp = dict()
    for prod in gv.product:
        prod_name = TIGER_PRODUCT[prod].lower()
        prod_class = ''.join([c for c in TIGER_PRODUCT[prod] if c.isalpha()])
        prod_dir = join(gv.data_dir, prod_class)

        if not exists(prod_dir):
            os.makedirs(prod_dir)

        for st in gv.states:
            prod_url = '{base_url}/{class_}/' \
                       'tl_{yr}_{fips}_{name}.zip'.format(
                            base_url=tiger_url, class_=prod_class,
                            yr=gv.tiger_year, fips=gv.state_fips[st],
                            name=prod_name)
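            # the resulting URL looks like the following (year, state
            # fips and product here are hypothetical):
            # ftp://ftp2.census.gov/geo/tiger/TIGER2015/BG/tl_2015_41_bg.zip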

            # map each expected shapefile path to the product code of
            # the table that it will be loaded into
            shp_name = '{}.shp'.format(splitext(basename(prod_url))[0])
            shp_path = join(prod_dir, shp_name)
            gv.shp[shp_path] = prod

            if not shp_path_only:
                prod_path = utils.download_with_progress(prod_url, prod_dir)
                with ZipFile(prod_path, 'r') as z:
                    print('\nunzipping...')
                    z.extractall(prod_dir)
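

# A minimal usage sketch for download_tiger_data; the values below are
# hypothetical and are normally set by this module's entry point
# (gv.state_fips is assumed to be defined alongside gv.states):
#
#     gv.tiger_year = 2015
#     gv.data_dir = '/tmp/census'
#     gv.product = ['bg']          # hypothetical TIGER_PRODUCT key
#     gv.states = ['OR']
#     download_tiger_data()
#
# Afterwards gv.shp maps each expected .shp path to its product code,
# e.g. {'/tmp/census/BG/tl_2015_41_bg.shp': 'bg'}.

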
def download_acs_data():
    """"""

    # get the raw census data as delimited text; the Census Bureau
    # groups the ACS tables into files it calls 'sequences'
    acs_url = 'http://www2.census.gov/programs-surveys/' \
              'acs/summary_file/{yr}'.format(yr=gv.acs_year)

    for geog in ACS_GEOGRAPHY:
        geog_dir = join(gv.data_dir, geog.lower())

        if not exists(geog_dir):
            os.makedirs(geog_dir)

        for st in gv.states:
            st_name = gv.state_names[st]
            geog_url = '{base_url}/data/{span}_year_by_state/' \
                       '{state}_{geography}.zip'.format(
                            base_url=acs_url, span=gv.span,
                            state=st_name, geography=geog)
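            # the resulting URL looks like the following (year, span,
            # state and geography here are hypothetical):
            # http://www2.census.gov/programs-surveys/acs/summary_file/
            #     2015/data/5_year_by_state/Oregon_Tracts_Block_Groups_Only.zip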

            geog_path = utils.download_with_progress(geog_url, geog_dir)
            with ZipFile(geog_path, 'r') as z:
                print('\nunzipping...')
                z.extractall(dirname(geog_path))

    # the raw csv files don't include field names; the templates
    # downloaded below supply that metadata (though only the geoheader
    # template is used by this process)
    schema_url = '{base_url}/data/{yr}_{span}yr_' \
                 'Summary_FileTemplates.zip'.format(
                      base_url=acs_url, yr=gv.acs_year, span=gv.span)
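    # resolves to e.g. .../data/2015_5yr_Summary_FileTemplates.zip
    # (year and span hypothetical)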

    schema_path = utils.download_with_progress(schema_url, gv.data_dir)
    with ZipFile(schema_path, 'r') as z:
        print('\nunzipping...')
        z.extractall(dirname(schema_path))

    # download the lookup table that describes how to extract the ACS
    # tables from the sequence files
    lookup_url = '{base_url}/documentation/user_tools/' \
                 '{lookup}'.format(base_url=acs_url, lookup=gv.lookup_file)
    utils.download_with_progress(lookup_url, gv.data_dir)
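

# A minimal sketch of driving both downloads by hand, assuming gv has
# already been populated (tiger_year, acs_year, span, data_dir, product,
# states, state_fips, state_names, lookup_file); normally a command line
# entry point elsewhere in this project handles that:
if __name__ == '__main__':
    download_tiger_data(shp_path_only=False)
    download_acs_data()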