def print_errors(errors, files=None, csv_path=None):
    """
    Write a list of errors to a CSV file, or print them to the console.

    If files is provided, the root of the file data is printed/written
    instead of the file number.

    :param errors: list [(file#, line#, "Issue")]
    :param files: dict {file#: ("root", "map_path", "hash_path")}
    :param csv_path: string path of the CSV file to create.
        If None, print to the console instead.
    :return: None
    """
    # Sort in place so output is ordered by file number, then line number.
    errors.sort()
    if csv_path is None:
        for file_num, line_num, issue in errors:
            if files is not None:
                root = files[file_num][0]
            else:
                root = file_num
            print("{0}, {1}, {2}".format(root, line_num, issue))
    else:
        with csv23.open(csv_path, "w") as csv_file:
            csv_writer = csv.writer(csv_file)
            header = ["file", "line", "issue"]
            csv23.write(csv_writer, header)
            for file_num, line_num, issue in errors:
                if files is not None:
                    # Use only the root (first element), matching the console
                    # branch above; previously the whole tuple was written.
                    root = files[file_num][0]
                else:
                    root = file_num
                row = [root, line_num, issue]
                csv23.write(csv_writer, row)
# Esempio n. 2
def write_folder_to_file(folder, csv_path):
    """Write the path, name and hash of every file in folder to a CSV file."""
    records = get_file_hashes(folder)
    with csv23.open(csv_path, "w") as out_file:
        writer = csv.writer(out_file)
        # Header matches the tuples produced by get_file_hashes().
        csv23.write(writer, ["path", "name", "hash"])
        for record in records:
            csv23.write(writer, record)
def make_csv(kind, fields):
    """Write a CSV file for all the fields found for kind."""

    items = get_all_items(kind)
    csv_path = kind + ".csv"
    with csv23.open(csv_path, "w") as csv_file:
        csv_writer = csv.writer(csv_file)
        # The list of field names doubles as the header row.
        csv23.write(csv_writer, fields)
        for record in items:
            csv23.write(csv_writer, simplify(record, fields))
# Esempio n. 4
def ashowone(park, csv_path=None):
    """Write the assets of park to csv_path, or print them when no path is given."""
    if not csv_path:
        # Console output: comma-joined header, then one line per asset.
        print(",".join(asset_header))
        for record in assets(park):
            print(",".join([my_str(value) for value in record]))
    else:
        with csv23.open(csv_path, "w") as out_file:
            writer = csv.writer(out_file)
            csv23.write(writer, asset_header)
            for record in assets(park):
                csv23.write(writer, record)
# Esempio n. 5
def build_csv(csv_path):
    """Query every site/asset-type combination and write the results to csv_path."""
    with csv23.open(csv_path, "w") as out_file:
        writer = csv.writer(out_file)
        csv23.write(writer, table_column_names)
        for site in sites:
            site_id = sites[site]
            for asset_code in asset_types:
                code_text = "{0}".format(asset_code)
                data = [site, code_text, asset_types[asset_code]]
                # Progress feedback for what can be a slow set of queries.
                print(data)
                response = location_query(site_id, code_text)
                convert_xml_to_csv(data, response, writer)
# Esempio n. 6
def lshowall(csv_path=None):
    """Write the locations of every park to csv_path, or print when no path is given."""
    if not csv_path:
        # Console output: comma-joined header, then one line per location.
        print(",".join(loc_header))
        for park in sites:
            for record in locations(park):
                print(",".join([my_str(value) for value in record]))
    else:
        with csv23.open(csv_path, "w") as out_file:
            writer = csv.writer(out_file)
            csv23.write(writer, loc_header)
            for park in sites:
                for record in locations(park):
                    csv23.write(writer, record)
# Esempio n. 7
def create_csv(csv_path, folder):
    """Create a CSV file describing all the photos in folder.

    Walks folder recursively (pruning Config.skip_dirs) and writes one row
    per *.jpg file with its name parts, EXIF location/date data, size and
    file modification date.

    :param csv_path: string path of the CSV file to create.
    :param folder: string path of the folder tree to scan.
    :return: None
    """

    # pylint: disable=too-many-locals

    header = "folder,photo,id,namedate,exifdate,lat,lon,gpsdate,size,filedate"
    with csv23.open(csv_path, "w") as csv_file:
        csv_writer = csv.writer(csv_file)
        csv23.write(csv_writer, header.split(","))
        # Walk the requested folder; previously this walked the not-yet-defined
        # name `root`, which raised NameError before any work was done.
        for root, dirs, files in os.walk(folder):
            # Use a distinct name for the current sub-folder's basename so the
            # `folder` parameter is not shadowed.
            sub_folder = os.path.basename(root)
            # Remove the skip dirs from the current sub directories
            # (mutating `dirs` in place prunes the walk).
            for skipdir in Config.skip_dirs:
                if skipdir in dirs:
                    dirs.remove(skipdir)
            for filename in files:
                base, extension = os.path.splitext(filename)
                if extension.lower() == ".jpg":
                    code, name_date = get_name_parts(base)
                    path = os.path.join(root, filename)
                    size = os.path.getsize(path)
                    with open(path, "rb") as in_file:
                        # exifread wants binary data
                        tags = exifread.process_file(in_file, details=False)
                        lat = get_latitude(tags)
                        lon = get_longitude(tags)
                        exif_date = get_exif_date(tags)
                        gps_date = get_gps_date(tags)
                    file_date = datetime.datetime.fromtimestamp(
                        os.path.getmtime(path)).isoformat()
                    # convert date to a microsoft excel acceptable ISO format
                    file_date = file_date.replace("T", " ")
                    row = [
                        sub_folder,
                        filename,
                        code,
                        name_date,
                        exif_date,
                        lat,
                        lon,
                        gps_date,
                        size,
                        file_date,
                    ]
                    csv23.write(csv_writer, row)
# Esempio n. 8
def writedata(filename, data):
    """Write the data as CSV in filename."""

    # pylint: disable=too-many-locals

    header = [
        "Site",
        "Team",
        "Quad",
        "Corner",
        "Easting",
        "Northing",
        "SideLength",
        "Slope(%)",
    ]
    with csv23.open(filename, "w") as out_file:
        writer = csv.writer(out_file)
        csv23.write(writer, header)
        for name in sorted(data.keys()):
            # Keys look like "site|TQ" where T is the team and Q the quad.
            site, teamquad = name.split("|")
            team, quad = teamquad[:1], teamquad[1:]
            measured = data[name]
            # Both measured corners (1 and 2) are required to derive the rest.
            if 1 not in measured:
                print("Error Corner 1 not found in " + name)
                continue
            if 2 not in measured:
                print("Error Corner 2 not found in " + name)
                continue
            p_1 = measured[1]
            p_3 = measured[2]
            # Derive the two missing corners plus the side length and slope.
            p_2, p_4, side_len, slope = corners3d(p_1, p_3)
            side = round(side_len, 3)
            grade = round(slope, 3)
            for corner_num, point in ((3, p_2), (4, p_4)):
                row = [site, team, quad, corner_num,
                       point[0], point[1], side, grade]
                csv23.write(writer, row)
# Esempio n. 9
def test_csv(csv_path):
    """Run the test service query and write its rows to csv_path."""
    with csv23.open(csv_path, "w") as out_file:
        writer = csv.writer(out_file)
        csv23.write(writer, table_column_names)
        # Fixed sample site/code/type triple exercises the conversion path.
        response = test_service()
        convert_xml_to_csv(["ANIA", "4100", "Building"], response, writer)
# Esempio n. 10
def convert_xml_to_csv(data, response, csv_writer):
    """Convert the XML response to rows and write each one with csv_writer."""
    for record in convert_xml_to_rows(data, response):
        csv23.write(csv_writer, record)
def organize():
    """Add additional attributes to the ifsar file list.

    Reads the file inventory at Config.csv_path (path, name, ext, size per
    row) and writes an expanded inventory to Config.out_path with derived
    classification flags, cell/lat/lon parsed from the path and name, and
    counts of companion files found next to each tif.
    """

    # pylint: disable=too-many-locals,too-many-branches,too-many-statements

    with csv23.open(Config.out_path, "w") as out_file:
        csv_writer = csv.writer(out_file)
        header = [
            "folder",
            "filename",
            "ext",
            "size",
            "legacy",
            "nga",
            "kind",
            "edge",
            "cell",
            "lat",
            "lon",
            "tfw",
            "xml",
            "html",
            "txt",
            "tif_xml",
            "ovr",
            "aux",
            "rrd",
            "aux_old",
            "crc",
            "extras",
            "skip",
        ]
        csv23.write(csv_writer, header)

        with csv23.open(Config.csv_path, "r") as csv_file:
            csv_reader = csv.reader(csv_file)
            next(csv_reader)  # ignore the header
            for row in csv_reader:
                row = csv23.fix(row)
                # Input schema: folder path, file name, extension, file size.
                path_in, name, ext, size = row
                # Lower-case copies for case-insensitive substring matching;
                # the original path_in/name casing is preserved in the output.
                name = name.lower()
                path = path_in.lower()
                legacy = "N"
                if "legacy" in path:
                    legacy = "Y"
                nga = "N"
                if "nga_30" in path:
                    nga = "Y"
                # Classify the file kind; later checks override earlier ones,
                # so dsm/dtm in the name or path win over ori.
                kind = ""
                if "ori" in name or ("ori" in path and "priority" not in path):
                    kind = "ori"
                if kind == "ori" and "_sup" in name:
                    kind = "ori_sup"
                if "dsm" in name or "dsm" in path:
                    kind = "dsm"
                if "dtm" in name or "dtm" in path:
                    kind = "dtm"
                edge = "N"
                if "edge" in path:
                    edge = "Y"
                cell = ""
                lat, lon = 0, 0
                # Numeric cell folders: e.g. \cell_123\ -> "123".
                match = re.search(r"\\cell_(\d*)\\", path)
                if match:
                    cell = match.group(1)
                match = re.search(r"\\cell_([def])\\",
                                  path)  # d -> 196, e -> 197, f -> 198
                if match:
                    cell = ord(match.group(1)) - ord("d") + 196
                # Lat/lon encoded in the name as _n<ddmm>w<dddmm> hundredths.
                match = re.search(r"_n(\d*)w(\d*)", name)
                if match:
                    lat = int(match.group(1)) / 100
                    lon = int(match.group(2)) / 100
                #
                # Check for supplemental *.html, *.aux.xml, etc files
                #
                file_path = os.path.join(path_in, name)
                # Collect the extensions of every sibling sharing this stem.
                exts_found = [
                    sup.replace(file_path, "").lower()
                    for sup in glob.glob(file_path + ".*")
                ]
                # exts_possible = ['.tif', '.tfw','.xml','.html','.txt','.tif.xml',
                #                  '.tif.ovr','.tif.aux.xml', '.rrd', '.aux', '.tif.crc']
                # One 0/1 flag per known companion extension.
                tfw, xml, html, txt, tif_xml = 0, 0, 0, 0, 0
                ovr, aux, rrd, aux_old, crc = 0, 0, 0, 0, 0
                if ".tfw" in exts_found:
                    tfw = 1
                if ".xml" in exts_found:
                    xml = 1
                if ".html" in exts_found:
                    html = 1
                if ".txt" in exts_found:
                    txt = 1
                if ".tif.xml" in exts_found:
                    tif_xml = 1
                if ".tif.ovr" in exts_found:
                    ovr = 1
                if ".tif.aux.xml" in exts_found:
                    aux = 1
                if ".rrd" in exts_found:
                    rrd = 1
                if ".aux" in exts_found:
                    aux_old = 1
                if ".tif.crc" in exts_found:
                    crc = 1
                # Anything left over after the known companions is "extras".
                extras = (len(exts_found) - 1 - tfw - xml - html - txt -
                          tif_xml - ovr - aux - rrd - aux_old - crc
                          )  # 1 for the tif that must exist
                out_row = [
                    path_in,
                    name,
                    ext,
                    size,
                    legacy,
                    nga,
                    kind,
                    edge,
                    cell,
                    lat,
                    lon,
                    tfw,
                    xml,
                    html,
                    txt,
                    tif_xml,
                    ovr,
                    aux,
                    rrd,
                    aux_old,
                    crc,
                    extras,
                    "N",
                ]
                csv23.write(csv_writer, out_row)
# Esempio n. 12
class Config(object):
    """Namespace for configuration parameters. Edit as needed."""

    # pylint: disable=useless-object-inheritance,too-few-public-methods

    # Write the results to this file path.
    results = r"c:\tmp\sr.csv"
    # The folder to search.
    start = r"c:\tmp\Changing Tides"


# Scan every *.mxd under Config.start and record each data frame's spatial
# reference (one CSV row per data frame) in Config.results.
with csv23.open(Config.results, "w") as f:
    csv_writer = csv.writer(f)
    header = ["mxd", "data_frame", "spatial_reference"]
    csv23.write(csv_writer, header)
    # NOTE: removed a stray `csv_writer.writerow()` call here; writerow()
    # requires a row argument, so it raised TypeError, and the header had
    # already been written on the line above.
    for root, dirs, files in os.walk(Config.start):
        for file in files:
            if os.path.splitext(file)[1].lower() == ".mxd":
                suspect = os.path.join(root, file)
                print("Checking {}".format(suspect))
                try:
                    mxd = arcpy.mapping.MapDocument(suspect)
                    for df in arcpy.mapping.ListDataFrames(mxd):
                        print("  data frame {0} has spatial reference: {1}".
                              format(df.name, df.spatialReference.name))
                        row = [suspect, df.name, df.spatialReference.name]
                        csv23.write(csv_writer, row)
                except arcpy.ExecuteError:
                    print("ERROR: Unable to check document")
# Esempio n. 13
 def put_in_csv(row):
     """Write row with the enclosing scope's csv_writer.

     NOTE(review): `csv_writer` is not defined in this fragment — presumably
     a module-level or enclosing-scope writer; verify it is in scope.
     """
     csv23.write(csv_writer, row)