Example #1
def get_processor_capacity(primary_processing, logger):
    capacity = 0

    if primary_processing == "FTx":
        capacity = Q_(200000, "kgal")
    if primary_processing == "Petroleum_Refinery":
        capacity = Q_(7665000, "kgal")
    else:
        capacity = Q_(200000, "kgal")

    return capacity
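
A minimal usage sketch of the capacity lookup above, assuming the module-level Q_ these functions rely on is a pint Quantity bound to a registry that knows a "kgal" (thousand-gallon) unit; FTOT configures its own registry, so the definition shown here is only an illustrative stand-in.

from pint import UnitRegistry

ureg = UnitRegistry()
ureg.define("kgal = 1000 * gallon")  # assumption: stand-in for FTOT's own unit definitions
Q_ = ureg.Quantity

print(get_processor_capacity("Petroleum_Refinery", logger=None))  # -> 7665000 kgal
print(get_processor_capacity("FTx", logger=None))                 # -> 200000 kgal
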
def make_emission_factors_dict(the_scenario, logger):

    # check for emission factors file
    ftot_program_directory = os.path.dirname(os.path.realpath(__file__))
    emission_factors_path = os.path.join(ftot_program_directory, "lib",
                                         "detailed_emission_factors.csv")
    if not os.path.exists(emission_factors_path):
        logger.warning(
            "warning: cannot find detailed_emission_factors file: {}".format(
                emission_factors_path))
        return {}  # return empty dict

    # query vehicle labels for validation
    available_vehicles = ['Default']
    with sqlite3.connect(the_scenario.main_db) as main_db_con:
        db_cur = main_db_con.cursor()
        all_vehicles = main_db_con.execute(
            "select vehicle_label from vehicle_types;")
        all_vehicles = all_vehicles.fetchall()
        for row in all_vehicles:
            available_vehicles.append(row[0])
    available_vehicles = set(available_vehicles)

    # initialize emission factors dict and read through detailed_emission_factors CSV
    factors_dict = {}
    with open(emission_factors_path, 'r') as ef:
        line_num = 1
        for line in ef:
            if line_num == 1:
                pass  # skip the CSV header row
            else:
                flds = line.rstrip('\n').split(',')
                vehicle_label = flds[0]
                mode = flds[1].lower()
                road_type = flds[2]
                pollutant = flds[3].lower()
                factor = flds[4]

                # Check vehicle label
                # Note: We're strict with capitalization here since vehicle_types processing is also strict
                if vehicle_label not in available_vehicles:
                    logger.warning(
                        "Vehicle: {} in detailed emissions files is not recognized."
                        .format(vehicle_label))

                # Check mode
                assert mode in [
                    'road', 'water', 'rail'
                ], "Mode: {} is not supported. Please specify road, water, or rail.".format(
                    mode)

                # Check road type
                if mode == 'road':
                    allowed_road_types = [
                        'Urban_Unrestricted', 'Urban_Restricted',
                        'Rural_Unrestricted', 'Rural_Restricted'
                    ]
                    assert road_type in allowed_road_types, "Road type: {} is not recognized. Road type must be one of {}.".format(
                        road_type, allowed_road_types)
                else:
                    assert road_type == 'NA', "Road type must be 'NA' for water and rail modes."

                assert pollutant in ['co','co2e','ch4','n2o','nox','pm10','pm2.5','voc'],\
                    "Pollutant: {} is not recognized. Refer to the documentation for allowed pollutants.".format(pollutant)

                # convert units
                # Pint throws an exception if units are invalid
                if mode == 'road':
                    factor = Q_(factor).to('g/mi')
                else:
                    factor = Q_(factor).to('g/{}/mi'.format(
                        the_scenario.default_units_solid_phase))

                # populate dictionary
                if mode not in factors_dict:
                    # create entry for new mode type
                    factors_dict[mode] = {}
                if mode == 'road':
                    # store emission factors for road
                    if vehicle_label not in factors_dict[mode]:
                        factors_dict[mode][vehicle_label] = {
                            pollutant: {
                                road_type: factor
                            }
                        }
                    elif pollutant not in factors_dict[mode][vehicle_label]:
                        factors_dict[mode][vehicle_label][pollutant] = {
                            road_type: factor
                        }
                    else:
                        if road_type in factors_dict[mode][vehicle_label][
                                pollutant].keys():
                            logger.warning('Road type: {} for pollutant: {} and vehicle: {} already exists. Overwriting with value: {}'.\
                                format(road_type, pollutant, vehicle_label, factor))
                        factors_dict[mode][vehicle_label][pollutant][
                            road_type] = factor
                else:
                    # store emission factors for non-road
                    if vehicle_label not in factors_dict[mode]:
                        factors_dict[mode][vehicle_label] = {pollutant: factor}
                    else:
                        if pollutant in factors_dict[mode][vehicle_label].keys(
                        ):
                            logger.warning(
                                'Pollutant: {} already exists for vehicle: {}. Overwriting with value: {}'
                                .format(pollutant, vehicle_label, factor))
                        factors_dict[mode][vehicle_label][pollutant] = factor

            line_num += 1

    return factors_dict
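
For reference, the row parsing above implies a header line followed by columns vehicle_label, mode, road_type, pollutant, factor. The sketch below shows a hypothetical file and the rough shape of the dictionary it would yield, assuming a solid-phase default unit of tons; in the real dictionary the factor values are pint Quantities rather than strings.

# Hypothetical detailed_emission_factors.csv (header + two data rows):
#   vehicle_label,mode,road_type,pollutant,factor
#   Default,road,Urban_Restricted,co,1.2 g/mi
#   Default,rail,NA,ch4,0.5 g/ton/mi
#
# Rough shape of the returned factors_dict: road factors are keyed by road type,
# non-road factors by pollutant only.
example_factors_dict = {
    "road": {"Default": {"co": {"Urban_Restricted": "1.2 g/mi"}}},
    "rail": {"Default": {"ch4": "0.5 g/ton/mi"}},
}
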
def get_commodity_vehicle_attributes_dict(the_scenario,
                                          logger,
                                          EmissionsWarning=False):

    with sqlite3.connect(the_scenario.main_db) as main_db_con:

        # query commodities table
        commodities_dict = {}  # key off ID
        commodities = main_db_con.execute(
            "select * from commodities where commodity_name <> 'multicommodity';"
        )
        commodities = commodities.fetchall()
        for row in commodities:
            commodity_id = str(row[0])
            commodity_name = row[1]
            commodities_dict[commodity_id] = commodity_name

        # query commodity modes table
        commodity_mode_dict = {
        }  # key off phase, then commodity name (not ID!)
        commodity_mode = main_db_con.execute("select * from commodity_mode;")
        commodity_mode = commodity_mode.fetchall()
        for row in commodity_mode:
            mode = row[0]
            commodity_id = row[1]
            phase = row[2]
            vehicle_label = row[3]
            allowed_yn = row[4]
            if allowed_yn == 'N':
                continue  # skip row if not permitted

            # use commodities dictionary to determine commodity name
            commodity_name = commodities_dict[commodity_id]

            # populate commodity modes dictionary
            if phase not in commodity_mode_dict:
                # add new phase to dictionary and start commodity and mode dictionary
                commodity_mode_dict[phase] = {
                    commodity_name: {
                        mode: vehicle_label
                    }
                }
            elif commodity_name not in commodity_mode_dict[phase]:
                # add new commodity to dictionary and start mode dictionary
                commodity_mode_dict[phase][commodity_name] = {
                    mode: vehicle_label
                }
            else:
                # add new mode to dictionary
                commodity_mode_dict[phase][commodity_name][
                    mode] = vehicle_label

        # query vehicle types table
        vehicle_types_dict = {}  # key off mode
        vehs = main_db_con.execute("select * from vehicle_types;")
        vehs = vehs.fetchall()
        for row in vehs:
            mode = row[0]
            vehicle_label = row[1]
            property_name = row[2]
            property_value = Q_(row[3])
            if mode not in vehicle_types_dict:
                # add new mode to dictionary and start vehicle and property dictionary
                vehicle_types_dict[mode] = {
                    vehicle_label: {
                        property_name: property_value
                    }
                }
            elif vehicle_label not in vehicle_types_dict[mode]:
                # add new vehicle to dictionary and start property dictionary
                vehicle_types_dict[mode][vehicle_label] = {
                    property_name: property_value
                }
            else:
                # add to existing dictionary entry
                vehicle_types_dict[mode][vehicle_label][
                    property_name] = property_value

    # load detailed emission factors
    factors_dict = make_emission_factors_dict(the_scenario, logger)

    # create commodity/vehicle attribute dictionary
    logger.debug("----- commodity/vehicle attribute table -----")

    attribute_dict = {}  # key off commodity name
    for phase in commodity_mode_dict:
        for commodity_name in commodity_mode_dict[phase]:
            for mode in commodity_mode_dict[phase][commodity_name]:

                # Get vehicle assigned to commodity on this mode
                vehicle_label = commodity_mode_dict[phase][commodity_name][
                    mode]

                if commodity_name not in attribute_dict:
                    # Create dictionary entry for commodity
                    attribute_dict[commodity_name] = {mode: {}}
                else:
                    # Create dictionary entry for commodity's mode
                    attribute_dict[commodity_name][mode] = {}

                # Set attributes based on mode, vehicle label, and commodity phase
                # ROAD
                if mode == 'road':
                    if vehicle_label == 'Default':
                        # use default attributes for trucks
                        if phase == 'liquid':
                            attribute_dict[commodity_name][mode][
                                'Load'] = the_scenario.truck_load_liquid
                        else:
                            attribute_dict[commodity_name][mode][
                                'Load'] = the_scenario.truck_load_solid

                        attribute_dict[commodity_name][mode][
                            'Fuel_Efficiency'] = the_scenario.truckFuelEfficiency
                        attribute_dict[commodity_name][mode][
                            'CO2urbanUnrestricted'] = the_scenario.CO2urbanUnrestricted
                        attribute_dict[commodity_name][mode][
                            'CO2urbanRestricted'] = the_scenario.CO2urbanRestricted
                        attribute_dict[commodity_name][mode][
                            'CO2ruralUnrestricted'] = the_scenario.CO2ruralUnrestricted
                        attribute_dict[commodity_name][mode][
                            'CO2ruralRestricted'] = the_scenario.CO2ruralRestricted

                    elif vehicle_label != 'NA':
                        # use user-specified vehicle attributes, or if missing, the default value
                        if phase == 'liquid':
                            attribute_dict[commodity_name][mode][
                                'Load'] = vehicle_types_dict[mode][
                                    vehicle_label]['Truck_Load_Liquid']
                        else:
                            attribute_dict[commodity_name][mode][
                                'Load'] = vehicle_types_dict[mode][
                                    vehicle_label]['Truck_Load_Solid']

                        attribute_dict[commodity_name][mode][
                            'Fuel_Efficiency'] = vehicle_types_dict[mode][
                                vehicle_label]['Truck_Fuel_Efficiency']
                        attribute_dict[commodity_name][mode][
                            'CO2urbanUnrestricted'] = vehicle_types_dict[mode][
                                vehicle_label]['Atmos_CO2_Urban_Unrestricted']
                        attribute_dict[commodity_name][mode][
                            'CO2urbanRestricted'] = vehicle_types_dict[mode][
                                vehicle_label]['Atmos_CO2_Urban_Restricted']
                        attribute_dict[commodity_name][mode][
                            'CO2ruralUnrestricted'] = vehicle_types_dict[mode][
                                vehicle_label]['Atmos_CO2_Rural_Unrestricted']
                        attribute_dict[commodity_name][mode][
                            'CO2ruralRestricted'] = vehicle_types_dict[mode][
                                vehicle_label]['Atmos_CO2_Rural_Restricted']

                # RAIL
                elif mode == 'rail':
                    if vehicle_label == 'Default':
                        # use default attributes for railcars
                        if phase == 'liquid':
                            attribute_dict[commodity_name][mode][
                                'Load'] = the_scenario.railcar_load_liquid
                            attribute_dict[commodity_name][mode][
                                'CO2_Emissions'] = the_scenario.densityFactor * the_scenario.railroadCO2Emissions
                        else:
                            attribute_dict[commodity_name][mode][
                                'Load'] = the_scenario.railcar_load_solid
                            attribute_dict[commodity_name][mode][
                                'CO2_Emissions'] = the_scenario.railroadCO2Emissions

                        attribute_dict[commodity_name][mode][
                            'Fuel_Efficiency'] = the_scenario.railFuelEfficiency

                    elif vehicle_label != 'NA':
                        # use user-specified vehicle attributes, or if missing, the default value
                        if phase == 'liquid':
                            attribute_dict[commodity_name][mode][
                                'Load'] = vehicle_types_dict[mode][
                                    vehicle_label]['Railcar_Load_Liquid']
                            attribute_dict[commodity_name][mode][
                                'CO2_Emissions'] = the_scenario.densityFactor * vehicle_types_dict[
                                    mode][vehicle_label][
                                        'Railroad_CO2_Emissions']
                        else:
                            attribute_dict[commodity_name][mode][
                                'Load'] = vehicle_types_dict[mode][
                                    vehicle_label]['Railcar_Load_Solid']
                            attribute_dict[commodity_name][mode][
                                'CO2_Emissions'] = vehicle_types_dict[mode][
                                    vehicle_label]['Railroad_CO2_Emissions']

                        attribute_dict[commodity_name][mode][
                            'Fuel_Efficiency'] = vehicle_types_dict[mode][
                                vehicle_label]['Rail_Fuel_Efficiency']

                # WATER
                elif mode == 'water':
                    if vehicle_label == 'Default':
                        # use default attributes for barges
                        if phase == 'liquid':
                            attribute_dict[commodity_name][mode][
                                'Load'] = the_scenario.barge_load_liquid
                            attribute_dict[commodity_name][mode][
                                'CO2_Emissions'] = the_scenario.densityFactor * the_scenario.bargeCO2Emissions
                        else:
                            attribute_dict[commodity_name][mode][
                                'Load'] = the_scenario.barge_load_solid
                            attribute_dict[commodity_name][mode][
                                'CO2_Emissions'] = the_scenario.bargeCO2Emissions

                        attribute_dict[commodity_name][mode][
                            'Fuel_Efficiency'] = the_scenario.bargeFuelEfficiency

                    elif vehicle_label != 'NA':
                        # use user-specified vehicle attributes, or if missing, the default value
                        if phase == 'liquid':
                            attribute_dict[commodity_name][mode][
                                'Load'] = vehicle_types_dict[mode][
                                    vehicle_label]['Barge_Load_Liquid']
                            attribute_dict[commodity_name][mode][
                                'CO2_Emissions'] = the_scenario.densityFactor * vehicle_types_dict[
                                    mode][vehicle_label]['Barge_CO2_Emissions']
                        else:
                            attribute_dict[commodity_name][mode][
                                'Load'] = vehicle_types_dict[mode][
                                    vehicle_label]['Barge_Load_Solid']
                            attribute_dict[commodity_name][mode][
                                'CO2_Emissions'] = vehicle_types_dict[mode][
                                    vehicle_label]['Barge_CO2_Emissions']

                        attribute_dict[commodity_name][mode][
                            'Fuel_Efficiency'] = vehicle_types_dict[mode][
                                vehicle_label]['Barge_Fuel_Efficiency']

                # PIPELINE
                elif mode == 'pipeline_crude_trf_rts':
                    attribute_dict[commodity_name][mode][
                        'Load'] = the_scenario.pipeline_crude_load_liquid
                    attribute_dict[commodity_name][mode][
                        'CO2_Emissions'] = the_scenario.densityFactor * the_scenario.pipelineCO2Emissions

                elif mode == 'pipeline_prod_trf_rts':
                    attribute_dict[commodity_name][mode][
                        'Load'] = the_scenario.pipeline_prod_load_liquid
                    attribute_dict[commodity_name][mode][
                        'CO2_Emissions'] = the_scenario.densityFactor * the_scenario.pipelineCO2Emissions

                # DETAILED EMISSION FACTORS
                # Include detailed emission factors
                if mode in factors_dict and vehicle_label in factors_dict[mode]:
                    # loop through emission factors
                    for pollutant in factors_dict[mode][vehicle_label]:
                        if mode in ['rail', 'water'] and phase == 'liquid':
                            attribute_dict[commodity_name][mode][
                                pollutant] = the_scenario.densityFactor * factors_dict[
                                    mode][vehicle_label][pollutant]
                        else:
                            attribute_dict[commodity_name][mode][
                                pollutant] = factors_dict[mode][vehicle_label][
                                    pollutant]

                # The code block below checks if the user assigned a custom vehicle without detailed emission factors
                if mode in vehicle_types_dict and vehicle_label in vehicle_types_dict[
                        mode]:
                    # user assigned a custom vehicle. check if they want detailed emissions reporting.
                    if the_scenario.detailed_emissions and EmissionsWarning:
                        # warn if there are no matching emission factors
                        if mode not in factors_dict or vehicle_label not in factors_dict[
                                mode]:
                            logger.warning(
                                "Detailed emission factors are not specified for vehicle: {} for mode: {}. Excluding this vehicle from the emissions report."
                                .format(vehicle_label, mode))

                for attr in attribute_dict[commodity_name][mode].keys():
                    attr_value = attribute_dict[commodity_name][mode][attr]
                    logger.debug(
                        "Commodity: {}, Mode: {}, Attribute: {}, Value: {}".
                        format(commodity_name, mode, attr, attr_value))

    return attribute_dict  # Keyed off of commodity name, then mode, then vehicle attribute
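
The returned dictionary is keyed by commodity name, then mode, then attribute name. A hedged read-back sketch follows; "my_commodity" is a hypothetical commodity and the_scenario/logger are assumed to already exist.

attributes = get_commodity_vehicle_attributes_dict(the_scenario, logger)
road_load = attributes["my_commodity"]["road"]["Load"]            # pint Quantity (truck load)
road_mpg = attributes["my_commodity"]["road"]["Fuel_Efficiency"]  # scenario default or custom vehicle value
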
Example #4
def locations_add_links(logger, the_scenario, modal_layer_name,
                        max_artificial_link_distance_miles):

    # ADD LINKS LOGIC
    # first we run a near analysis between the locations fc and the mode layer.
    # then we iterate through the near table and build up a dictionary of links and all the near XYs on that link.
    # then we split the links on the mode (except pipeline) and preserve the data of that link.
    # then we near the locations to the nodes on the now-split links.
    # we ignore locations with near dist == 0 on those nodes.
    # then we add the artificial links and note which locations got links.
    # then we set the connects_<mode> field if the location was connected.

    logger.debug(
        "start: locations_add_links for mode: {}".format(modal_layer_name))

    scenario_gdb = the_scenario.main_gdb
    fp_to_modal_layer = os.path.join(scenario_gdb, "network", modal_layer_name)

    locations_fc = the_scenario.locations_fc
    arcpy.DeleteField_management(fp_to_modal_layer, "LOCATION_ID")
    arcpy.AddField_management(os.path.join(scenario_gdb, modal_layer_name),
                              "LOCATION_ID", "long")

    arcpy.DeleteField_management(fp_to_modal_layer, "LOCATION_ID_NAME")
    arcpy.AddField_management(os.path.join(scenario_gdb, modal_layer_name),
                              "LOCATION_ID_NAME", "text")

    if float(max_artificial_link_distance_miles.strip(" Miles")) < 0.0000001:
        logger.warning(
            "Note: ignoring mode {}. User specified artificial link distance of {}"
            .format(modal_layer_name, max_artificial_link_distance_miles))
        logger.debug(
            "Setting the definition query to Artificial = 999999, so we get an empty dataset for the "
            "make_feature_layer and subsequent near analysis")

        definition_query = "Artificial = 999999"  # something to return an empty set
    else:
        definition_query = "Artificial = 0"  # the normal def query.

    if "pipeline" in modal_layer_name:

        if arcpy.Exists(fp_to_modal_layer + "_points"):
            arcpy.Delete_management(fp_to_modal_layer + "_points")

        # limit near to end points
        arcpy.FeatureVerticesToPoints_management(
            in_features=fp_to_modal_layer,
            out_feature_class=fp_to_modal_layer + "_points",
            point_location="BOTH_ENDS")
        logger.debug("start:  make_featurelayer_management")
        arcpy.MakeFeatureLayer_management(fp_to_modal_layer + "_points",
                                          "modal_lyr_" + modal_layer_name,
                                          definition_query)

    else:
        logger.debug("start:  make_featurelayer_management")
        arcpy.MakeFeatureLayer_management(fp_to_modal_layer,
                                          "modal_lyr_" + modal_layer_name,
                                          definition_query)

    logger.debug(
        "adding links between locations_fc and mode {} with max dist of {}".
        format(modal_layer_name, max_artificial_link_distance_miles))

    if arcpy.Exists(os.path.join(scenario_gdb, "tmp_near")):
        logger.debug("start:  delete tmp near")
        arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_near"))

    logger.debug("start:  generate_near")
    arcpy.GenerateNearTable_analysis(locations_fc,
                                     "modal_lyr_" + modal_layer_name,
                                     os.path.join(scenario_gdb, "tmp_near"),
                                     max_artificial_link_distance_miles,
                                     "LOCATION", "NO_ANGLE", "CLOSEST")

    edit = arcpy.da.Editor(scenario_gdb)
    edit.startEditing(False, False)
    edit.startOperation()

    id_fieldname = arcpy.Describe(os.path.join(scenario_gdb,
                                               modal_layer_name)).OIDFieldName

    seenids = {}

    # SPLIT LINKS LOGIC
    # 1) first search through the tmp_near fc and add points from the near on that link.
    # 2) next we query the mode layer and get the mode specific data using the near FID.
    # 3) then we split the old link, and use insert cursor to populate mode specific data into fc for the two new links.
    # 4) then we delete the old unsplit link
    logger.debug("start:  split links")
    with arcpy.da.SearchCursor(
            os.path.join(scenario_gdb, "tmp_near"),
        ["NEAR_FID", "NEAR_X", "NEAR_Y", "NEAR_DIST"]) as scursor:

        for row in scursor:

            # if the near distance is 0, then it's connected and we don't need to
            # split the line.
            if row[3] == 0:
                # only give a debug warning if not pipeline.
                if "pipeline" not in modal_layer_name:
                    logger.warning(
                        "Split links code: LOCATION MIGHT BE ON THE NETWORK. Ignoring NEAR_FID {} with NEAR_DIST {}"
                        .format(row[0], row[3]))

            if not row[3] == 0:

                # STEP 1: point geoms where to split from the near XY
                # ---------------------------------------------------
                # get the line ID to split
                theIdToGet = str(row[0])  # this is the link id we need.

                if not theIdToGet in seenids:
                    seenids[theIdToGet] = []

                point = arcpy.Point()
                point.X = float(row[1])
                point.Y = float(row[2])
                point_geom = arcpy.PointGeometry(point,
                                                 ftot_supporting_gis.LCC_PROJ)
                seenids[theIdToGet].append(point_geom)

        # STEP 2 -- get mode specific data from the link
        # ------------------------------------------------
        if 'pipeline' not in modal_layer_name:

            for theIdToGet in seenids:

                # initialize the variables so we don't get any gremlins.
                in_line = None  # the shape geometry
                in_capacity = None  # road + rail
                in_volume = None  # road + rail
                in_vcr = None  # road + rail | volume to capacity ratio
                in_fclass = None  # road | fclass
                in_speed = None  # road | rounded speed
                in_stracnet = None  # rail
                in_density_code = None  # rail
                in_tot_up_dwn = None  # water

                if modal_layer_name == 'road':
                    for row in arcpy.da.SearchCursor(
                            os.path.join(scenario_gdb, modal_layer_name), [
                                "SHAPE@", "Capacity", "Volume", "VCR",
                                "FCLASS", "ROUNDED_SPEED"
                            ],
                            where_clause=id_fieldname + " = " + theIdToGet):
                        in_line = row[0]
                        in_capacity = row[1]
                        in_volume = row[2]
                        in_vcr = row[3]
                        in_fclass = row[4]
                        in_speed = row[5]

                if modal_layer_name == 'rail':
                    for row in arcpy.da.SearchCursor(
                            os.path.join(scenario_gdb, modal_layer_name), [
                                "SHAPE@", "Capacity", "Volume", "VCR",
                                "STRACNET", "DENSITY_CODE"
                            ],
                            where_clause=id_fieldname + " = " + theIdToGet):
                        in_line = row[0]
                        in_capacity = row[1]
                        in_volume = row[2]
                        in_vcr = row[3]
                        in_stracnet = row[4]
                        in_density_code = row[5]

                if modal_layer_name == 'water':
                    for row in arcpy.da.SearchCursor(
                            os.path.join(scenario_gdb, modal_layer_name),
                        ["SHAPE@", "Capacity", "Volume", "VCR", "TOT_UP_DWN"],
                            where_clause=id_fieldname + " = " + theIdToGet):
                        in_line = row[0]
                        in_capacity = row[1]
                        in_volume = row[2]
                        in_vcr = row[3]
                        in_tot_up_dwn = row[4]

                # STEP 3: Split and populate with mode specific data from old link
                # ------------------------------------------------------------------
                split_lines = arcpy.management.SplitLineAtPoint(
                    in_line, seenids[theIdToGet], arcpy.Geometry(), 1)

                if not len(split_lines) == 1:

                    # ROAD
                    if modal_layer_name == 'road':

                        icursor = arcpy.da.InsertCursor(
                            os.path.join(scenario_gdb, modal_layer_name), [
                                'SHAPE@', 'Artificial', 'MODE_TYPE', 'MILES',
                                'FCLASS', 'ROUNDED_SPEED', 'Volume',
                                'Capacity', 'VCR'
                            ])

                        # Insert new links that include the mode-specific attributes
                        for new_line in split_lines:
                            len_in_miles = Q_(new_line.length,
                                              "meters").to("miles").magnitude
                            icursor.insertRow([
                                new_line, 0, modal_layer_name, len_in_miles,
                                in_fclass, in_speed, in_volume, in_capacity,
                                in_vcr
                            ])

                        # Delete cursor object
                        del icursor

                    elif modal_layer_name == 'rail':
                        icursor = arcpy.da.InsertCursor(
                            os.path.join(scenario_gdb, modal_layer_name), [
                                'SHAPE@', 'Artificial', 'MODE_TYPE', 'MILES',
                                'STRACNET', 'DENSITY_CODE', 'Volume',
                                'Capacity', 'VCR'
                            ])

                        # Insert new rows that include the mode-specific attributes
                        for new_line in split_lines:
                            len_in_miles = Q_(new_line.length,
                                              "meters").to("miles").magnitude
                            icursor.insertRow([
                                new_line, 0, modal_layer_name, len_in_miles,
                                in_stracnet, in_density_code, in_volume,
                                in_capacity, in_vcr
                            ])

                        # Delete cursor object
                        del icursor

                    elif modal_layer_name == 'water':

                        icursor = arcpy.da.InsertCursor(
                            os.path.join(scenario_gdb, modal_layer_name), [
                                'SHAPE@', 'Artificial', 'MODE_TYPE', 'MILES',
                                'TOT_UP_DWN', 'Volume', 'Capacity', 'VCR'
                            ])

                        # Insert new rows that include the mode-specific attributes
                        for new_line in split_lines:
                            len_in_miles = Q_(new_line.length,
                                              "meters").to("miles").magnitude
                            icursor.insertRow([
                                new_line, 0, modal_layer_name, len_in_miles,
                                in_tot_up_dwn, in_volume, in_capacity, in_vcr
                            ])

                        # Delete cursor object
                        del icursor

                    else:
                        logger.warning(
                            "Modal_layer_name: {} is not supported.".format(
                                modal_layer_name))

                    # STEP 4:  Delete old unsplit data
                    with arcpy.da.UpdateCursor(os.path.join(
                            scenario_gdb, modal_layer_name), ['OID@'],
                                               where_clause=id_fieldname +
                                               " = " + theIdToGet) as ucursor:
                        for row in ucursor:
                            ucursor.deleteRow()

                # if the split doesn't work.
                else:
                    logger.detailed_debug(
                        "the line split didn't work for ID: {}. "
                        "Might want to investigate. "
                        "Could just be an artifact from the near result being the end of a line."
                        .format(theIdToGet))

    edit.stopOperation()
    edit.stopEditing(True)

    # delete the old features
    # ------------------------
    logger.debug(
        "start:  delete old features (tmp_near, tmp_near_2, tmp_nodes)")
    if arcpy.Exists(os.path.join(scenario_gdb, "tmp_near")):
        arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_near"))

    if arcpy.Exists(os.path.join(scenario_gdb, "tmp_near_2")):
        arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_near_2"))

    if arcpy.Exists(os.path.join(scenario_gdb, "tmp_nodes")):
        arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_nodes"))

    # Add artificial links now.
    # now that the lines have been split add lines from the from points to the nearest node
    # --------------------------------------------------------------------------------------
    logger.debug(
        "start:  add artificial links now w/ definition_query: {}".format(
            definition_query))
    logger.debug("start:  make_featurelayer 2")
    fp_to_modal_layer = os.path.join(scenario_gdb, "network", modal_layer_name)
    arcpy.MakeFeatureLayer_management(fp_to_modal_layer,
                                      "modal_lyr_" + modal_layer_name + "2",
                                      definition_query)
    logger.debug("start:  feature vertices to points 2")
    arcpy.FeatureVerticesToPoints_management(
        in_features="modal_lyr_" + modal_layer_name + "2",
        out_feature_class=os.path.join(scenario_gdb, "tmp_nodes"),
        point_location="BOTH_ENDS")
    logger.debug("start:  generate near table 2")
    arcpy.GenerateNearTable_analysis(locations_fc,
                                     os.path.join(scenario_gdb, "tmp_nodes"),
                                     os.path.join(scenario_gdb, "tmp_near_2"),
                                     max_artificial_link_distance_miles,
                                     "LOCATION", "NO_ANGLE", "CLOSEST")

    logger.debug("start:  delete tmp_nodes")
    arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_nodes"))

    logger.debug("start:  start editor")
    edit = arcpy.da.Editor(scenario_gdb)
    edit.startEditing(False, False)
    edit.startOperation()

    icursor = arcpy.da.InsertCursor(
        os.path.join(scenario_gdb, modal_layer_name), [
            'SHAPE@', 'Artificial', 'MODE_TYPE', 'MILES', 'LOCATION_ID',
            'LOCATION_ID_NAME'
        ])  # add location_id for setting flow restrictions

    location_id_name_dict = get_location_id_name_dict(the_scenario, logger)
    connected_location_ids = []
    connected_location_id_names = []
    logger.debug("start:  search cursor on tmp_near_2")
    with arcpy.da.SearchCursor(
            os.path.join(scenario_gdb, "tmp_near_2"),
        ["FROM_X", "FROM_Y", "NEAR_X", "NEAR_Y", "NEAR_DIST", "IN_FID"
         ]) as scursor:

        for row in scursor:

            if not row[4] == 0:

                # use the unique objectid (in_fid) from the near to determine
                # if we have an in or an out location.
                # then set the flow restrictions appropriately.

                in_fid = row[5]
                location_id_name = location_id_name_dict[in_fid]
                location_id = location_id_name.split("_")[0]
                connected_location_ids.append(location_id)
                connected_location_id_names.append(location_id_name)

                coordList = []
                coordList.append(arcpy.Point(row[0], row[1]))
                coordList.append(arcpy.Point(row[2], row[3]))
                polyline = arcpy.Polyline(arcpy.Array(coordList))

                len_in_miles = Q_(polyline.length,
                                  "meters").to("miles").magnitude

                # insert artificial link attributes
                icursor.insertRow([
                    polyline, 1, modal_layer_name, len_in_miles, location_id,
                    location_id_name
                ])

            else:
                logger.warning(
                    "Artificial Link code: Ignoring IN_FID {} with NEAR_DIST {}"
                    .format(row[5], row[4]))

    del icursor
    logger.debug("start:  stop editing")
    edit.stopOperation()
    edit.stopEditing(True)

    arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_near_2"))

    # ALSO SET CONNECTS_X FIELD IN POINT LAYER
    # -----------------------------------------
    logger.debug("start:  connect_x")
    arcpy.AddField_management(os.path.join(scenario_gdb, locations_fc),
                              "connects_" + modal_layer_name, "SHORT")
    arcpy.CalculateField_management(os.path.join(scenario_gdb, locations_fc),
                                    "connects_" + modal_layer_name, 0,
                                    "PYTHON_9.3")

    edit = arcpy.da.Editor(scenario_gdb)
    edit.startEditing(False, False)
    edit.startOperation()
    with arcpy.da.UpdateCursor(
            os.path.join(scenario_gdb, locations_fc),
        ["LOCATION_ID_NAME", "connects_" + modal_layer_name]) as cursor:

        for row in cursor:

            if row[0] in connected_location_id_names:
                row[1] = 1
                cursor.updateRow(row)

    edit.stopOperation()
    edit.stopEditing(True)

    logger.debug("finish: locations_add_links")
Example #5
def get_input_and_output_commodity_quantities_from_afpat(
        commodity, process, the_scenario, logger):

    input_commodity_quantities = 0  # a quantity of input resource required to produce fuels
    output_commodity_quantities = {
    }  # a dictionary containing the fuel outputs

    ag_fuel_yield_dict, cropYield, bioWasteDict, fossilResources = load_afpat_tables(
        the_scenario, logger)

    if commodity.lower().find("test_liquid_none_none") > -1:
        #        print "in the right place"
        input_commodity_quantities = Q_(1, "kgal")
        output_commodity_quantities['test_product_liquid_None_None'] = Q_(
            1, "kgal")
        output_commodity_quantities['jet'] = Q_(0, "kgal")
        output_commodity_quantities['diesel'] = Q_(0, "kgal")
        output_commodity_quantities['naphtha'] = Q_(0, "kgal")
        output_commodity_quantities['aromatics'] = Q_(0, "kgal")
        output_commodity_quantities['total_fuel'] = Q_(1, "kgal")
        # hack to skip the petroleum refinery
        commodity = "hack-hack-hack"
        process = "hack-hack-hack"

    elif commodity in ag_fuel_yield_dict:
        #        print "in the wrong right place"
        if process in ag_fuel_yield_dict[commodity]:

            input_commodity_quantities = Q_(
                ag_fuel_yield_dict[commodity][process][8], "kg/day")

            output_commodity_quantities['jet'] = Q_(
                ag_fuel_yield_dict[commodity][process][1], "oil_bbl / day")
            output_commodity_quantities['diesel'] = Q_(
                ag_fuel_yield_dict[commodity][process][2], "oil_bbl / day")
            output_commodity_quantities['naphtha'] = Q_(
                ag_fuel_yield_dict[commodity][process][3], "oil_bbl / day")
            output_commodity_quantities['aromatics'] = Q_(
                ag_fuel_yield_dict[commodity][process][4], "oil_bbl / day")
            output_commodity_quantities['total_fuel'] = Q_(
                ag_fuel_yield_dict[commodity][process][6], "oil_bbl / day")

        else:

            logger.error(
                "the commodity {} has no process {} in the AFPAT agricultural yield dictionary"
                .format(commodity, process))
            raise Exception(
                "the commodity {} has no process {} in the AFPAT agricultural yield dictionary"
                .format(commodity, process))

    elif commodity in bioWasteDict:

        if process in bioWasteDict[commodity]:

            input_commodity_quantities = Q_(
                bioWasteDict[commodity][process][1], "kg / year")

            output_commodity_quantities['total_fuel'] = Q_(
                bioWasteDict[commodity][process][4], "oil_bbl / day")
            output_commodity_quantities['jet'] = Q_(
                bioWasteDict[commodity][process][5], "oil_bbl / day")
            output_commodity_quantities['diesel'] = Q_(
                bioWasteDict[commodity][process][6], "oil_bbl / day")
            output_commodity_quantities['naphtha'] = Q_(
                bioWasteDict[commodity][process][7], "oil_bbl / day")
            output_commodity_quantities['aromatics'] = Q_(
                0.000, "oil_bbl / day")

        else:

            logger.debug(
                "the process {} for commodity {} is not in the biowaste yield dictionary {}"
                .format(process, commodity, bioWasteDict[commodity]))
            logger.error(
                "the commodity {} has no process {} in the AFPAT biowaste yield dictionary"
                .format(commodity, process))
            raise Exception(
                "the commodity {} has no process {} in the AFPAT biowaste yield dictionary"
                .format(commodity, process))

    # DO THE FOSSIL RESOURCES
    fossil_keys = list(fossilResources.keys())

    for key in fossil_keys:

        if commodity.find(key) > -1:

            if process in fossilResources[key]:

                input_commodity_quantities = Q_(500e3, "oil_bbl / day")

                output_commodity_quantities['total_fuel'] = Q_(
                    fossilResources[key][process][4], "oil_bbl / day")
                output_commodity_quantities['jet'] = Q_(
                    fossilResources[key][process][5], "oil_bbl / day")
                output_commodity_quantities['diesel'] = Q_(
                    fossilResources[key][process][6], "oil_bbl / day")
                output_commodity_quantities['naphtha'] = Q_(
                    fossilResources[key][process][7], "oil_bbl / day")
                output_commodity_quantities['aromatics'] = Q_(
                    0.000, "oil_bbl / day")

            else:

                logger.error(
                    "the commodity {} has no process {} in the AFPAT fossilResources yield dictionary"
                    .format(commodity, process))
                raise Exception(
                    "the commodity {} has no process {} in the AFPAT fossilResources yield dictionary"
                    .format(commodity, process))

    if input_commodity_quantities == 0 or output_commodity_quantities == {}:
        logger.error(
            "the commodity {} and process {} are not in the AFPAT agricultural, biowaste, or fossil fuel yield dictionaries"
            .format(commodity, process))
        raise Exception(
            "the commodity {} and process {} are not in the AFPAT agricultural, biowaste, or fossil fuel yield dictionaries"
            .format(commodity, process))

    return input_commodity_quantities, output_commodity_quantities
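
A hedged usage sketch exercising the hard-coded test commodity branch above; the process name is a hypothetical placeholder because that branch ignores it.

inputs, outputs = get_input_and_output_commodity_quantities_from_afpat(
    "test_liquid_none_none", "any_process", the_scenario, logger)
print(inputs)                  # -> 1 kgal of input resource
print(outputs["total_fuel"])   # -> 1 kgal of fuel output
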
Example #6
def load_scenario_config_file(fullPathToXmlConfigFile, fullPathToXmlSchemaFile,
                              logger):

    if not os.path.exists(fullPathToXmlConfigFile):
        raise IOError(
            "XML Scenario File {} not found at specified location.".format(
                fullPathToXmlConfigFile))

    if fullPathToXmlConfigFile.rfind(".xml") < 0:
        raise IOError("XML Scenario File {} is not an XML file type.".format(
            fullPathToXmlConfigFile))

    if not os.path.exists(fullPathToXmlSchemaFile):
        raise IOError(
            "XML Schema File not found at {}".format(fullPathToXmlSchemaFile))

    xmlScenarioFile = minidom.parse(fullPathToXmlConfigFile)

    # Validate XML scenario against XML schema
    schemaObj = etree.XMLSchema(etree.parse(fullPathToXmlSchemaFile))
    xmlFileObj = etree.parse(fullPathToXmlConfigFile)
    validationResult = schemaObj.validate(xmlFileObj)

    logger.debug("validate XML scenario against XML schema")
    if validationResult == False:
        logger.warning(
            "XML scenario validation failed. Error messages to follow.")
        for error in schemaObj.error_log:
            logger.warning("ERROR ON LINE: {} - ERROR MESSAGE: {}".format(
                error.line, error.message))

        raise Exception(
            "XML Scenario File does not meet the requirements in the XML schema file."
        )

    # initialize scenario object
    logger.debug("initialize scenario object")
    scenario = Scenario()

    # Check the scenario schema version (current version is specified in FTOT.py under VERSION_NUMBER global var)
    logger.debug("validate schema version is correct")
    scenario.scenario_schema_version = xmlScenarioFile.getElementsByTagName(
        'Scenario_Schema_Version')[0].firstChild.data

    #if not str(VERSION_NUMBER) == str(scenario.scenario_schema_version):
    if not str(VERSION_NUMBER).split(".")[0:2] == str(
            scenario.scenario_schema_version).split(".")[0:2]:
        error = "XML Schema File Version is {}. Expected version {}. " \
                "Use the XML flag to run the XML upgrade tool. " \
                .format(str(scenario.scenario_schema_version), str(VERSION_NUMBER))
        logger.error(error)
        raise Exception(error)

    scenario.scenario_name = xmlScenarioFile.getElementsByTagName(
        'Scenario_Name')[0].firstChild.data
    scenario.scenario_description = xmlScenarioFile.getElementsByTagName(
        'Scenario_Description')[0].firstChild.data
    #scenario.scenario_name = getElementFromXmlFile(xmlScenarioFile, Scenario_Name):

    # SCENARIO INPUTS SECTION
    # ----------------------------------------------------------------------------------------
    scenario.common_data_folder = xmlScenarioFile.getElementsByTagName(
        'Common_Data_Folder')[0].firstChild.data
    scenario.base_network_gdb = xmlScenarioFile.getElementsByTagName(
        'Base_Network_Gdb')[0].firstChild.data

    scenario.base_rmp_layer = xmlScenarioFile.getElementsByTagName(
        'Base_RMP_Layer')[0].firstChild.data
    scenario.base_destination_layer = xmlScenarioFile.getElementsByTagName(
        'Base_Destination_Layer')[0].firstChild.data
    scenario.base_processors_layer = xmlScenarioFile.getElementsByTagName(
        'Base_Processors_Layer')[0].firstChild.data

    scenario.rmp_commodity_data = xmlScenarioFile.getElementsByTagName(
        'RMP_Commodity_Data')[0].firstChild.data
    scenario.destinations_commodity_data = xmlScenarioFile.getElementsByTagName(
        'Destinations_Commodity_Data')[0].firstChild.data
    scenario.processors_commodity_data = xmlScenarioFile.getElementsByTagName(
        'Processors_Commodity_Data')[0].firstChild.data
    scenario.processors_candidate_slate_data = xmlScenarioFile.getElementsByTagName(
        'Processors_Candidate_Commodity_Data')[0].firstChild.data
    # note: the processor_candidates_data is defined under other since it is not a user specified file.
    # Adding Scenario schedule as file path if it exists, or otherwise as None
    if len(xmlScenarioFile.getElementsByTagName('Schedule_Data')):
        scenario.schedule = xmlScenarioFile.getElementsByTagName(
            'Schedule_Data')[0].firstChild.data
    else:
        logger.debug("Schedule_Data field not specified. Defaulting to None.")
        scenario.schedule = "None"  # using string instead of NoneType to match when user manually sets to None
    # Adding commodity-mode as file path if it exists, or otherwise as None
    if len(xmlScenarioFile.getElementsByTagName('Commodity_Mode_Data')):
        scenario.commodity_mode_data = xmlScenarioFile.getElementsByTagName(
            'Commodity_Mode_Data')[0].firstChild.data
    else:
        logger.debug(
            "Commodity_Mode_Data field not specified. Defaulting to None.")
        scenario.commodity_mode_data = "None"  # using string instead of NoneType to match when user manually sets to None
    logger.debug("scenario commodity_mode_data attribute set to: " +
                 scenario.commodity_mode_data)
    # v 5.0 10/12/18 user specified default units for the liquid and solid phase commodities
    # use pint to set the default units
    logger.debug("test: setting the default units with pint")
    try:
        scenario.default_units_solid_phase = Q_(
            xmlScenarioFile.getElementsByTagName('Default_Units_Solid_Phase')
            [0].firstChild.data).units
        scenario.default_units_liquid_phase = Q_(
            xmlScenarioFile.getElementsByTagName('Default_Units_Liquid_Phase')
            [0].firstChild.data).units
        logger.debug("PASS: setting the default units with pint")
    except Exception as e:
        logger.error("FAIL: {} ".format(e))
        raise Exception("FAIL: {}".format(e))

    # ASSUMPTIONS SECTION
    # ----------------------------------------------------------------------------------------

    # solid and liquid vehicle loads
    try:

        logger.debug(
            "test: setting the vehicle loads for solid phase of matter pint")
        scenario.truck_load_solid = Q_(
            xmlScenarioFile.getElementsByTagName('Truck_Load_Solid')
            [0].firstChild.data).to(scenario.default_units_solid_phase)
        scenario.railcar_load_solid = Q_(
            xmlScenarioFile.getElementsByTagName('Railcar_Load_Solid')
            [0].firstChild.data).to(scenario.default_units_solid_phase)
        scenario.barge_load_solid = Q_(
            xmlScenarioFile.getElementsByTagName('Barge_Load_Solid')
            [0].firstChild.data).to(scenario.default_units_solid_phase)

        logger.debug(
            "test: setting the vehicle loads for liquid phase of matter with pint"
        )
        scenario.truck_load_liquid = Q_(
            xmlScenarioFile.getElementsByTagName('Truck_Load_Liquid')
            [0].firstChild.data).to(scenario.default_units_liquid_phase)
        scenario.railcar_load_liquid = Q_(
            xmlScenarioFile.getElementsByTagName('Railcar_Load_Liquid')
            [0].firstChild.data).to(scenario.default_units_liquid_phase)
        scenario.barge_load_liquid = Q_(
            xmlScenarioFile.getElementsByTagName('Barge_Load_Liquid')
            [0].firstChild.data).to(scenario.default_units_liquid_phase)
        scenario.pipeline_crude_load_liquid = Q_(
            xmlScenarioFile.getElementsByTagName('Pipeline_Crude_Load_Liquid')
            [0].firstChild.data).to(scenario.default_units_liquid_phase)
        scenario.pipeline_prod_load_liquid = Q_(
            xmlScenarioFile.getElementsByTagName('Pipeline_Prod_Load_Liquid')
            [0].firstChild.data).to(scenario.default_units_liquid_phase)
        logger.debug("PASS: the setting the vehicle loads with pint passed")

    except Exception as e:
        logger.error("FAIL: {} ".format(e))
        raise Exception("FAIL: {}".format(e))

    scenario.truckFuelEfficiency = float(
        xmlScenarioFile.getElementsByTagName(
            'Truck_Fuel_Efficiency_MilesPerGallon')[0].firstChild.data)
    scenario.railFuelEfficiency = float(
        xmlScenarioFile.getElementsByTagName(
            'Rail_Fuel_Efficiency_MilesPerGallon')[0].firstChild.data)
    scenario.bargeFuelEfficiency = float(
        xmlScenarioFile.getElementsByTagName(
            'Barge_Fuel_Efficiency_MilesPerGallon')[0].firstChild.data)
    scenario.CO2urbanUnrestricted = float(
        xmlScenarioFile.getElementsByTagName('Atmos_CO2_Urban_Unrestricted')
        [0].firstChild.data)
    scenario.CO2urbanRestricted = float(
        xmlScenarioFile.getElementsByTagName('Atmos_CO2_Urban_Restricted')
        [0].firstChild.data)
    scenario.CO2ruralUnrestricted = float(
        xmlScenarioFile.getElementsByTagName('Atmos_CO2_Rural_Unrestricted')
        [0].firstChild.data)
    scenario.CO2ruralRestricted = float(
        xmlScenarioFile.getElementsByTagName('Atmos_CO2_Rural_Restricted')
        [0].firstChild.data)
    scenario.railroadCO2Emissions = float(
        xmlScenarioFile.getElementsByTagName(
            'Railroad_CO2_Emissions_g_ton_mile')[0].firstChild.data)
    scenario.bargeCO2Emissions = float(
        xmlScenarioFile.getElementsByTagName('Barge_CO2_Emissions_g_ton_mile')
        [0].firstChild.data)
    scenario.pipelineCO2Emissions = float(
        xmlScenarioFile.getElementsByTagName(
            'Pipeline_CO2_Emissions_g_ton_mile')[0].firstChild.data)

    # SCRIPT PARAMETERS SECTION FOR NETWORK
    # ----------------------------------------------------------------------------------------

    #rail costs
    scenario.solid_railroad_class_1_cost = format_number(
        xmlScenarioFile.getElementsByTagName('solid_Railroad_Class_I_Cost')
        [0].firstChild.data)
    scenario.liquid_railroad_class_1_cost = format_number(
        xmlScenarioFile.getElementsByTagName('liquid_Railroad_Class_I_Cost')
        [0].firstChild.data)

    # rail penalties
    scenario.rail_dc_7 = format_number(
        xmlScenarioFile.getElementsByTagName('Rail_Density_Code_7_Weight')
        [0].firstChild.data)
    scenario.rail_dc_6 = format_number(
        xmlScenarioFile.getElementsByTagName('Rail_Density_Code_6_Weight')
        [0].firstChild.data)
    scenario.rail_dc_5 = format_number(
        xmlScenarioFile.getElementsByTagName('Rail_Density_Code_5_Weight')
        [0].firstChild.data)
    scenario.rail_dc_4 = format_number(
        xmlScenarioFile.getElementsByTagName('Rail_Density_Code_4_Weight')
        [0].firstChild.data)
    scenario.rail_dc_3 = format_number(
        xmlScenarioFile.getElementsByTagName('Rail_Density_Code_3_Weight')
        [0].firstChild.data)
    scenario.rail_dc_2 = format_number(
        xmlScenarioFile.getElementsByTagName('Rail_Density_Code_2_Weight')
        [0].firstChild.data)
    scenario.rail_dc_1 = format_number(
        xmlScenarioFile.getElementsByTagName('Rail_Density_Code_1_Weight')
        [0].firstChild.data)
    scenario.rail_dc_0 = format_number(
        xmlScenarioFile.getElementsByTagName('Rail_Density_Code_0_Weight')
        [0].firstChild.data)

    #truck costs
    scenario.solid_truck_base_cost = format_number(
        xmlScenarioFile.getElementsByTagName('solid_Truck_Base_Cost')
        [0].firstChild.data)
    scenario.liquid_truck_base_cost = format_number(
        xmlScenarioFile.getElementsByTagName('liquid_Truck_Base_Cost')
        [0].firstChild.data)

    # road penalties
    scenario.truck_interstate = format_number(
        xmlScenarioFile.getElementsByTagName('Truck_Interstate_Weight')
        [0].firstChild.data)
    scenario.truck_pr_art = format_number(
        xmlScenarioFile.getElementsByTagName('Truck_Principal_Arterial_Weight')
        [0].firstChild.data)
    scenario.truck_m_art = format_number(
        xmlScenarioFile.getElementsByTagName('Truck_Minor_Arterial_Weight')
        [0].firstChild.data)
    scenario.truck_local = format_number(
        xmlScenarioFile.getElementsByTagName('Truck_Local_Weight')
        [0].firstChild.data)

    # barge costs
    scenario.solid_barge_cost = format_number(
        xmlScenarioFile.getElementsByTagName('solid_Barge_cost')
        [0].firstChild.data)
    scenario.liquid_barge_cost = format_number(
        xmlScenarioFile.getElementsByTagName('liquid_Barge_cost')
        [0].firstChild.data)

    # water penalties
    scenario.water_high_vol = format_number(
        xmlScenarioFile.getElementsByTagName('Water_High_Volume_Weight')
        [0].firstChild.data)
    scenario.water_med_vol = format_number(
        xmlScenarioFile.getElementsByTagName('Water_Medium_Volume_Weight')
        [0].firstChild.data)
    scenario.water_low_vol = format_number(
        xmlScenarioFile.getElementsByTagName('Water_Low_Volume_Weight')
        [0].firstChild.data)
    scenario.water_no_vol = format_number(
        xmlScenarioFile.getElementsByTagName('Water_No_Volume_Weight')
        [0].firstChild.data)

    scenario.transloading_dollars_per_ton = format_number(
        xmlScenarioFile.getElementsByTagName('transloading_dollars_per_ton')
        [0].firstChild.data)
    scenario.transloading_dollars_per_thousand_gallons = format_number(
        xmlScenarioFile.getElementsByTagName(
            'transloading_dollars_per_thousand_gallons')[0].firstChild.data)

    scenario.road_max_artificial_link_dist = xmlScenarioFile.getElementsByTagName(
        'Road_Max_Artificial_Link_Distance_Miles')[0].firstChild.data
    scenario.rail_max_artificial_link_dist = xmlScenarioFile.getElementsByTagName(
        'Rail_Max_Artificial_Link_Distance_Miles')[0].firstChild.data
    scenario.water_max_artificial_link_dist = xmlScenarioFile.getElementsByTagName(
        'Water_Max_Artificial_Link_Distance_Miles')[0].firstChild.data
    scenario.pipeline_crude_max_artificial_link_dist = xmlScenarioFile.getElementsByTagName(
        'Pipeline_Crude_Max_Artificial_Link_Distance_Miles')[0].firstChild.data
    scenario.pipeline_prod_max_artificial_link_dist = xmlScenarioFile.getElementsByTagName(
        'Pipeline_Products_Max_Artificial_Link_Distance_Miles'
    )[0].firstChild.data

    # RUN ROUTE OPTIMIZATION SCRIPT SECTION
    # ----------------------------------------------------------------------------------------

    scenario.permittedModes = []
    if xmlScenarioFile.getElementsByTagName('Permitted_Modes')[
            0].getElementsByTagName('Road')[0].firstChild.data == "True":
        scenario.permittedModes.append("road")
    if xmlScenarioFile.getElementsByTagName('Permitted_Modes')[
            0].getElementsByTagName('Rail')[0].firstChild.data == "True":
        scenario.permittedModes.append("rail")
    if xmlScenarioFile.getElementsByTagName('Permitted_Modes')[
            0].getElementsByTagName('Water')[0].firstChild.data == "True":
        scenario.permittedModes.append("water")

    # TODO ALO-- 10/17/2018-- make below compatible with distinct crude/product pipeline approach-- ftot_pulp.py changes will be needed.
    if xmlScenarioFile.getElementsByTagName(
            'Permitted_Modes')[0].getElementsByTagName(
                'Pipeline_Crude')[0].firstChild.data == "True":
        scenario.permittedModes.append("pipeline_crude_trf_rts")
    if xmlScenarioFile.getElementsByTagName(
            'Permitted_Modes')[0].getElementsByTagName(
                'Pipeline_Prod')[0].firstChild.data == "True":
        scenario.permittedModes.append("pipeline_prod_trf_rts")

    if xmlScenarioFile.getElementsByTagName(
            'Capacity_On')[0].firstChild.data == "True":
        scenario.capacityOn = True
    else:
        scenario.capacityOn = False

    scenario.backgroundFlowModes = []
    if xmlScenarioFile.getElementsByTagName('Background_Flows')[
            0].getElementsByTagName('Road')[0].firstChild.data == "True":
        scenario.backgroundFlowModes.append("road")
    if xmlScenarioFile.getElementsByTagName('Background_Flows')[
            0].getElementsByTagName('Rail')[0].firstChild.data == "True":
        scenario.backgroundFlowModes.append("rail")
    if xmlScenarioFile.getElementsByTagName('Background_Flows')[
            0].getElementsByTagName('Water')[0].firstChild.data == "True":
        scenario.backgroundFlowModes.append("water")

    # TODO ALO-- 10/17/2018-- make below compatible with distinct crude/product pipeline approach-- ftot_pulp.py changes will be needed.
    if xmlScenarioFile.getElementsByTagName(
            'Background_Flows')[0].getElementsByTagName(
                'Pipeline_Crude')[0].firstChild.data == "True":
        scenario.backgroundFlowModes.append("pipeline")
    if xmlScenarioFile.getElementsByTagName(
            'Background_Flows')[0].getElementsByTagName(
                'Pipeline_Prod')[0].firstChild.data == "True":
        scenario.backgroundFlowModes.append("pipeline")

    scenario.minCapacityLevel = float(
        xmlScenarioFile.getElementsByTagName('Minimum_Capacity_Level')
        [0].firstChild.data)

    scenario.unMetDemandPenalty = float(
        xmlScenarioFile.getElementsByTagName(
            'Penalty_For_Not_Fulfilling_Depot_Demand')[0].firstChild.data)
    scenario.maxSeconds = int(
        xmlScenarioFile.getElementsByTagName('maxSeconds')[0].firstChild.data)
    scenario.fracGap = float(
        xmlScenarioFile.getElementsByTagName('fracGap')[0].firstChild.data)

    # OTHER
    # ----------------------------------------------------------------------------------------

    scenario.scenario_run_directory = os.path.dirname(fullPathToXmlConfigFile)

    scenario.main_db = os.path.join(scenario.scenario_run_directory, "main.db")
    scenario.main_gdb = os.path.join(scenario.scenario_run_directory,
                                     "main.gdb")

    scenario.rmp_fc = os.path.join(scenario.main_gdb, "raw_material_producers")
    scenario.destinations_fc = os.path.join(scenario.main_gdb,
                                            "ultimate_destinations")
    scenario.processors_fc = os.path.join(scenario.main_gdb, "processors")
    scenario.processor_candidates_fc = os.path.join(
        scenario.main_gdb, "all_candidate_processors")
    scenario.locations_fc = os.path.join(scenario.main_gdb, "locations")

    # this file is generated by the processor_candidates() method
    scenario.processor_candidates_commodity_data = os.path.join(
        scenario.scenario_run_directory, "debug",
        "ftot_generated_processor_candidates.csv")

    # this is the directory to store the shp files that are programmatically generated for the networkx read_shp method
    scenario.lyr_files_dir = os.path.join(scenario.scenario_run_directory,
                                          "temp_networkx_shp_files")

    return scenario
Example #7
def make_rmp_as_proc_slate(the_scenario, commodity_name,
                           commodity_quantity_with_units, logger):
    # we're going to query the database for facilities named candidate* (wildcard)
    # and use their product slate ratio to return a fuel dictionary.
    sql = """ select f.facility_name, c.commodity_name, fc.quantity, fc.units, c.phase_of_matter, fc.io 
                from facility_commodities fc
                join facilities f on f.facility_id = fc.facility_id
                join commodities c on c.commodity_id = fc.commodity_id
                where facility_name like 'candidate%' 
          """

    input_commodities = {}
    output_commodities = {}
    scaled_output_dict = {}
    with sqlite3.connect(the_scenario.main_db) as db_con:

        db_cur = db_con.cursor()
        db_cur.execute(sql)

        for row in db_cur:
            facility_name = row[0]
            a_commodity_name = row[1]
            quantity = row[2]
            units = row[3]
            phase_of_matter = row[4]
            io = row[5]

            if io == 'i':
                if facility_name not in input_commodities:
                    input_commodities[facility_name] = []

                input_commodities[facility_name].append(
                    [a_commodity_name, quantity, units, phase_of_matter, io])

            elif io == 'o':
                if facility_name not in output_commodities:
                    output_commodities[facility_name] = []
                output_commodities[facility_name].append(
                    [a_commodity_name, quantity, units, phase_of_matter, io])
            elif io == 'maxsize' or io == 'minsize':
                logger.detailed_debug("io flag == maxsize or min size")
            elif io == 'cost_formula':
                logger.detailed_debug("io flag == cost_formula")
            else:
                logger.warning(
                    "the io flag: {} is not recognized for commodity: {} - at facility: {}"
                    .format(io, a_commodity_name, facility_name))

    # check if there is more than one input commodity
    for facility in input_commodities:
        if len(input_commodities[facility]) != 1:
            logger.warning(
                "there are: {} input commodities in the product slate for facility {}"
                .format(len(input_commodities[facility]), facility))
            for an_input_commodity in input_commodities[facility]:
                logger.warning(
                    "commodity_name: {}, quantity: {}, units: {}, io: {}".
                    format(an_input_commodity[0], an_input_commodity[1],
                           an_input_commodity[2], an_input_commodity[4]))
        else:  # there is only one input commodity to use in the ratio.

            # check if this is the ratio we want to save
            if input_commodities[facility][0][0].lower(
            ) == commodity_name.lower():

                a_commodity_name = input_commodities[facility][0][0]
                quantity = input_commodities[facility][0][1]
                units = input_commodities[facility][0][2]
                input_commodity_quantity_with_units = Q_(quantity, units)

                # store all the output commodities
                for an_output_commodity in output_commodities[facility]:
                    a_commodity_name = an_output_commodity[0]
                    quantity = an_output_commodity[1]
                    units = an_output_commodity[2]
                    phase_of_matter = an_output_commodity[3]
                    output_commodity_quantity_with_units = Q_(quantity, units)

                    # the output commodity quantity is divided by the input
                    # commodity quantity specified in the candidate slate csv
                    # and then multiplied by the commodity_quantity_with_units
                    # factor from the RMP passed into this module
                    oc = output_commodity_quantity_with_units
                    ic = input_commodity_quantity_with_units
                    cs = commodity_quantity_with_units

                    # finally add it to the scaled output dictionary
                    scaled_output_dict[a_commodity_name] = [(oc / ic * cs),
                                                            phase_of_matter]
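                    # Worked example with hypothetical numbers: if the candidate
                    # slate lists 2 kgal of input and 1 kgal of this output, and
                    # the RMP supplies 100 kgal, the scaled output would be
                    # (1 kgal / 2 kgal) * 100 kgal = 50 kgal.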

    return scaled_output_dict
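
A minimal usage sketch for make_rmp_as_proc_slate, assuming the_scenario and logger objects already exist and the scenario database contains candidate* facilities; the commodity name and quantity below are hypothetical:

rmp_supply = Q_(500, "kgal")  # hypothetical raw material producer supply
slate = make_rmp_as_proc_slate(the_scenario, "diesel", rmp_supply, logger)
for output_name, (scaled_quantity, phase_of_matter) in slate.items():
    logger.info("output {}: {} ({})".format(output_name, scaled_quantity, phase_of_matter))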
Example #8
def load_facility_commodities_input_data(the_scenario, commodity_input_file,
                                         logger):
    logger.debug("start: load_facility_commodities_input_data")
    if not os.path.exists(commodity_input_file):
        logger.warning("warning: cannot find commodity_input file: {}".format(
            commodity_input_file))
        return

    # create a temp dict to store values from CSV
    temp_facility_commodities_dict = {}

    # read through facility_commodities input CSV
    import csv
    with open(commodity_input_file, 'r') as f:

        reader = csv.DictReader(f)
        for row in reader:
            # re: issue #149 -- if the line is empty, just skip it
            if list(row.values())[0] == '':
                logger.debug(
                    'the CSV file has a blank in the first column. Skipping this line: {}'
                    .format(row.values()))
                continue

            # {'units': 'kgal', 'facility_name': 'd:01053', 'phase_of_matter': 'liquid', 'value': '9181.521484', 'commodity': 'diesel', 'io': 'o',
            #             'share_max_transport_distance': 'Y'}
            io = row["io"]
            facility_name = str(row["facility_name"])
            facility_type = row["facility_type"]
            commodity_name = row["commodity"].lower(
            )  # re: issue #131 - make all commodities lower case
            commodity_quantity = row["value"]
            commodity_unit = str(row["units"]).replace(
                ' ', '_').lower()  # remove spaces and make units lower case
            commodity_phase = row["phase_of_matter"]

            if "max_transport_distance" in row.keys():
                commodity_max_transport_distance = row[
                    "max_transport_distance"]  # leave out and sqlite will
            else:
                commodity_max_transport_distance = "Null"
            if "share_max_transport_distance" in row.keys():
                share_max_transport_distance = row[
                    "share_max_transport_distance"]
            else:
                share_max_transport_distance = 'N'

            # use pint to set the commodity quantity and units
            commodity_quantity_and_units = Q_(float(commodity_quantity),
                                              commodity_unit)

            # 7/9/18 - convert the input commodities into FTOT units
            # 10/12/18 - mnp - adding user default units by phase of matter.
            if commodity_phase.lower() == 'liquid':
                commodity_unit = the_scenario.default_units_liquid_phase
            if commodity_phase.lower() == 'solid':
                commodity_unit = the_scenario.default_units_solid_phase

            if commodity_name == 'cost_formula':
                pass
            else:
                commodity_quantity = commodity_quantity_and_units.to(
                    commodity_unit).magnitude

            # add to the dictionary of facility_commodities mapping
            if facility_name not in temp_facility_commodities_dict:
                temp_facility_commodities_dict[facility_name] = []

            temp_facility_commodities_dict[facility_name].append([
                facility_type, commodity_name, commodity_quantity,
                commodity_unit, commodity_phase,
                commodity_max_transport_distance, io,
                share_max_transport_distance
            ])

    logger.debug("finished: load_facility_commodities_input_data")
    return temp_facility_commodities_dict
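
A hedged sketch of how this loader might be called, assuming the_scenario and logger already exist; the file name and CSV row below are hypothetical and follow the column layout shown in the example dictionary above:

# hypothetical facility_commodities CSV contents:
#   facility_name,facility_type,commodity,value,units,phase_of_matter,io
#   d:01053,ultimate_destination,diesel,9181.521484,kgal,liquid,o
facility_dict = load_facility_commodities_input_data(the_scenario, "destinations.csv", logger)
if facility_dict is not None:
    for facility_name, commodity_rows in facility_dict.items():
        logger.info("{}: {} commodity rows".format(facility_name, len(commodity_rows)))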
Example #9
def make_commodity_density_dict(the_scenario, logger, isEmissionsReporting):

    # This method is called twice, once for vehicle attributes post-processing and then again for detailed emissions reporting.
    # Boolean isEmissionsReporting should be True during the second call of this method (for the detailed emissions reporting)
    # to suppress duplicative logger statements

    logger.debug("start: make_commodity_density_dict")

    # Query commodities
    commodity_names = []
    with sqlite3.connect(the_scenario.main_db) as main_db_con:
        commodities = main_db_con.execute(
            "select commodity_name from commodities where commodity_name <> 'multicommodity';"
        )
        commodities = commodities.fetchall()
        for name in commodities:
            commodity_names.append(name[0])

    # Initialize density dict with default density factor
    density_dict = {comm: the_scenario.densityFactor for comm in commodity_names}

    # Use default if input file set to None
    if the_scenario.commodity_density_data == "None":
        logger.info(
            'Commodity density file not specified. Defaulting to density {}'.
            format(the_scenario.densityFactor))
        return density_dict

    # Read through densities csv
    with open(the_scenario.commodity_density_data, 'r') as cd:
        line_num = 1
        for line in cd:
            if line_num == 1:
                pass  # do nothing
            else:
                flds = line.rstrip('\n').split(',')
                commodity = flds[0].lower()
                density = flds[1]

                # Check commodity
                if commodity not in commodity_names:
                    logger.warning(
                        "Commodity: {} in commodity_density_data is not recognized."
                        .format(commodity))
                    continue  # skip this commodity

                # Assign default density if commodity has blank density
                # Otherwise do unit conversion
                if density == "":
                    density = the_scenario.densityFactor
                else:
                    density = Q_(density).to('{}/{}'.format(
                        the_scenario.default_units_solid_phase,
                        the_scenario.default_units_liquid_phase))

                # Populate dictionary
                density_dict[commodity] = density

            line_num += 1

    if not isEmissionsReporting:  # only log the densities once
        for commodity in density_dict:
            logger.debug("Commodity: {}, Density: {}".format(
                commodity, density_dict[commodity]))

    return density_dict
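
A hedged sketch of the commodity density CSV this method reads (a header row, then commodity and density columns), assuming the_scenario and logger already exist; the commodity names and density values below are hypothetical:

# hypothetical commodity_density_data CSV contents:
#   commodity,density
#   diesel,3.2 ton/kgal
#   ethanol,2.9 ton/kgal
density_dict = make_commodity_density_dict(the_scenario, logger, isEmissionsReporting=False)
logger.debug("density assumed for diesel: {}".format(density_dict.get("diesel")))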
Example #10
def processor_candidates(the_scenario, logger):
    # 12/22/18 - new processor candidates code based on the FTOTv5
    # candidate generation tables generated at the end of the first optimization step
    # -----------------------------------------------------------------------------

    logger.info("start: generate_processor_candidates")

    # use candidate_nodes, candidate_process_list,
    # and candidate_process_commodities to create the output
    # product slate and candidate facility information including:
    # (min_size, max_size, cost_formula)

    with sqlite3.connect(the_scenario.main_db) as main_db_con:

        # clean-up candidate_processors table
        # ------------------------------------
        logger.debug("drop the candidate_processors table")
        main_db_con.execute("drop table if exists candidate_processors;")
        main_db_con.commit()

        # create the candidate_processors table
        # with the appropriate XY shape information from the nodeID
        # ---------------------------------------------------------
        logger.debug("create the candidate_processors table")
        main_db_con.executescript("""create table candidate_processors as 
                select 
                    xy.shape_x shape_x, 
                    xy.shape_y shape_y, 
                    cpl.process_name || '_' || cn.node_id facility_name, 
                    cpl.process_id process_id,
                    cpc.commodity_name commodity_name, 
                    cpc.commodity_id commodity_id,
                    cn.'agg_value:1' as quantity, 
                    cpc.units units, 
                    cpc.io io,
                    c.phase_of_matter phase_of_matter
                    from candidate_nodes cn
                    join candidate_process_commodities cpc on cpc.commodity_id = cn.commodity_id and cpc.process_id = cn.process_id
                    join candidate_process_list cpl on cpl.process_id = cn.process_id
                    join networkx_nodes xy on cn.node_id = xy.node_id
                    join commodities c on c.commodity_id = cpc.commodity_id
                    group by xy.shape_x, 
                        xy.shape_y, 
                        facility_name, 
                        cpl.process_id,
                        cpc.commodity_name, 
                        cpc.commodity_id,
                        quantity, 
                        cpc.units, 
                        cpc.io,
                        c.phase_of_matter
                ;""")
        main_db_con.commit()

    # generate the product slates for the candidate locations
    # first get a dictionary of output commodity scalars per unit of input
    # output_dict[process_name]['i'].append([commodity_name, Q_(quantity, units)])
    # output_dict[process_name]['o'].append([commodity_name, Q_(quantity, units), phase_of_matter])

    output_dict = get_candidate_processor_slate_output_ratios(
        the_scenario, logger)

    logger.info("opening a csv file")
    with open(the_scenario.processor_candidates_commodity_data, 'w') as wf:

        # write the header line
        header_line = "facility_name,facility_type,commodity,value,units,phase_of_matter,io"
        wf.write(str(header_line + "\n"))

        ## WRITE THE CSV FILE OF THE PROCESSOR CANDIDATES PRODUCT SLATE

        sql = """ 
            select 
                facility_name, 'processor', commodity_name, quantity, units, phase_of_matter, io, cpl.process_name
            from candidate_processors cp
            join candidate_process_list cpl on cpl.process_id = cp.process_id            
        ;"""
        db_cur = main_db_con.execute(sql)
        db_data = db_cur.fetchall()
        for row in db_data:

            facility_name = row[0]
            facility_type = row[1]
            commodity_name = row[2]
            input_quantity = float(row[3])
            input_units = row[4]
            phase_of_matter = row[5]
            io = row[6]
            process_name = row[7]

            # logger.info("writing input commodity: {} and demand: {} \t {}".format(row[2], row[3], row[4]))
            wf.write("{},{},{},{},{},{},{}\n".format(row[0], row[1], row[2],
                                                     row[3], row[4], row[5],
                                                     row[6]))

            # write the scaled output commodities too
            # first get the input for the denominator
            input_scaler_quantity = output_dict[process_name]['i'][0][1]
            # then get the output scaler for the numerator
            for output_scaler in output_dict[process_name]['o']:
                output_commodity_name = output_scaler[0]
                output_scaler_quantity = output_scaler[1]
                output_phase_of_matter = output_scaler[2]
                output_quantity = Q_(
                    input_quantity, input_units
                ) * output_scaler_quantity / input_scaler_quantity
                wf.write("{},{},{},{},{},{},{}\n".format(
                    row[0], row[1], output_commodity_name,
                    output_quantity.magnitude, output_quantity.units,
                    output_phase_of_matter, 'o'))

    # MAKE THE FIRST PROCESSOR POINT LAYER
    # this layer consists of candidate nodes where flow exceeds the min facility size at an RMP,
    # or where flow aggregates on the network above the min facility size (anywhere it gets bigger)
    # ---------------------------------------------------------------------------------------------
    logger.info(
        "create a feature class with all the candidate processor locations: all_candidate_processors"
    )
    scenario_gdb = the_scenario.main_gdb
    all_candidate_processors_fc = os.path.join(scenario_gdb,
                                               "all_candidate_processors")

    if arcpy.Exists(all_candidate_processors_fc):
        arcpy.Delete_management(all_candidate_processors_fc)
        logger.debug(
            "deleted existing {} layer".format(all_candidate_processors_fc))

    arcpy.CreateFeatureclass_management(scenario_gdb,
                                        "all_candidate_processors", "POINT",
                                        "#", "DISABLED", "DISABLED",
                                        ftot_supporting_gis.LCC_PROJ)

    # add the facility_name and candidate fields.
    # ---------------------------------------------------------------------
    arcpy.AddField_management(all_candidate_processors_fc, "facility_name",
                              "TEXT")
    arcpy.AddField_management(all_candidate_processors_fc, "candidate",
                              "SHORT")
    fields = ("SHAPE@X", "SHAPE@Y", "facility_name", "candidate")
    icursor = arcpy.da.InsertCursor(all_candidate_processors_fc, fields)

    main_scenario_gdb = the_scenario.main_gdb

    ### THIS IS WHERE WE CHANGE THE CODE ###
    ### DO A NEW SQL QUERY TO GROUP BY FACILITY_NAME AND GET THE SHAPE_X, SHAPE_Y, PROCESS, ETC.
    with sqlite3.connect(the_scenario.main_db) as main_db_con:
        sql = """
                select shape_x, shape_y, facility_name
                from candidate_processors
                group by facility_name
            ;"""
        db_cur = main_db_con.execute(sql)
        db_data = db_cur.fetchall()

    for candidate_processor in db_data:
        shape_x = float(candidate_processor[0])
        shape_y = float(candidate_processor[1])
        facility_name = candidate_processor[2]
        # offset slightly from the network node
        offset_x = random.randrange(100, 250, 25)
        offset_y = random.randrange(100, 250, 25)
        shape_x += offset_x
        shape_y += offset_y

        icursor.insertRow([shape_x, shape_y, facility_name, 1])

    del icursor

    return
Example #11
def get_candidate_processor_slate_output_ratios(the_scenario, logger):
    logger.info("start: get_candidate_processor_slate_output_ratios")
    output_dict = {}
    with sqlite3.connect(the_scenario.main_db) as db_con:
        # first get the input commodities and quantities
        sql = """ 
            select 
                cpl.process_id, 
                cpl.process_name, 
                cpc.io, 
                cpc.commodity_name, 
                cpc.commodity_id,
                quantity,
                units
            from candidate_process_commodities cpc
            join candidate_process_list cpl on cpl.process_id = cpc.process_id
            where cpc.io = 'i'
        ;"""

        db_cur = db_con.execute(sql)
        db_data = db_cur.fetchall()

        for row in db_data:
            process_name = row[1]
            io = row[2]
            commodity_name = row[3]
            commodity_id = row[4]
            quantity = float(row[5])
            units = row[6]

            if process_name not in output_dict.keys():
                output_dict[process_name] = {}
                output_dict[process_name]['i'] = []
                output_dict[process_name]['o'] = [
                ]  # initialize the output dict at the same time
            output_dict[process_name]['i'].append(
                [commodity_name, Q_(quantity, units)])

        # next get the output commodities and quantities and scale them by the input quantities
        # e.g. output scaled = output / input
        sql = """ 
            select 
                cpl.process_id, 
                cpl.process_name, 
                cpc.io, 
                cpc.commodity_name, 
                cpc.commodity_id,
                cpc.quantity,
                cpc.units,
                c.phase_of_matter
            from candidate_process_commodities cpc
            join candidate_process_list cpl on cpl.process_id = cpc.process_id
            join commodities c on c.commodity_id = cpc.commodity_id
            where cpc.io = 'o'
        ;"""

        db_cur = db_con.execute(sql)
        db_data = db_cur.fetchall()

        for row in db_data:
            process_name = row[1]
            io = row[2]
            commodity_name = row[3]
            commodity_id = row[4]
            quantity = float(row[5])
            units = row[6]
            phase_of_matter = row[7]

            output_dict[process_name]['o'].append(
                [commodity_name,
                 Q_(quantity, units), phase_of_matter])

    for process in output_dict:
        if 1 != len(output_dict[process]['i']):
            logger.warning(
                "there is more than one input commodity specified for this process!!"
            )
        if 0 == len(output_dict[process]['i']):
            logger.warning(
                "there are no input commodities specified for this process!!")
        if 0 == len(output_dict[process]['o']):
            logger.warning(
                "there are no output commodities specified for this process!!")

    return output_dict
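
A brief illustration of how the returned ratios can be applied, mirroring the scaling done in processor_candidates above; the process name and the 1000 kgal of available input are hypothetical, and the_scenario and logger are assumed to already exist:

ratios = get_candidate_processor_slate_output_ratios(the_scenario, logger)
input_name, input_quantity = ratios["hypothetical_process"]['i'][0]
for output_name, output_quantity, phase_of_matter in ratios["hypothetical_process"]['o']:
    # output per unit of input, multiplied by the available input quantity
    scaled = Q_(1000, "kgal") * output_quantity / input_quantity
    logger.info("{}: {} ({})".format(output_name, scaled, phase_of_matter))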
Example #12
def load_scenario_config_file(fullPathToXmlConfigFile, fullPathToXmlSchemaFile, logger):

    if not os.path.exists(fullPathToXmlConfigFile):
        raise IOError("XML Scenario File {} not found at specified location.".format(fullPathToXmlConfigFile))

    if fullPathToXmlConfigFile.rfind(".xml") < 0:
        raise IOError("XML Scenario File {} is not an XML file type.".format(fullPathToXmlConfigFile))

    if not os.path.exists(fullPathToXmlSchemaFile):
        raise IOError("XML Schema File not found at {}".format(fullPathToXmlSchemaFile))

    xmlScenarioFile = minidom.parse(fullPathToXmlConfigFile)

    # Validate XML scenario against XML schema
    schemaObj = etree.XMLSchema(etree.parse(fullPathToXmlSchemaFile))
    xmlFileObj = etree.parse(fullPathToXmlConfigFile)
    validationResult = schemaObj.validate(xmlFileObj)

    logger.debug("validate XML scenario against XML schema")
    if validationResult == False:
        logger.warning("XML scenario validation failed. Error messages to follow.")
        for error in schemaObj.error_log:
            logger.warning("ERROR ON LINE: {} - ERROR MESSAGE: {}".format(error.line, error.message))

        raise Exception("XML Scenario File does not meet the requirements in the XML schema file.")

    # initialize scenario object
    logger.debug("initialize scenario object")
    scenario = Scenario()

    # Check the scenario schema version (current version is specified in FTOT.py under VERSION_NUMBER global var)
    logger.debug("validate schema version is correct")
    scenario.scenario_schema_version = xmlScenarioFile.getElementsByTagName('Scenario_Schema_Version')[0].firstChild.data

    #if not str(VERSION_NUMBER) == str(scenario.scenario_schema_version):
    if not str(SCHEMA_VERSION).split(".")[0:2] == str(scenario.scenario_schema_version).split(".")[0:2]:
        error = "XML Schema File Version is {}. Expected version {}. " \
                "Use the XML flag to run the XML upgrade tool. " \
                .format(str(scenario.scenario_schema_version), str(SCHEMA_VERSION))
        logger.error(error)
        raise Exception(error)

    scenario.scenario_name = xmlScenarioFile.getElementsByTagName('Scenario_Name')[0].firstChild.data
    # Convert any commas in the scenario name to dashes
    warning = "Replace any commas in the scenario name with dashes to accomodate csv files."
    logger.debug(warning)
    scenario.scenario_name = scenario.scenario_name.replace(",", "-")
    scenario.scenario_description = xmlScenarioFile.getElementsByTagName('Scenario_Description')[0].firstChild.data

    # SCENARIO INPUTS SECTION
    # ----------------------------------------------------------------------------------------
    scenario.common_data_folder = xmlScenarioFile.getElementsByTagName('Common_Data_Folder')[0].firstChild.data
    scenario.base_network_gdb = xmlScenarioFile.getElementsByTagName('Base_Network_Gdb')[0].firstChild.data
    scenario.disruption_data = xmlScenarioFile.getElementsByTagName('Disruption_Data')[0].firstChild.data
    scenario.base_rmp_layer = xmlScenarioFile.getElementsByTagName('Base_RMP_Layer')[0].firstChild.data
    scenario.base_destination_layer = xmlScenarioFile.getElementsByTagName('Base_Destination_Layer')[0].firstChild.data
    scenario.base_processors_layer = xmlScenarioFile.getElementsByTagName('Base_Processors_Layer')[0].firstChild.data

    scenario.rmp_commodity_data = xmlScenarioFile.getElementsByTagName('RMP_Commodity_Data')[0].firstChild.data
    scenario.destinations_commodity_data = xmlScenarioFile.getElementsByTagName('Destinations_Commodity_Data')[0].firstChild.data
    scenario.processors_commodity_data = xmlScenarioFile.getElementsByTagName('Processors_Commodity_Data')[0].firstChild.data
    scenario.processors_candidate_slate_data = xmlScenarioFile.getElementsByTagName('Processors_Candidate_Commodity_Data')[0].firstChild.data
    # note: the processor_candidates_data is defined under other since it is not a user specified file.
    scenario.schedule = xmlScenarioFile.getElementsByTagName('Schedule_Data')[0].firstChild.data
    scenario.commodity_mode_data = xmlScenarioFile.getElementsByTagName('Commodity_Mode_Data')[0].firstChild.data

    # use pint to set the default units
    logger.debug("test: setting the default units with pint")
    try:
        scenario.default_units_solid_phase = Q_(xmlScenarioFile.getElementsByTagName('Default_Units_Solid_Phase')[0].firstChild.data).units
        scenario.default_units_liquid_phase = Q_(xmlScenarioFile.getElementsByTagName('Default_Units_Liquid_Phase')[0].firstChild.data).units
        logger.debug("PASS: setting the default units with pint")
    except Exception as e:
        logger.error("FAIL: {} ".format(e))
        raise Exception("FAIL: {}".format(e))

    # ASSUMPTIONS SECTION
    # ----------------------------------------------------------------------------------------

    # solid and liquid vehicle loads
    try:

        logger.debug("test: setting the vehicle loads for solid phase of matter with pint")
        scenario.truck_load_solid = Q_(xmlScenarioFile.getElementsByTagName('Truck_Load_Solid')[0].firstChild.data).to(scenario.default_units_solid_phase)
        scenario.railcar_load_solid = Q_(xmlScenarioFile.getElementsByTagName('Railcar_Load_Solid')[0].firstChild.data).to(scenario.default_units_solid_phase)
        scenario.barge_load_solid = Q_(xmlScenarioFile.getElementsByTagName('Barge_Load_Solid')[0].firstChild.data).to(scenario.default_units_solid_phase)

        logger.debug("test: setting the vehicle loads for liquid phase of matter with pint")
        scenario.truck_load_liquid = Q_(xmlScenarioFile.getElementsByTagName('Truck_Load_Liquid')[0].firstChild.data).to(scenario.default_units_liquid_phase)
        scenario.railcar_load_liquid = Q_(xmlScenarioFile.getElementsByTagName('Railcar_Load_Liquid')[0].firstChild.data).to(scenario.default_units_liquid_phase)
        scenario.barge_load_liquid = Q_(xmlScenarioFile.getElementsByTagName('Barge_Load_Liquid')[0].firstChild.data).to(scenario.default_units_liquid_phase)
        scenario.pipeline_crude_load_liquid = Q_(xmlScenarioFile.getElementsByTagName('Pipeline_Crude_Load_Liquid')[0].firstChild.data).to(scenario.default_units_liquid_phase)
        scenario.pipeline_prod_load_liquid = Q_(xmlScenarioFile.getElementsByTagName('Pipeline_Prod_Load_Liquid')[0].firstChild.data).to(scenario.default_units_liquid_phase)
        logger.debug("PASS: setting the vehicle loads with pint passed")

        logger.debug("test: setting the vehicle fuel efficiencies with pint")
        scenario.truckFuelEfficiency = Q_(xmlScenarioFile.getElementsByTagName('Truck_Fuel_Efficiency')[0].firstChild.data).to('mi/gal')
        scenario.railFuelEfficiency = Q_(xmlScenarioFile.getElementsByTagName('Rail_Fuel_Efficiency')[0].firstChild.data).to('mi/gal')
        scenario.bargeFuelEfficiency = Q_(xmlScenarioFile.getElementsByTagName('Barge_Fuel_Efficiency')[0].firstChild.data).to('mi/gal')
        logger.debug("PASS: setting the vehicle fuel efficiencies with pint passed")

        logger.debug("test: setting the vehicle emission factors with pint")
        scenario.CO2urbanUnrestricted = Q_(xmlScenarioFile.getElementsByTagName('Atmos_CO2_Urban_Unrestricted')[0].firstChild.data).to('g/mi')
        scenario.CO2urbanRestricted = Q_(xmlScenarioFile.getElementsByTagName('Atmos_CO2_Urban_Restricted')[0].firstChild.data).to('g/mi')
        scenario.CO2ruralUnrestricted = Q_(xmlScenarioFile.getElementsByTagName('Atmos_CO2_Rural_Unrestricted')[0].firstChild.data).to('g/mi')
        scenario.CO2ruralRestricted = Q_(xmlScenarioFile.getElementsByTagName('Atmos_CO2_Rural_Restricted')[0].firstChild.data).to('g/mi')        
        scenario.railroadCO2Emissions = Q_(xmlScenarioFile.getElementsByTagName('Railroad_CO2_Emissions')[0].firstChild.data).to('g/{}/mi'.format(scenario.default_units_solid_phase))
        scenario.bargeCO2Emissions = Q_(xmlScenarioFile.getElementsByTagName('Barge_CO2_Emissions')[0].firstChild.data).to('g/{}/mi'.format(scenario.default_units_solid_phase))
        scenario.pipelineCO2Emissions = Q_(xmlScenarioFile.getElementsByTagName('Pipeline_CO2_Emissions')[0].firstChild.data).to('g/{}/mi'.format(scenario.default_units_solid_phase))
        # set the density conversion factor from the 'Density_Conversion_Factor' field if it exists; otherwise default to 3.33 ton/kgal
        if len(xmlScenarioFile.getElementsByTagName('Density_Conversion_Factor')):
            scenario.densityFactor = Q_(xmlScenarioFile.getElementsByTagName('Density_Conversion_Factor')[0].firstChild.data).to('{}/{}'.format(scenario.default_units_solid_phase, scenario.default_units_liquid_phase))
        else:
            logger.warning("FTOT is assuming a density of 3.33 ton/kgal for emissions reporting for liquids. Use scenario XML parameter 'Density_Conversion_Factor' to adjust this value.")
            scenario.densityFactor = Q_('3.33 ton/kgal').to('{}/{}'.format(scenario.default_units_solid_phase, scenario.default_units_liquid_phase))
        logger.debug("PASS: setting the vehicle emission factors with pint passed")
    
    except Exception as e:
        logger.error("FAIL: {} ".format(e))
        raise Exception("FAIL: {}".format(e))
    
    # Set the detailed emissions reporting flag if the element exists; otherwise default to False
    if len(xmlScenarioFile.getElementsByTagName('Detailed_Emissions_Reporting')):
        if xmlScenarioFile.getElementsByTagName('Detailed_Emissions_Reporting')[0].firstChild.data == "True":
            scenario.detailed_emissions = True
        else:
            scenario.detailed_emissions = False
    else:
        logger.debug("Detailed_Emissions_Reporting field not specified. Defaulting to False.")
        scenario.detailed_emissions = False

    # SCRIPT PARAMETERS SECTION FOR NETWORK
    # ----------------------------------------------------------------------------------------

    # rail costs
    try:
        logger.debug("test: setting the base costs for rail with pint")
        scenario.solid_railroad_class_1_cost = Q_(xmlScenarioFile.getElementsByTagName('solid_Railroad_Class_I_Cost')[0].firstChild.data).to("usd/{}/mile".format(scenario.default_units_solid_phase))
        scenario.liquid_railroad_class_1_cost = Q_(xmlScenarioFile.getElementsByTagName('liquid_Railroad_Class_I_Cost')[0].firstChild.data).to("usd/{}/mile".format(scenario.default_units_liquid_phase))
        logger.debug("PASS: setting the base costs for rail with pint passed")

    except Exception as e:
        logger.error("FAIL: {} ".format(e))
        raise Exception("FAIL: {}".format(e))

    # rail penalties
    scenario.rail_dc_7 = format_number(xmlScenarioFile.getElementsByTagName('Rail_Density_Code_7_Weight')[0].firstChild.data)
    scenario.rail_dc_6 = format_number(xmlScenarioFile.getElementsByTagName('Rail_Density_Code_6_Weight')[0].firstChild.data)
    scenario.rail_dc_5 = format_number(xmlScenarioFile.getElementsByTagName('Rail_Density_Code_5_Weight')[0].firstChild.data)
    scenario.rail_dc_4 = format_number(xmlScenarioFile.getElementsByTagName('Rail_Density_Code_4_Weight')[0].firstChild.data)
    scenario.rail_dc_3 = format_number(xmlScenarioFile.getElementsByTagName('Rail_Density_Code_3_Weight')[0].firstChild.data)
    scenario.rail_dc_2 = format_number(xmlScenarioFile.getElementsByTagName('Rail_Density_Code_2_Weight')[0].firstChild.data)
    scenario.rail_dc_1 = format_number(xmlScenarioFile.getElementsByTagName('Rail_Density_Code_1_Weight')[0].firstChild.data)
    scenario.rail_dc_0 = format_number(xmlScenarioFile.getElementsByTagName('Rail_Density_Code_0_Weight')[0].firstChild.data)

    # truck costs
    try:
        logger.debug("test: setting the base costs for truck with pint")
        scenario.solid_truck_base_cost = Q_(xmlScenarioFile.getElementsByTagName('solid_Truck_Base_Cost')[0].firstChild.data).to("usd/{}/mile".format(scenario.default_units_solid_phase))
        scenario.liquid_truck_base_cost = Q_(xmlScenarioFile.getElementsByTagName('liquid_Truck_Base_Cost')[0].firstChild.data).to("usd/{}/mile".format(scenario.default_units_liquid_phase))
        logger.debug("PASS: setting the base costs for truck with pint passed")

    except Exception as e:
        logger.error("FAIL: {} ".format(e))
        raise Exception("FAIL: {}".format(e))

    # road penalties
    scenario.truck_interstate = format_number(xmlScenarioFile.getElementsByTagName('Truck_Interstate_Weight')[0].firstChild.data)
    scenario.truck_pr_art = format_number(xmlScenarioFile.getElementsByTagName('Truck_Principal_Arterial_Weight')[0].firstChild.data)
    scenario.truck_m_art = format_number(xmlScenarioFile.getElementsByTagName('Truck_Minor_Arterial_Weight')[0].firstChild.data)
    scenario.truck_local = format_number(xmlScenarioFile.getElementsByTagName('Truck_Local_Weight')[0].firstChild.data)

    # barge costs
    try:
        logger.debug("test: setting the base costs for barge with pint")
        scenario.solid_barge_cost = Q_(xmlScenarioFile.getElementsByTagName('solid_Barge_cost')[0].firstChild.data).to("usd/{}/mile".format(scenario.default_units_solid_phase))
        scenario.liquid_barge_cost = Q_(xmlScenarioFile.getElementsByTagName('liquid_Barge_cost')[0].firstChild.data).to("usd/{}/mile".format(scenario.default_units_liquid_phase))
        logger.debug("PASS: setting the base costs for barge with pint passed")

    except Exception as e:
        logger.error("FAIL: {} ".format(e))
        raise Exception("FAIL: {}".format(e))

    # water penalties
    scenario.water_high_vol = format_number(xmlScenarioFile.getElementsByTagName('Water_High_Volume_Weight')[0].firstChild.data)
    scenario.water_med_vol = format_number(xmlScenarioFile.getElementsByTagName('Water_Medium_Volume_Weight')[0].firstChild.data)
    scenario.water_low_vol = format_number(xmlScenarioFile.getElementsByTagName('Water_Low_Volume_Weight')[0].firstChild.data)
    scenario.water_no_vol = format_number(xmlScenarioFile.getElementsByTagName('Water_No_Volume_Weight')[0].firstChild.data)

    # transloading costs
    try:
        logger.debug("test: setting the transloading costs with pint")
        scenario.solid_transloading_cost = Q_(xmlScenarioFile.getElementsByTagName('solid_Transloading_Cost')[0].firstChild.data).to("usd/{}".format(scenario.default_units_solid_phase))
        scenario.liquid_transloading_cost = Q_(xmlScenarioFile.getElementsByTagName('liquid_Transloading_Cost')[0].firstChild.data).to("usd/{}".format(scenario.default_units_liquid_phase))
        logger.debug("PASS: setting the transloading costs with pint passed")

    except Exception as e:
        logger.error("FAIL: {} ".format(e))
        raise Exception("FAIL: {}".format(e))

    # artificial link distances
    try:
        logger.debug("test: setting the artificial link distances with pint")
        scenario.road_max_artificial_link_dist = Q_(xmlScenarioFile.getElementsByTagName('Road_Max_Artificial_Link_Distance')[0].firstChild.data).to('mi')
        scenario.rail_max_artificial_link_dist = Q_(xmlScenarioFile.getElementsByTagName('Rail_Max_Artificial_Link_Distance')[0].firstChild.data).to('mi')
        scenario.water_max_artificial_link_dist = Q_(xmlScenarioFile.getElementsByTagName('Water_Max_Artificial_Link_Distance')[0].firstChild.data).to('mi')
        scenario.pipeline_crude_max_artificial_link_dist = Q_(xmlScenarioFile.getElementsByTagName('Pipeline_Crude_Max_Artificial_Link_Distance')[0].firstChild.data).to('mi')
        scenario.pipeline_prod_max_artificial_link_dist = Q_(xmlScenarioFile.getElementsByTagName('Pipeline_Products_Max_Artificial_Link_Distance')[0].firstChild.data).to('mi')
        logger.debug("PASS: setting the artificial link distances with pint passed")

    except Exception as e:
        logger.error("FAIL: {} ".format(e))
        raise Exception("FAIL: {}".format(e))

    # short haul penalties
    try:
        logger.debug("test: setting the short haul penalties with pint")
        scenario.liquid_rail_short_haul_penalty = Q_(xmlScenarioFile.getElementsByTagName('liquid_Rail_Short_Haul_Penalty')[0].firstChild.data).to("usd/{}".format(scenario.default_units_liquid_phase))
        scenario.solid_rail_short_haul_penalty = Q_(xmlScenarioFile.getElementsByTagName('solid_Rail_Short_Haul_Penalty')[0].firstChild.data).to("usd/{}".format(scenario.default_units_solid_phase))
        scenario.liquid_water_short_haul_penalty = Q_(xmlScenarioFile.getElementsByTagName('liquid_Water_Short_Haul_Penalty')[0].firstChild.data).to("usd/{}".format(scenario.default_units_liquid_phase))
        scenario.solid_water_short_haul_penalty = Q_(xmlScenarioFile.getElementsByTagName('solid_Water_Short_Haul_Penalty')[0].firstChild.data).to("usd/{}".format(scenario.default_units_solid_phase))
        logger.debug("PASS: setting the short haul penalties with pint passed")

    except Exception as e:
        logger.error("FAIL: {} ".format(e))
        raise Exception("FAIL: {}".format(e))

    # RUN ROUTE OPTIMIZATION SCRIPT SECTION
    # ----------------------------------------------------------------------------------------

    # Setting flag for network density reduction based on 'NDR_On' field
    if xmlScenarioFile.getElementsByTagName('NDR_On')[0].firstChild.data == "True":
        scenario.ndrOn = True
    else:
        scenario.ndrOn = False

    scenario.permittedModes = []
    if xmlScenarioFile.getElementsByTagName('Permitted_Modes')[0].getElementsByTagName('Road')[0].firstChild.data == "True":
        scenario.permittedModes.append("road")
    if xmlScenarioFile.getElementsByTagName('Permitted_Modes')[0].getElementsByTagName('Rail')[0].firstChild.data == "True":
        scenario.permittedModes.append("rail")
    if xmlScenarioFile.getElementsByTagName('Permitted_Modes')[0].getElementsByTagName('Water')[0].firstChild.data == "True":
        scenario.permittedModes.append("water")

    # TODO ALO-- 10/17/2018-- make below compatible with distinct crude/product pipeline approach-- ftot_pulp.py changes will be needed.
    if xmlScenarioFile.getElementsByTagName('Permitted_Modes')[0].getElementsByTagName('Pipeline_Crude')[0].firstChild.data == "True":
        scenario.permittedModes.append("pipeline_crude_trf_rts")
    if xmlScenarioFile.getElementsByTagName('Permitted_Modes')[0].getElementsByTagName('Pipeline_Prod')[0].firstChild.data == "True":
        scenario.permittedModes.append("pipeline_prod_trf_rts")

    if xmlScenarioFile.getElementsByTagName('Capacity_On')[0].firstChild.data == "True":
        scenario.capacityOn = True
    else:
        scenario.capacityOn = False

    scenario.backgroundFlowModes = []
    if xmlScenarioFile.getElementsByTagName('Background_Flows')[0].getElementsByTagName('Road')[0].firstChild.data == "True":
        scenario.backgroundFlowModes.append("road")
    if xmlScenarioFile.getElementsByTagName('Background_Flows')[0].getElementsByTagName('Rail')[0].firstChild.data == "True":
        scenario.backgroundFlowModes.append("rail")
    if xmlScenarioFile.getElementsByTagName('Background_Flows')[0].getElementsByTagName('Water')[0].firstChild.data == "True":
        scenario.backgroundFlowModes.append("water")

    if xmlScenarioFile.getElementsByTagName('Background_Flows')[0].getElementsByTagName('Pipeline_Crude')[0].firstChild.data == "True":
        scenario.backgroundFlowModes.append("pipeline")
    if xmlScenarioFile.getElementsByTagName('Background_Flows')[0].getElementsByTagName('Pipeline_Prod')[0].firstChild.data == "True":
        scenario.backgroundFlowModes.append("pipeline")

    scenario.minCapacityLevel = float(xmlScenarioFile.getElementsByTagName('Minimum_Capacity_Level')[0].firstChild.data)

    scenario.unMetDemandPenalty = float(xmlScenarioFile.getElementsByTagName('Unmet_Demand_Penalty')[0].firstChild.data)
     
    # OTHER
    # ----------------------------------------------------------------------------------------

    scenario.scenario_run_directory = os.path.dirname(fullPathToXmlConfigFile)

    scenario.main_db = os.path.join(scenario.scenario_run_directory, "main.db")
    scenario.main_gdb = os.path.join(scenario.scenario_run_directory, "main.gdb")

    scenario.rmp_fc          = os.path.join(scenario.main_gdb, "raw_material_producers")
    scenario.destinations_fc = os.path.join(scenario.main_gdb, "ultimate_destinations")
    scenario.processors_fc   = os.path.join(scenario.main_gdb, "processors")
    scenario.processor_candidates_fc = os.path.join(scenario.main_gdb, "all_candidate_processors")
    scenario.locations_fc    = os.path.join(scenario.main_gdb, "locations")

    # this file is generated by the processor_candidates() method
    scenario.processor_candidates_commodity_data = os.path.join(scenario.scenario_run_directory, "debug", "ftot_generated_processor_candidates.csv")

    # this is the directory to store the shp files that are programmatically generated for the networkx read_shp method
    scenario.networkx_files_dir = os.path.join(scenario.scenario_run_directory, "temp_networkx_shp_files")

    return scenario
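
A minimal usage sketch for load_scenario_config_file, assuming a logger has already been created; the XML and schema paths below are hypothetical:

xml_path = r"C:\FTOT\scenarios\quick_start\scenario.xml"      # hypothetical path
schema_path = r"C:\FTOT\program\lib\Master_FTOT_Schema.xsd"   # hypothetical path
the_scenario = load_scenario_config_file(xml_path, schema_path, logger)
logger.info("loaded scenario: {}".format(the_scenario.scenario_name))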