def __should_read_table(self, input_file_abs, table_id):
    """
    Determine whether the table-reading process should be run.

    Checks:
    * the InputFile (absolute) is a valid file
    * the ID of the Table is unique (not an existing Table ID)

    Args:
        input_file_abs (str): the full pathname to the input data file
        table_id (str): the ID of the output Table

    Returns:
        bool: True if the reading process should be run, False otherwise.
    """
    # Each entry records the outcome of one validation check. True means the
    # check passed and the command may proceed.
    checks = [
        # The input file must be a valid file path; otherwise a FAILURE is raised.
        validators.run_check(self, "IsFilePathValid", "InputFile", input_file_abs, "FAIL"),
        # A TableID clash raises a WARNING or FAILURE depending on the
        # IfTableIDExists parameter (resolved inside run_check).
        validators.run_check(self, "IsTableIdUnique", "TableID", table_id, None),
    ]

    # Run only if every check passed.
    return False not in checks
def __should_write_geolayer(self, geolayer_id, output_file_abs):
    """
    Determine whether the GeoLayer should be written.

    Checks:
    * the ID of the GeoLayer is an existing GeoLayer ID
    * the output folder is a valid folder

    Args:
        geolayer_id: the ID of the GeoLayer to be written
        output_file_abs: the full pathname to the output file

    Returns:
        bool: True if the GeoLayer should be written; False if at least one
        check failed.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # The GeoLayer ID must already exist; otherwise a FAILURE is raised.
        validators.run_check(self, "IsGeoLayerIdExisting", "GeoLayerID", geolayer_id, "FAIL"),
        # The OutputFile's folder must be valid; otherwise a FAILURE is raised.
        validators.run_check(self, "DoesFilePathHaveAValidFolder", "OutputFile", output_file_abs, "FAIL"),
    ]

    # Run only if every check passed.
    return False not in checks
def __should_read_table(self, file_abs, sheet_name, table_id):
    """
    Determine whether the Excel worksheet should be read into a Table.

    Checks:
    * the InputFile (absolute) is a valid file
    * the Worksheet is a valid sheet in the Excel workbook
    * the ID of the Table is unique (not an existing Table ID)

    Args:
        file_abs (str): the full pathname to the input data file (Excel workbook)
        sheet_name (str): the name of the Excel worksheet to read
        table_id (str): the ID of the output Table

    Returns:
        bool: True if the read should be run; False if at least one check failed.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # The input file must be a valid file path; otherwise a FAILURE is raised.
        validators.run_check(self, "IsFilePathValid", "InputFile", file_abs, "FAIL")
    ]

    # The worksheet/table checks only make sense when the input file is valid.
    if False not in checks:

        # Default the worksheet to the first sheet in the workbook.
        if sheet_name is None:
            sheet_name = pandas_util.create_excel_workbook_obj(file_abs).sheet_names[0]

        # The sheet name must exist in the workbook; otherwise a FAILURE is raised.
        checks.append(
            validators.run_check(self, "IsExcelSheetNameValid", "Worksheet", sheet_name, "FAIL",
                                 other_values=[file_abs]))

        # Default the TableID to the worksheet name.
        if table_id is None:
            table_id = sheet_name

        # A TableID clash raises a WARNING or FAILURE depending on the
        # IfTableIDExists parameter (resolved inside run_check).
        checks.append(
            validators.run_check(self, "IsTableIdUnique", "TableID", table_id, None))

    # Run only if every check passed.
    return False not in checks
def __should_extract_file(self, file_abs, output_folder_abs, file_type):
    """
    Determine whether the compressed file should be extracted.

    Checks the following:
    * the File is a valid file
    * the OutputFolder is a valid folder
    * the FileType correctly identifies the File's type

    Args:
        file_abs (str): the full path to the input compressed File
        output_folder_abs (str): the full path to the OutputFolder
        file_type (str): the FileType value depicting the file type of the input File

    Returns:
        Boolean. If TRUE, the file should be extracted. If FALSE, at least one check failed and the
        file should not be extracted.
    """
    # List of Boolean values. The Boolean values correspond to the results of the following tests.
    # If TRUE, the test confirms that the command should be run.
    should_run_command = []

    # If the File parameter value is not a valid file, raise a FAILURE.
    should_run_command.append(
        validators.run_check(self, "IsFilePathValid", "File", file_abs, "FAIL"))

    # If the OutputFolder parameter value is not a valid folder, raise a FAILURE.
    should_run_command.append(
        validators.run_check(self, "IsFolderPathValid", "OutputFolder", output_folder_abs, "FAIL"))

    # If the File Type is not recognized, raise a FAILURE.
    if file_type is None:
        message = "A valid FileType cannot be determined from the file ({}).".format(file_abs)
        recommendation = "Use the FileType parameter to assign the appropriate file type."
        should_run_command.append(False)
        self.logger.error(message)
        self.command_status.add_to_log(
            CommandPhaseType.RUN,
            CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

    # BUG FIX: the original fell through to file_type.upper() even when file_type
    # was None, raising AttributeError instead of reporting the FAILURE above.
    # If the File Type is not actually recognized by the input File, raise a FAILURE.
    elif file_type.upper() == "ZIP":
        should_run_command.append(
            validators.run_check(self, "IsZipFile", "File", file_abs, "FAIL"))

    elif file_type.upper() == "TAR":
        should_run_command.append(
            validators.run_check(self, "IsTarFile", "File", file_abs, "FAIL"))

    # Return the Boolean to determine if the process should be run.
    return False not in should_run_command
def __should_write_table(self, table_id, output_file_abs):
    """
    Determine whether the table-writing process should be run.

    Checks:
    * the ID of the Table is an existing Table ID
    * the output folder is a valid folder
    * the output file is not an existing XLS file (appending/overwriting XLS is unsupported)

    Args:
        table_id: the ID of the Table to be written
        output_file_abs: the full pathname to the output file

    Returns:
        bool: True if the writing process should be run, False otherwise.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # The Table ID must already exist; otherwise a FAILURE is raised.
        validators.run_check(self, "IsTableIdExisting", "TableID", table_id, "FAIL")
    ]

    # The parent folder of the output file must be a valid folder.
    output_folder_abs = io_util.get_path(output_file_abs)
    checks.append(
        validators.run_check(self, "IsFolderPathValid", "OutputFile", output_folder_abs, "FAIL"))

    # Only inspect the target file's extension when the output folder exists.
    if os.path.exists(output_folder_abs):

        # Writing a Table over an existing XLS-format file is not supported.
        if io_util.get_extension(output_file_abs).upper() == ".XLS":
            message = ('At the current time, a Table object cannot be appended to or overwrite an '
                       'existing Excel file in XLS format.')
            recommendation = ('Update the XLS file ({}) to an XLSX file or write the table '
                              'to a new XLS file.'.format(output_file_abs))
            self.warning_count += 1
            self.logger.error(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))
            checks.append(False)

    # Run only if every check passed.
    return False not in checks
def __should_geolayer_be_created(self, geolayer_id, crs, geometry_format, geometry_data):
    """
    Determine whether the GeoLayer should be created.

    Checks:
    * the CRS is a valid CRS code
    * the ID of the new GeoLayer is unique (not an existing GeoLayer ID)
    * if the GeometryFormat is BoundingBox, the GeometryData string has 4 items

    Args:
        geolayer_id: the id of the GeoLayer to be created
        crs: the crs code of the GeoLayer to be created
        geometry_format: the format that the geometry data is delivered
        geometry_data: the geometry data (as a string)

    Returns:
        bool: True if the GeoLayer should be created; False if at least one
        check failed.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # The CRS must be a valid coordinate reference system code.
        validators.run_check(self, "IsCRSCodeValid", "CRS", crs, "FAIL"),
        # A NewGeoLayerID clash raises a WARNING or FAILURE depending on the
        # IfGeoLayerIDExists parameter (resolved inside run_check).
        validators.run_check(self, "IsGeoLayerIdUnique", "NewGeoLayerID", geolayer_id, None),
    ]

    # A BoundingBox geometry must supply exactly 4 comma-separated values.
    if geometry_format.upper() == "BOUNDINGBOX":
        checks.append(
            validators.run_check(self, "IsListLengthCorrect", "GeometryData", geometry_data,
                                 "FAIL", other_values=[",", 4]))

    # Run only if every check passed.
    return False not in checks
def __should_read_geolayer(self, geolayer_id, one_geolayer_bool, fc_name=None, spatial_data_folder_abs=None):
    """
    Determine whether the GeoLayer should be read from the File GeoDatabase.

    Checks:
    * the ID of the output GeoLayer is unique (not an existing GeoLayer ID)
    * if only one geolayer is being read, the FeatureClass exists within the
      File GeoDatabase

    Args:
        geolayer_id (str): the ID of the output GeoLayer
        one_geolayer_bool (bool): if True, the command is only reading one FC from the FGDB
        fc_name (str): the name of the FC being read. Only used if one_geolayer_bool is True.
            Default = None
        spatial_data_folder_abs (str): the full pathname to the input spatial data folder.
            Only used if one_geolayer_bool is True. Default = None

    Returns:
        bool: True if the GeoLayer should be read; False if at least one check failed.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # A GeoLayerID clash raises a WARNING or FAILURE depending on the
        # IfGeoLayerIDExists parameter (resolved inside run_check).
        validators.run_check(self, "IsGeoLayerIdUnique", "GeoLayerID", geolayer_id, None)
    ]

    # When reading a single feature class, it must exist inside the FGDB.
    if one_geolayer_bool:
        checks.append(
            validators.run_check(self, "IsFeatureClassInFGDB", "FeatureClass", fc_name, "FAIL",
                                 other_values=[spatial_data_folder_abs]))

    # Run only if every check passed.
    return False not in checks
def __should_close_datastore(self, datastore_id):
    """
    Determine whether the DataStore should be closed.

    Checks:
    * the DataStore ID is an existing DataStore ID

    Args:
        datastore_id (str): the ID of the DataStore to close

    Returns:
        bool: True if the process should be run, False otherwise.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # The DataStore ID must already exist; otherwise a FAILURE is raised.
        validators.run_check(self, "IsDataStoreIdExisting", "DataStoreID", datastore_id, "FAIL")
    ]

    # Run only if every check passed.
    return False not in checks
def __should_geolayer_be_deleted(self, geolayer_id_list):
    """
    Determine whether the GeoLayers should be removed.

    Checks:
    * the IDs of the input GeoLayers are existing GeoLayer IDs

    Args:
        geolayer_id_list: an id list of the GeoLayers to be removed

    Returns:
        bool: True if the GeoLayers should be removed; False if at least one
        check failed.
    """
    # Validate every requested ID; each entry is True when that GeoLayer ID
    # exists, and a FAILURE is raised for any that does not.
    checks = [
        validators.run_check(self, "IsGeoLayerIdExisting", "GeoLayerID", layer_id, "FAIL")
        for layer_id in geolayer_id_list
    ]

    # Run only if every check passed.
    return False not in checks
def __should_simplify_geolayer(self, geolayer_id, simplified_geolayer_id):
    """
    Determine whether the GeoLayer should be simplified.

    Checks:
    * the ID of the GeoLayer is an existing GeoLayer ID
    * the geolayer is either a POLYGON or LINE type
    * the ID of the SimplifiedGeoLayerID is unique (not an existing GeoLayer ID)

    Args:
        geolayer_id: the ID of the GeoLayer to be simplified
        simplified_geolayer_id: the ID of the simplified output GeoLayer

    Returns:
        bool: True if the GeoLayer should be simplified; False if at least one
        check failed.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # The GeoLayer ID must already exist; otherwise a FAILURE is raised.
        validators.run_check(self, "IsGeoLayerIDExisting", "GeoLayerID", geolayer_id, "FAIL"),
        # Simplification only applies to polygon or line geometry.
        validators.run_check(self, "DoesGeoLayerIdHaveCorrectGeometry", "GeoLayerID", geolayer_id,
                             "FAIL", other_values=[["Polygon", "LineString"]]),
        # A SimplifiedGeoLayerID clash raises a WARNING or FAILURE depending on
        # the IfGeoLayerIDExists parameter (resolved inside run_check).
        validators.run_check(self, "IsGeoLayerIdUnique", "SimplifiedGeoLayerID",
                             simplified_geolayer_id, None),
    ]

    # Run only if every check passed.
    return False not in checks
def __should_read_table(self, sql_file_abs, table_id, datastore_id):
    """
    Determine whether the table-reading process should be run.

    Checks:
    * the SqlFile (absolute) is a valid file, if not None
    * the ID of the Table is unique (not an existing Table ID)
    * the DataStore exists

    Args:
        sql_file_abs (str): the full pathname to the sql file
        table_id (str): the ID of the output Table
        datastore_id (str): the ID of the DataStore to read

    Returns:
        bool: True if the reading process should be run, False otherwise.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = []

    # The SqlFile check only applies when the SqlFile method is being used.
    if sql_file_abs:
        checks.append(
            validators.run_check(self, "IsFilePathValid", "SqlFile", sql_file_abs, "FAIL"))

    # A TableID clash raises a WARNING or FAILURE depending on the
    # IfTableIDExists parameter (resolved inside run_check).
    checks.append(
        validators.run_check(self, "IsTableIdUnique", "TableID", table_id, None))

    # The DataStore ID must already exist; otherwise a FAILURE is raised.
    checks.append(
        validators.run_check(self, "IsDataStoreIdExisting", "DataStoreID", datastore_id, "FAIL"))

    # Run only if every check passed.
    return False not in checks
def __should_extract_file(self, file_abs, output_folder_abs, file_type):
    """
    Determine whether the compressed file should be extracted.

    Checks the following:
    * the File is a valid file
    * the OutputFolder is a valid folder
    * the FileType correctly identifies the File's type

    Args:
        file_abs (str): the full path to the input compressed File
        output_folder_abs (str): the full path to the OutputFolder
        file_type (str): the FileType value depicting the file type of the input File

    Returns:
        Boolean. If TRUE, the file should be extracted. If FALSE, at least one check failed and the
        file should not be extracted.
    """
    # List of Boolean values. The Boolean values correspond to the results of the following tests.
    # If TRUE, the test confirms that the command should be run.
    should_run_command = []

    # If the File parameter value is not a valid file, raise a FAILURE.
    should_run_command.append(
        validators.run_check(self, "IsFilePathValid", "File", file_abs, "FAIL"))

    # If the OutputFolder parameter value is not a valid folder, raise a FAILURE.
    should_run_command.append(
        validators.run_check(self, "IsFolderPathValid", "OutputFolder", output_folder_abs, "FAIL"))

    # If the File Type is not actually recognized by the input File, raise a FAILURE.
    # FIX: compare case-insensitively (file_type.upper()), consistent with the other
    # __should_extract_file implementation; the original only matched the exact
    # uppercase strings "ZIP"/"TAR", silently skipping the check for e.g. "zip".
    if file_type.upper() == "ZIP":
        should_run_command.append(
            validators.run_check(self, "IsZipFile", "File", file_abs, "FAIL"))

    elif file_type.upper() == "TAR":
        should_run_command.append(
            validators.run_check(self, "IsTarFile", "File", file_abs, "FAIL"))

    # Return the Boolean to determine if the process should be run.
    return False not in should_run_command
def __should_read_gdb(self, spatial_data_folder_abs):
    """
    Determine whether the GeoDatabase should be read.

    Checks:
    * the SpatialDataFolder (absolute) is a valid folder
    * the SpatialDataFolder (absolute) is a valid File GeoDatabase

    Args:
        spatial_data_folder_abs (str): the full pathname to the input spatial data folder

    Returns:
        bool: True if the GeoDatabase should be read; False if at least one
        check failed.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # The spatial data folder must be a valid folder path.
        validators.run_check(self, "IsFolderPathValid", "SpatialDataFolder",
                             spatial_data_folder_abs, "FAIL")
    ]

    # Only test for a file geodatabase when the folder itself is valid.
    if False not in checks:
        checks.append(
            validators.run_check(self, "IsFolderAfGDB", "SpatialDataFolder",
                                 spatial_data_folder_abs, "FAIL"))

    # Run only if every check passed.
    return False not in checks
def __should_open_datastore(self, datastore_id, file_path_abs, if_datastore_id_exists):
    """
    Determine whether the DataStore should be opened/created.

    Checks:
    * the DataStore ID is unique
    * the ConfigFile is a valid file, if applicable
    * the DataStore is closed, if configured to open an existing DataStore

    Args:
        datastore_id (str): the ID of the DataStore to open/create
        file_path_abs (str): the absolute path to the configuration file. Will be None if the
            "Parameters configure datastore" method is to be used.
        if_datastore_id_exists (str): Determines the action that occurs if the DataStoreID
            already exists.

    Returns:
        bool: True if the process should be run, False otherwise.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # A DataStoreID clash raises a WARNING, FAILURE or IGNORE depending on
        # the IfDataStoreIDExists parameter (resolved inside run_check).
        validators.run_check(self, "IsDataStoreIdUnique", "DataStoreID", datastore_id, None)
    ]

    # When the "Configuration file configures datastore" method is used, the
    # configuration file must exist.
    if file_path_abs is not None:
        checks.append(
            validators.run_check(self, "IsFilePathValid", "File", file_path_abs, "FAIL"))

    # When re-opening an existing DataStore (IfDataStoreIDExists == "Open"), the
    # existing DataStore must currently be closed.
    if self.command_processor.get_datastore(datastore_id) and if_datastore_id_exists.upper() == "OPEN":

        existing_datastore = self.command_processor.get_datastore(datastore_id)

        if existing_datastore.is_connected:
            self.warning_count += 1
            message = "The DataStore ({}) is already open.".format(datastore_id)
            recommendation = "Specify a DataStoreID of a closed DataStore."
            self.logger.error(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))
            checks.append(False)

    # Run only if every check passed.
    return False not in checks
def __should_write_table(self, table_id, output_file_abs, delimiter, sort_columns):
    """
    Determine whether the table-writing process should be run.

    Checks:
    * the ID of the Table is an existing Table ID
    * the output folder is a valid folder
    * the delimiter is only one character
    * the columns within the SortColumns are existing columns

    Args:
        table_id: the ID of the Table to be written
        output_file_abs: the full pathname to the output file
        delimiter: the delimiter string that will separate each column in the output file
        sort_columns: a list of table columns used to sort the records

    Returns:
        bool: True if the writing process should be run, False otherwise.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # The Table ID must already exist; otherwise a FAILURE is raised.
        validators.run_check(self, "IsTableIdExisting", "TableID", table_id, "FAIL")
    ]

    # Validate the sort columns against the table's field names, but only when
    # the table exists and sort columns were supplied.
    if True in checks and sort_columns is not None:

        table = self.command_processor.get_table(table_id)
        columns = table.return_fieldnames()

        # Any sort column missing from the table is a FAILURE.
        invalid_columns = [col for col in sort_columns if col not in columns]

        if invalid_columns:
            message = 'The SortColumns ({}) are not columns in the table ({}).'.format(
                invalid_columns, table_id)
            recommendation = 'Specify columns within the Table. \nValid columns: {}'.format(columns)
            self.warning_count += 1
            self.logger.error(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))
            checks.append(False)

    # The parent folder of the output file must be a valid folder.
    output_folder_abs = io_util.get_path(output_file_abs)
    checks.append(
        validators.run_check(self, "IsFolderPathValid", "OutputFile", output_folder_abs, "FAIL"))

    # The delimiter must be exactly one character.
    checks.append(
        validators.run_check(self, "IsStringLengthCorrect", "Delimiter", delimiter, "FAIL",
                             other_values=[1]))

    # Run only if every check passed.
    return False not in checks
def __should_write_table(self, table_id, datastore_id, datastore_table_name, writemode):
    """
    Determine whether the Table should be written to the DataStore.

    Checks:
    * the Table ID exists
    * the DataStore ID exists
    * the DataStore table exists if the writemode is ExistingTableInsert, ExistingTableUpdate,
      or ExistingTableInsertUpdate
    * the DataStore table does not exist if the writemode starts with NewTable

    Args:
        table_id (str): the ID of the Table to write
        datastore_id (str): the ID of the DataStore to receive data
        datastore_table_name (str): the name of the DataStore table to receive data
        writemode (str): the method used to write data

    Returns:
        bool: True if the process should be run, False otherwise.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # The DataStore ID must already exist; otherwise a FAILURE is raised.
        validators.run_check(self, "IsDataStoreIdExisting", "DataStoreID", datastore_id, "FAIL")
    ]

    # The table-level checks only make sense when the DataStore exists.
    if False not in checks:

        mode = writemode.upper()

        # "Existing*" write modes (except *Overwrite) require the target table
        # to already exist in the DataStore.
        if mode.startswith("EXISTING") and not mode.endswith("OVERWRITE"):
            checks.append(
                validators.run_check(self, "IsTableInDataStore", "DataStoreTable",
                                     datastore_table_name, "FAIL", other_values=[datastore_id]))

        # "New*" write modes require the target table to NOT exist yet.
        if mode.startswith("NEW"):
            checks.append(
                validators.run_check(self, "IsDataStoreTableUnique", "DataStoreTable",
                                     datastore_table_name, "FAIL", other_values=[datastore_id]))

    # The Table ID must already exist; otherwise a FAILURE is raised.
    checks.append(
        validators.run_check(self, "IsTableIdExisting", "TableID", table_id, "FAIL"))

    # Run only if every check passed.
    return False not in checks
def __should_write_geolayer(self, geolayer_id, output_file_abs, crs, output_geom_format):
    """
    Determine whether the GeoLayer should be written to CSV.

    Checks:
    * the ID of the GeoLayer is an existing GeoLayer ID
    * the output folder is a valid folder
    * the feature geometry is "POINT" if the geometry format parameter is not "WKT"
    * the CRS is a valid CRS code

    Args:
        geolayer_id (str): the ID of the GeoLayer to be written
        output_file_abs (str): the full pathname (absolute) to the output CSV file
            (without the extension)
        crs (str): the desired coordinate reference system of the output spatial data
        output_geom_format (str): the geometry format to display the spatial data into CSV format

    Returns:
        bool: True if the GeoLayer should be written; False if at least one check failed.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # The GeoLayer ID must already exist; otherwise a FAILURE is raised.
        validators.run_check(self, "IsGeoLayerIdExisting", "GeoLayerID", geolayer_id, "FAIL")
    ]

    # The geometry and CRS checks only make sense when the GeoLayer exists.
    if False not in checks:

        # Non-WKT output can only represent point geometry.
        if not output_geom_format.upper() == "WKT":
            checks.append(
                validators.run_check(self, "DoesGeoLayerIdHaveCorrectGeometry", "GeoLayerID",
                                     geolayer_id, "FAIL", other_values=[["Point"]]))

        # Default the output CRS to the GeoLayer's own CRS.
        if crs is None:
            crs = self.command_processor.get_geolayer(geolayer_id).get_crs()

        # The CRS must be a valid coordinate reference system code.
        checks.append(
            validators.run_check(self, "IsCRSCodeValid", "OutputCRS", crs, "FAIL"))

    # The OutputFile's folder must be valid; otherwise a FAILURE is raised.
    checks.append(
        validators.run_check(self, "DoesFilePathHaveAValidFolder", "OutputFile",
                             output_file_abs, "FAIL"))

    # Run only if every check passed.
    return False not in checks
def __should_read_geolayer(self, delimited_file, delimiter, geom_format, x_col, y_col, wkt_col,
                           crs, geolayer_id):
    """
    Determine whether the GeoLayer should be read from the delimited file.

    Checks:
    * the DelimitedFile (absolute) is a valid file
    * if the CSV uses XY coordinates, the XColumn and YColumn are actual field names
    * if the CSV uses WKT geometries, the WKTColumn is an actual field name
    * the CRS code is a valid code
    * the ID of the output GeoLayer is unique (not an existing GeoLayer ID)

    Args:
        delimited_file (str, required): The absolute path to the delimited file to be read.
        delimiter (str, required): The delimiter symbol used in the delimited file. Often a comma.
        geom_format (str): The format of the geometry representation in the delimited file.
            Either `WKT` or `XY`.
        x_col (str): The name of the delimited file column that holds the x coordinate data.
        y_col (str): The name of the delimited file column that holds the y coordinate data.
        wkt_col (str): The name of the delimited file column that holds the WKT geometry data.
        crs (str, EPSG format): The coordinate reference system code associated with the
            X and Y coordinates.
        geolayer_id (str): the GeoLayer identifier.

    Returns:
        bool: True if the geolayer should be read, False otherwise.
    """
    # Collect the outcome of each validation check; True means the check passed.
    checks = [
        # The delimited file must be a valid file path; otherwise a FAILURE is raised.
        validators.run_check(self, "IsFilePathValid", "DelimitedFile", delimited_file, "FAIL")
    ]

    # The column and CRS checks require the delimited file to exist.
    if checks[0] is True:

        if geom_format.upper() == "XY":
            # XY geometry requires both coordinate columns to exist in the file.
            checks.append(
                validators.run_check(self, "IsDelimitedFileColumnNameValid", "XColumn", x_col,
                                     "FAIL", other_values=[delimited_file, delimiter]))
            checks.append(
                validators.run_check(self, "IsDelimitedFileColumnNameValid", "YColumn", y_col,
                                     "FAIL", other_values=[delimited_file, delimiter]))
        else:
            # WKT geometry requires the WKT column to exist in the file.
            checks.append(
                validators.run_check(self, "IsDelimitedFileColumnNameValid", "WKTColumn", wkt_col,
                                     "FAIL", other_values=[delimited_file, delimiter]))

        # The CRS must be a valid coordinate reference code.
        checks.append(
            validators.run_check(self, "IsCrsCodeValid", "CRS", crs, "FAIL"))

    # A GeoLayerID clash raises a WARNING or FAILURE depending on the
    # IfGeoLayerIDExists parameter (resolved inside run_check).
    checks.append(
        validators.run_check(self, "IsGeoLayerIdUnique", "GeoLayerID", geolayer_id, None))

    # Run only if every check passed.
    return False not in checks
def check_command_input(self, input_geolayer_id, attribute_name, output_geolayer_ids):
    """
    Determine whether the GeoLayer should be split.

    Checks the following:
    * the ID of the input GeoLayer is an existing GeoLayer ID
    * the attribute name is a valid name for the GeoLayer (if not, log an error message &
      do not continue.)
    * the IDs of the output GeoLayers are unique (not an existing GeoLayer ID)

    Args:
        input_geolayer_id: the ID of the input GeoLayer
        attribute_name (str): the name of the attribute in which to split the GeoLayer
        output_geolayer_ids: the IDs of the output GeoLayers

    Returns:
        Boolean. If TRUE, the GeoLayer should be split. If FALSE, at least one check failed and
        the GeoLayer should not be split.
    """
    logger = logging.getLogger(__name__)

    # List of Boolean values. The Boolean values correspond to the results of the following tests.
    # If TRUE, the test confirms that the command should be run.
    should_run_command = []

    # If the input GeoLayerID is not an existing GeoLayerID, raise a FAILURE.
    should_run_command.append(
        validators.run_check(self, "IsGeoLayerIDExisting", "InputGeoLayerID",
                             input_geolayer_id, "FAIL"))

    # If the input GeoLayer exists, continue with the checks.
    if False not in should_run_command:

        # Get the input GeoLayer object and its attribute names.
        input_geolayer = self.command_processor.get_geolayer(input_geolayer_id)
        list_of_attributes = input_geolayer.get_attribute_field_names()

        for i_attribute_name in list_of_attributes:
            logger.info('Input layer has attribute "' + str(i_attribute_name) + '"')

        if attribute_name not in list_of_attributes:
            # If the attribute name is not valid, raise a FAILURE.
            self.warning_count += 1
            message = 'The attribute name ({}) is not valid.'.format(attribute_name)
            recommendation = 'Specify a valid attribute name. Valid attributes for this layer ' \
                             'are as follows: {}'.format(list_of_attributes)
            self.logger.error(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))
            # FIX: the original logged the FAILURE but never recorded it in
            # should_run_command, so the command would still run with an
            # invalid attribute name.
            should_run_command.append(False)
        else:
            # FIX: only log "found" when the attribute actually exists; the
            # original logged this unconditionally, even after the failure path.
            logger.info('Found attribute "' + attribute_name + '" in input layer attributes')

    # If the OutputGeoLayerID is the same as an already-existing GeoLayerID, raise a WARNING or
    # FAILURE (depends on the value of the IfGeoLayerIDExists parameter.)
    should_run_command.append(
        validators.run_check(self, "IsGeoLayerIdUnique", "OutputGeoLayerID",
                             output_geolayer_ids, None))

    # Return the Boolean to determine if the process should be run.
    # FIX: removed the original's unreachable trailing code (a stray
    # "else: return True" and a logger.info call after the return).
    if False in should_run_command:
        return False
    else:
        return True
def __should_list_files(self, folder_abs, url_abs, list_files_bool, list_dirs_bool, list_property):
    """
    Determine whether the file/folder listing should be produced.

    Checks the following:
    * the URL/Folder is valid.
    * either the ListFiles or ListFolders (or both) are set to TRUE. Raises a WARNING,
      does not cause a FAILURE.
    * the list property is unique.

    Args:
        folder_abs (str or None): the full path to the input Folder
        url_abs (str or None): the full path to the input URL
        list_files_bool (Boolean): set to True if the files within the folder/url are to be listed.
        list_dirs_bool (Boolean): set to True if the folders within the folder/url are to be listed.
        list_property (str): the name of the property to hold the output list of strings

    Returns:
        Boolean. If TRUE, the listing should be produced. If FALSE, at least one check failed
        and the listing should not be produced.
    """
    # List of Boolean values. The Boolean values correspond to the results of the following tests.
    # If TRUE, the test confirms that the command should be run.
    should_run_command = []

    # If the Folder parameter is enabled, it must be a valid folder.
    if folder_abs:
        should_run_command.append(
            validators.run_check(self, "IsFolderPathValid", "Folder", folder_abs, "FAIL"))

    # If the URL parameter is enabled, it must be a valid url.
    if url_abs:
        should_run_command.append(
            validators.run_check(self, "IsUrlValid", "URL", url_abs, "FAIL"))

    # If both the ListFiles and the ListFolders are set to FALSE, raise a WARNING
    # (the command still runs, but produces no output).
    if not list_files_bool and not list_dirs_bool:
        message = "Both ListFiles and ListFolders are set to FALSE. There will be no output."
        # FIX: corrected typo in the user-facing recommendation ("at lease" -> "at least").
        recommendation = "Set at least one of the parameters to TRUE."
        self.logger.warning(message)
        self.command_status.add_to_log(
            CommandPhaseType.RUN,
            CommandLogRecord(CommandStatusType.WARNING, message, recommendation))
        # A warning does not block the command.
        should_run_command.append(True)

    # If the ListProperty is the same as an already-existing Property, raise a WARNING or
    # FAILURE (depends on the value of the IfPropertyExists parameter.)
    should_run_command.append(
        validators.run_check(self, "IsPropertyUnique", "ListProperty", list_property, None))

    # Return the Boolean to determine if the process should be run.
    if False in should_run_command:
        return False
    else:
        return True