class ReadTableFromDelimitedFile(AbstractCommand):
    """
    Reads a Table from a delimited file.

    Command Parameters
    * InputFile (str, required): the relative or absolute pathname of the delimited file to read.
    * TableID (str, required): the identifier of the Table.
    * Delimiter (str, optional): the delimiter of the input file. Default is `,`.
    * HeaderLines (str, optional): The number of rows representing non-data comments. These columns are not
      included in the output Table data values. Default: 0
    * NullValues (str, optional): A list of values within the delimited file that represent null values.
      Default: '' (an empty string)
    * IfTableIDExists (str, optional): This parameter determines the action that occurs if the TableID already
      exists within the GeoProcessor. Available options are: `Replace`, `ReplaceAndWarn`, `Warn` and `Fail`
      (Refer to user documentation for detailed description.) Default value is `Replace`.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("InputFile", type("")),
        CommandParameterMetadata("Delimiter", type("")),
        CommandParameterMetadata("TableID", type("")),
        CommandParameterMetadata("HeaderLines", type("")),
        CommandParameterMetadata("NullValues", type("")),
        CommandParameterMetadata("IfTableIDExists", type(""))
    ]

    def __init__(self):
        """
        Initialize the command.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "ReadTableFromDelimitedFile"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Read a table from a delimited file."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # InputFile
        self.parameter_input_metadata['InputFile.Description'] = "delimited file to read"
        self.parameter_input_metadata['InputFile.Label'] = "Input file"
        self.parameter_input_metadata['InputFile.Required'] = True
        self.parameter_input_metadata['InputFile.Tooltip'] = \
            "The delimited file (relative or absolute path) to read. ${Property} syntax is recognized."
        self.parameter_input_metadata['InputFile.FileSelector.Type'] = "Read"
        self.parameter_input_metadata['InputFile.FileSelector.Title'] = "Select a delimited file to read"
        # TableID
        self.parameter_input_metadata['TableID.Description'] = "output table identifier"
        self.parameter_input_metadata['TableID.Label'] = "TableID"
        self.parameter_input_metadata['TableID.Required'] = True
        self.parameter_input_metadata['TableID.Tooltip'] = "A Table identifier"
        # Delimiter
        self.parameter_input_metadata['Delimiter.Description'] = "delimiter character"
        self.parameter_input_metadata['Delimiter.Label'] = "Delimiter"
        self.parameter_input_metadata['Delimiter.Tooltip'] = \
            "The delimiter character of the input delimited file."
        # HeaderLines
        self.parameter_input_metadata['HeaderLines.Description'] = "number of rows of non-data comments"
        self.parameter_input_metadata['HeaderLines.Label'] = "Header lines"
        self.parameter_input_metadata['HeaderLines.Tooltip'] = (
            "The number of rows representing non-data comments. "
            "These columns are not included in the output Table data values.")
        self.parameter_input_metadata['HeaderLines.Value.Default'] = "0"
        # NullValues
        self.parameter_input_metadata['NullValues.Description'] = "values that should convert to NULL"
        self.parameter_input_metadata['NullValues.Label'] = "Null values"
        # Fixed typo in tooltip text: "br" -> "be".
        self.parameter_input_metadata['NullValues.Tooltip'] = (
            "A list of values within the delimited file that should be converted to NULL values. "
            "The Python None will be used internally.")
        self.parameter_input_metadata['NullValues.Value.Default'] = "None"
        # IfTableIDExists
        self.parameter_input_metadata['IfTableIDExists.Description'] = "action if TableID exists"
        self.parameter_input_metadata['IfTableIDExists.Label'] = "If table exists"
        self.parameter_input_metadata['IfTableIDExists.Tooltip'] = (
            "The action that occurs if the TableID already exists within the GeoProcessor.\n"
            "Replace : The existing Table within the GeoProcessor is overwritten with the new Table. "
            "No warning is logged.\n"
            "ReplaceAndWarn: The existing Table within the GeoProcessor is overwritten with the new Table. "
            "A warning is logged.\n"
            "Warn : The new Table is not created. A warning is logged.\n"
            "Fail : The new Table is not created. A fail message is logged.")
        self.parameter_input_metadata['IfTableIDExists.Values'] = [
            "", "Replace", "ReplaceAndWarn", "Warn", "Fail"
        ]
        self.parameter_input_metadata['IfTableIDExists.Value.Default'] = "Replace"

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that parameter InputFile is a non-empty, non-None string.
        pv_InputFile = self.get_parameter_value(
            parameter_name='InputFile', command_parameters=command_parameters)

        if not validators.validate_string(pv_InputFile, False, False):
            message = "InputFile parameter has no value."
            # Fixed copy-paste error: this command reads a delimited file, not an "output Excel file".
            recommendation = "Specify the InputFile parameter (relative or absolute pathname) to indicate the " \
                             "location and name of the delimited file to read."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that parameter TableID is a non-empty, non-None string.
        pv_TableID = self.get_parameter_value(
            parameter_name='TableID', command_parameters=command_parameters)

        if not validators.validate_string(pv_TableID, False, False):
            message = "TableID parameter has no value."
            recommendation = "Specify the TableID parameter."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that optional parameter IfTableIDExists is either `Replace`, `ReplaceAndWarn`, `Warn`, `Fail`, None.
        pv_IfTableIDExists = self.get_parameter_value(
            parameter_name="IfTableIDExists", command_parameters=command_parameters)
        acceptable_values = ["Replace", "ReplaceAndWarn", "Warn", "Fail"]
        if not validators.validate_string_in_list(pv_IfTableIDExists,
                                                  acceptable_values,
                                                  none_allowed=True,
                                                  empty_string_allowed=True,
                                                  ignore_case=True):
            message = "IfTableIDExists parameter value ({}) is not recognized.".format(pv_IfTableIDExists)
            recommendation = "Specify one of the acceptable values ({}) for the IfTableIDExists parameter.".format(
                acceptable_values)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # If the HeaderLines is used, continue with the checks.
        pv_HeaderLines = self.get_parameter_value("HeaderLines", command_parameters=command_parameters)
        if pv_HeaderLines:

            # Check that the HeaderLines parameter is an integer or None.
            if not validators.validate_int(pv_HeaderLines, True, False):
                message = "HeaderLines parameter value ({}) is not a valid integer value.".format(pv_HeaderLines)
                # Fixed typo: "contnet" -> "content".
                recommendation = "Specify a positive integer for the HeaderLines parameter to specify how" \
                                 " many rows represent the header content of the delimited file."
                warning += "\n" + message
                self.command_status.add_to_log(
                    CommandPhaseType.INITIALIZATION,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_read_table(self, input_file_abs, table_id):
        """
        Checks the following:
        * the InputFile (absolute) is a valid file
        * the ID of the Table is unique (not an existing Table ID)

        Args:
            input_file_abs (str): the full pathname to the input data file
            table_id (str): the ID of the output Table

        Returns:
            Boolean. If TRUE, the reading process should be run. If FALSE, it should not be run.
        """
        # List of Boolean values. The Boolean values correspond to the results of the following tests.
        # If TRUE, the test confirms that the command should be run.
        should_run_command = []

        # If the input file is not a valid file path, raise a FAILURE.
        should_run_command.append(
            validators.run_check(self, "IsFilePathValid", "InputFile", input_file_abs, "FAIL"))

        # If the TableID is the same as an already-existing TableID, raise a WARNING or FAILURE (depends on the
        # value of the IfTableIDExists parameter.)
        should_run_command.append(
            validators.run_check(self, "IsTableIdUnique", "TableID", table_id, None))

        # The command should only run if every check passed.
        return False not in should_run_command

    @staticmethod
    def __read_table_from_delimited_file(path, table_id, delimiter, header_count, null_values):
        """
        Creates a GeoProcessor table object from a delimited file.

        Args:
            path (str): the path to the delimited file on the local machine
            table_id (str): the id of the GeoProcessor Table that is to be created
            delimiter (str): the delimiter of the input file
            header_count (int): the number of rows representing the header content (not data values)
            null_values (list): list of strings that are values in the delimited file representing null values

        Return:
            A GeoProcessor Table object.
        """
        # Create a table object
        table = Table(table_id)

        # Read the file in a single pass: skip the comment rows, read the column-header row, then
        # collect the data rows. (The previous implementation re-read the whole file once per column
        # via seek(0), which was O(columns * rows) file reads.)
        with open(path, 'r') as csvfile:

            # Pass the csv file to the csv.reader object. Specify the delimiter.
            csvreader = csv.reader(csvfile, delimiter=delimiter)

            # Skip the non-data comment rows.
            for i in range(header_count):
                next(csvreader)

            # The next row holds the column header names.
            col_headers = next(csvreader)

            # All remaining rows are data values.
            data_rows = [row for row in csvreader]

        # Iterate over the number of columns specified by a column header name.
        for i in range(len(col_headers)):

            # Create a TableField object and assign the field "name" as the column header name.
            table_field = TableField(col_headers[i])

            # The column contents are the i-th item of every data row.
            table_field.items = [row[i] for row in data_rows]

            # Set the null values
            table_field.null_values = null_values

            # Convert the data values that represent null values into None values.
            table_field.assign_nulls()

            # Convert the column contents to the correct data type.
            table_field.assign_data_type()

            # Add the updated table field object to the Table attribute.
            table.add_table_field(table_field)

            # Get the number of row entries.
            table.entry_count = len(table_field.items)

        # Iterate over the number of row entries.
        for i_row in range(table.entry_count):

            # Create a TableRecord object holding the i_row-th item of every column.
            table_record = TableRecord()
            for i_col in range(len(table.table_fields)):
                table_record.add_item(table.table_fields[i_col].items[i_row])

            # Add the table record to the Table attributes.
            table.table_records.append(table_record)

        # Return the GeoProcessor Table object.
        return table

    def run_command(self):
        """
        Run the command. Read the Table from the delimited file.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """
        # Obtain the parameter values.
        pv_InputFile = self.get_parameter_value("InputFile")
        pv_Delimiter = self.get_parameter_value("Delimiter", default_value=",")
        pv_TableID = self.get_parameter_value("TableID")
        pv_HeaderLines = int(self.get_parameter_value("HeaderLines", default_value="0"))
        pv_NullValues = self.get_parameter_value("NullValues", default_value="''")

        # Convert the InputFile parameter value relative path to an absolute path and expand for ${Property} syntax
        input_file_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(
                self.command_processor.get_property('WorkingDir'),
                self.command_processor.expand_parameter_value(pv_InputFile, self)))

        # Convert the NullValues parameter values to a list.
        pv_NullValues = string_util.delimited_string_to_list(pv_NullValues)

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.__should_read_table(input_file_absolute, pv_TableID):

            try:
                # Create the table from the delimited file.
                table = self.__read_table_from_delimited_file(
                    input_file_absolute, pv_TableID, pv_Delimiter, pv_HeaderLines, pv_NullValues)

                # Add the table to the GeoProcessor's Tables list.
                self.command_processor.add_table(table)

            # Log a failure if an unexpected error occurs during the process.
            except Exception:
                self.warning_count += 1
                message = "Unexpected error reading Table {} from delimited file ({}).".format(
                    pv_TableID, input_file_absolute)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise Runtime Error if any errors occurred
        if self.warning_count > 0:
            # Fixed wording: "proceeding" was a typo for "processing".
            message = "There were {} warnings processing this command.".format(self.warning_count)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class CreateRegressionTestCommandFile(AbstractCommand):
    """
    The CreateRegressionTestCommandFile examines a folder tree and creates a command file with
    RunCommand commands for all GeoProcessor test command files to run.
    This is used to automate creating the full test suite to test the software.
    """

    __command_parameter_metadata = [
        CommandParameterMetadata("SearchFolder", type("")),
        CommandParameterMetadata("FilenamePattern", type("")),
        CommandParameterMetadata("OutputFile", type(""))
    ]

    def __init__(self):
        """
        Initialize a new instance of the command.
        """
        super().__init__()
        self.command_name = "CreateRegressionTestCommandFile"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = (
            "This command is used for software functional testing and validation of workflow processes.\n"
            "It searches all folders in the selected folder to find tests to run and creates a "
            "test suite command file.")
        self.command_metadata['EditorType'] = "Simple"

        # Parameter Metadata
        self.parameter_input_metadata = dict()
        # SearchFolder
        self.parameter_input_metadata['SearchFolder.Description'] = ""
        self.parameter_input_metadata['SearchFolder.Label'] = "Search folder"
        self.parameter_input_metadata['SearchFolder.Tooltip'] = (
            "The folder to search for regression test command files. "
            "All subfolders will also be searched. Can use ${Property}.")
        self.parameter_input_metadata['SearchFolder.Required'] = True
        self.parameter_input_metadata['SearchFolder.FileSelector.Type'] = "Read"
        self.parameter_input_metadata['SearchFolder.FileSelector.Button.Tooltip'] = "Browse for folder"
        # Fixed key typo: was 'SearchFolder.FileSelector.Tile'
        # (compare 'InputFile.FileSelector.Title' used by other commands).
        self.parameter_input_metadata['SearchFolder.FileSelector.Title'] = "Select folder to search for tests"
        self.parameter_input_metadata['SearchFolder.FileSelector.SelectFolder'] = True
        # OutputFile
        # Fixed copy-paste error: this command writes a command file, not a property file.
        self.parameter_input_metadata['OutputFile.Description'] = "command file to write"
        self.parameter_input_metadata['OutputFile.Label'] = "Output file"
        self.parameter_input_metadata['OutputFile.Tooltip'] = (
            "The command file to write, as an absolute path or relative to the command file, can use ${Property}.")
        self.parameter_input_metadata['OutputFile.Required'] = True
        self.parameter_input_metadata['OutputFile.FileSelector.Type'] = "Write"
        # FilenamePattern
        self.parameter_input_metadata['FilenamePattern.Description'] = "pattern to find command files"
        self.parameter_input_metadata['FilenamePattern.Label'] = "Filename pattern"
        self.parameter_input_metadata['FilenamePattern.Tooltip'] = (
            "Pattern to find GeoProcessor command files, using * wildcards.")
        self.parameter_input_metadata['FilenamePattern.Value.Default'] = "test-*.gp"

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            Nothing.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning_message = ""
        logger = logging.getLogger(__name__)

        # SearchFolder is required
        # (Removed leftover debug print() calls - validation problems are reported through the
        # command status log and the ValueError below, consistent with other commands.)
        pv_SearchFolder = self.get_parameter_value(parameter_name='SearchFolder',
                                                   command_parameters=command_parameters)
        if not validators.validate_string(pv_SearchFolder, False, False):
            message = "SearchFolder parameter has no value."
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, "Specify the search folder."))

        # FilenamePattern is optional, will default to "test-*" at runtime

        # OutputFile is required
        pv_OutputFile = self.get_parameter_value(parameter_name='OutputFile',
                                                 command_parameters=command_parameters)
        if not validators.validate_string(pv_OutputFile, False, False):
            message = "OutputFile parameter has no value."
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, "Specify the output file."))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty
        # triggers an exception below.
        warning_message = command_util.validate_command_parameter_names(self, warning_message)

        # If any warnings were generated, throw an exception
        if len(warning_message) > 0:
            logger.warning(warning_message)
            raise ValueError(warning_message)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    @classmethod
    def __determine_expected_status_parameter(cls, filename):
        """
        Determine the expected status parameter by searching the command file for an "@expectedStatus" string,
        generally in a comment at the top of the file.

        Args:
            filename (str): Name of file to open and scan.

        Returns:
            A string for the expectedStatus parameter or empty string if no expected status is known.

        Raises:
            FileNotFoundError if the filename is not found.
        """
        expected_status_parameter = ""
        with open(filename, "r") as f:
            for line in f:
                line_upper = line.upper()
                index = line_upper.find("@EXPECTEDSTATUS")
                if index < 0:
                    continue
                # Get the status as the next token after the tag.
                # Note: the token is taken from the uppercased line.
                expected_status = line_upper[index + len("@EXPECTEDSTATUS"):].strip()
                # Translate variations to the official name recognized by RunCommands()
                if expected_status in ("WARN", "WARNING"):
                    expected_status = "Warning"
                elif expected_status in ("FAIL", "FAILURE"):
                    expected_status = "Failure"
                expected_status_parameter = ',ExpectedStatus="' + expected_status + '"'
                break
        return expected_status_parameter

    # TODO smalers 2018-01-20 Evaluate whether to include additional functionality as per TSTool
    @classmethod
    def __get_matching_filenames_in_tree(cls, file_list, path, pattern_regex, pattern_string=None):
        """
        Check all files and directories under the given folder and if the file matches a valid pattern
        it is added to the test list.

        Args:
            file_list ([str]): list of files that are matched, will be appended to.
            path (str): folder in which to start searching for matching files.
            pattern_regex (regex): Compiled Python regex to match when searching files, for example "test-*.gp".
            pattern_string (str): The pattern regex string before being compiled, used in logging.
        """
        logger = logging.getLogger(__name__)
        # Use the following for troubleshooting
        debug = False
        if os.path.isdir(path):
            # A directory (folder) so get the children and recurse
            for child in os.listdir(path):
                # Recursively call with full path using the folder and child name.
                CreateRegressionTestCommandFile.__get_matching_filenames_in_tree(
                    file_list, os.path.join(path, child), pattern_regex, pattern_string=pattern_string)
        else:
            # A file - add to list if file matches the pattern, but only check the filename (not leading path)
            filename = os.path.basename(path)
            if debug:
                logger.debug('Checking filename "' + filename + '" against "' + str(pattern_string) + '"')
            # fullmatch() requires the entire filename to match the pattern
            # (replaces the previous match() + start()/end() length check).
            if pattern_regex.fullmatch(filename) is not None:
                # FIXME SAM 2007-10-15 Need to enable something like the following to make more robust
                # && isValidCommandsFile( dir )
                if debug:
                    logger.debug("File matched.")
                # Exclude the command file if tag in the file indicates that it is not compatible with
                # this command's parameters.
                # Test is to be included for the OS and test suite.
                # Include the path because it should be relative to the search path.
                file_list.append(path)
            elif debug:
                logger.debug("File not matched.")

    def run_command(self):
        """
        Run the command. Create the regression test command file.

        Returns:
            Nothing.

        Raises:
            ValueError: if a runtime input error occurs.
            RuntimeError: if warnings occurred while processing the command.
        """
        warning_count = 0
        logger = logging.getLogger(__name__)

        # Get data for the command
        pv_SearchFolder = self.get_parameter_value('SearchFolder')
        pv_FilenamePattern = self.get_parameter_value('FilenamePattern')
        if pv_FilenamePattern is None or pv_FilenamePattern == "":
            # The pattern that is desired is test-*.gp, using globbing syntax
            pv_FilenamePattern = "[Tt][Ee][Ss][Tt]-*.gp"
        pv_OutputFile = self.get_parameter_value('OutputFile')

        # Runtime checks on input
        working_dir = self.command_processor.get_property('WorkingDir')
        logger.info('working_dir: "' + working_dir + '"')
        search_folder_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(working_dir,
                                     self.command_processor.expand_parameter_value(pv_SearchFolder, self)))
        search_folder_absolute_internal = io_util.verify_path_for_os(search_folder_absolute,
                                                                     always_use_forward_slashes=True)
        output_file_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(working_dir,
                                     self.command_processor.expand_parameter_value(pv_OutputFile, self)))
        output_file_absolute_internal = io_util.verify_path_for_os(output_file_absolute,
                                                                   always_use_forward_slashes=True)
        logger.info('search_folder_absolute: "' + search_folder_absolute + '"')
        logger.info('search_folder_absolute_internal: "' + search_folder_absolute_internal + '"')

        if not os.path.exists(search_folder_absolute_internal):
            message = 'The folder to search "' + search_folder_absolute + '" does not exist.'
            logger.warning(message)
            warning_count += 1
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 "Verify that the folder exists at the time the command is run."))

        if warning_count > 0:
            message = "There were " + str(warning_count) + " warnings about command parameters."
            logger.warning(message)
            raise ValueError(message)

        # Do the processing
        try:
            # Output folder is used when constructing filenames for command files to run
            output_folder_absolute = os.path.dirname(output_file_absolute_internal)
            logger.info('output_folder_absolute: "' + output_folder_absolute + '"')
            # Fixed log label: this line logs the output file (internal form), not the folder.
            logger.info('output_file_absolute_internal: "' + output_file_absolute_internal + '"')
            files = []  # List of files to match

            # Convert globbing-style wildcards to Pythonic regex
            logger.info('Filename pattern using globbing = "' + pv_FilenamePattern + '"')
            # Escape literal "." so .gp is handled literally ...
            filename_pattern_regex = pv_FilenamePattern.replace(".", "\\.")
            # ... and "*" matches any characters of any length (e.g. between "test-" and ".gp").
            filename_pattern_regex = filename_pattern_regex.replace("*", ".*")
            logger.info('Filename pattern using Python regex = "' + filename_pattern_regex + '"')
            filename_pattern_regex_compiled = re.compile(filename_pattern_regex)
            CreateRegressionTestCommandFile.__get_matching_filenames_in_tree(
                files, search_folder_absolute_internal, filename_pattern_regex_compiled,
                pattern_string=filename_pattern_regex)
            # Sort the list
            files = sorted(files)

            # Open the output file in a context manager so it is closed even if a write fails.
            # TODO smalers 2018-10-20 decide whether to support append mode
            with open(output_file_absolute_internal, "w") as out:
                # Write a standard header to the file so that it is clear when the file was created
                io_util.print_standard_file_header(out, comment_line_prefix="#")

                # Include the setup command file if requested
                # logger.info('Adding commands from setup command file "' + setup_command_file_absolute + '"')
                # include_setup_file(out, setup_file_absolute, "setup")

                # Python 3 translates \n into the OS-specific end of line so using os.linesep
                # would introduce extra end of lines.
                nl = "\n"
                out.write("#" + nl)
                out.write("# The following " + str(len(files)) +
                          " test cases will be run to compare results with expected results." + nl)
                out.write("# Individual log files are generally created for each test." + nl)
                # TODO smalers 2018-01-20 evaluate how to handle output table
                # - Currently the GeoProcessor does not have table commands
                table_param = ""
                # Use absolute path since each developer will regenerate this file.
                out.write('StartRegressionTestResultsReport(OutputFile="' +
                          output_file_absolute + '.out.txt"' + table_param + ")" + nl)

                # Add a RunCommands() command for each matching test command file.
                for a_file in files:
                    logger.info('Adding command file "' + a_file + '"')
                    # The command files to run are relative to the commands file being created.
                    # - use the operating system separator
                    command_file_to_run = io_util.verify_path_for_os(
                        io_util.to_relative_path(output_folder_absolute, a_file))
                    # Determine if the command file has @expectedStatus in it. If so, define an
                    # ExpectedStatus parameter for the command.
                    logger.info('Relative path to command file is "' + command_file_to_run + '"')
                    out.write('RunCommands(CommandFile="' + command_file_to_run + '"' +
                              CreateRegressionTestCommandFile.__determine_expected_status_parameter(a_file) +
                              ')' + nl)

                # TODO smalers 2018-12-30 Maybe the output file is relative to the working folder
                # output_file_relative = io_util.to_relative_path(working_dir, output_file_absolute)
                # out.write('WriteCommandSummaryToFile(OutputFile="' + output_file_relative + '.summary.html")' + nl)
                out.write('WriteCommandSummaryToFile(OutputFile="' + output_file_absolute + '.summary.html")' + nl)

                # TODO smalers 2018-01-28 evaluate whether to support end content
                # Include the end command file if requested
                # Message.printStatus ( 2, routine, "Adding commands from end command file \"" +
                #     EndCommandFile_full + "\"")
                # includeCommandFile ( out, EndCommandFile_full, "end" );

            # Add the output file to command output
            self.command_processor.add_output_file(output_file_absolute)
        except Exception:
            warning_count += 1
            message = 'Unexpected error creating regression test command file "' + output_file_absolute + '"'
            # logger.exception() records the message plus the active exception's traceback.
            # (The original passed the exception object as a %-format argument - incorrect logging
            # API usage - and also dumped the traceback to stdout via traceback.print_exc().)
            logger.exception(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 "See the log file for details."))

        if warning_count > 0:
            message = "There were " + str(warning_count) + " warnings processing the command."
            raise RuntimeError(message)

        self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class RemoveGeoLayerAttributes(AbstractCommand):
    """
    Removes one or more attributes from a GeoLayer.

    * The names of the attributes to remove are specified.

    Command Parameters
    * GeoLayerID (str, required): the ID of the input GeoLayer, the layer to remove the attribute from
    * AttributeNames (str, required): the names of the attributes to remove. Strings separated by commas.
      Attribute names must be valid attribute fields to the GeoLayer. This parameter is case specific.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("GeoLayerID", type("")),
        CommandParameterMetadata("AttributeNames", type(""))]

    def __init__(self):
        """
        Initialize the command, including the metadata used by the command editor.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "RemoveGeoLayerAttributes"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Removes one or more attributes from a GeoLayer."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # GeoLayerID
        self.parameter_input_metadata['GeoLayerID.Description'] = "GeoLayer identifier"
        self.parameter_input_metadata['GeoLayerID.Label'] = "GeoLayerID"
        self.parameter_input_metadata['GeoLayerID.Required'] = True
        self.parameter_input_metadata['GeoLayerID.Tooltip'] = \
            "The ID of the GeoLayer with the attribute to be removed."
        # AttributeNames
        self.parameter_input_metadata['AttributeNames.Description'] = "names of the attributes to remove"
        self.parameter_input_metadata['AttributeNames.Label'] = "Attribute Names"
        self.parameter_input_metadata['AttributeNames.Required'] = True
        self.parameter_input_metadata['AttributeNames.Tooltip'] = \
            "The names of the attributes to be removed, separated by commas, case-specific."

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that parameter GeoLayerID is a non-empty, non-None string.
        pv_GeoLayerID = self.get_parameter_value(
            parameter_name='GeoLayerID', command_parameters=command_parameters)

        if not validators.validate_string(pv_GeoLayerID, False, False):
            message = "GeoLayerID parameter has no value."
            recommendation = "Specify the GeoLayerID parameter to indicate the input GeoLayer."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that parameter AttributeNames is a non-empty, non-None string.
        pv_AttributeNames = self.get_parameter_value(
            parameter_name='AttributeNames', command_parameters=command_parameters)

        if not validators.validate_string(pv_AttributeNames, False, False):
            message = "AttributeNames parameter has no value."
            # Fixed copy-paste error: old recommendation said "attribute to add" in a remove command.
            recommendation = "Specify the AttributeNames parameter to indicate the names of the attributes to remove."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)
        else:
            # Refresh the phase severity
            self.command_status.refresh_phase_severity(
                CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_attribute_be_removed(self, geolayer_id, attribute_names):
        """
        Checks the following:
        * The ID of the input GeoLayer is an actual GeoLayer (if not, log an error message and do not continue.)
        * The attribute names are valid names for the GeoLayer (if not, log an error message and do not continue.)

        Args:
            geolayer_id: the ID of the GeoLayer with the attribute to remove
            attribute_names (list of strings): the names of the attributes to remove from the GeoLayer

        Returns:
            remove_attribute: Boolean. If TRUE, the attribute should be removed from the GeoLayer.
            If FALSE, a check has failed and the attribute should not be removed.
        """
        # Boolean to determine if the attribute should be removed. Set to TRUE until one or many checks fail.
        remove_attribute = True

        # Look the layer up once and reuse it (the old code queried the processor twice).
        input_geolayer = self.command_processor.get_geolayer(geolayer_id)

        # If the input GeoLayer does not exist, raise a FAILURE.
        if not input_geolayer:
            remove_attribute = False
            self.warning_count += 1
            message = 'The input GeoLayer ID ({}) does not exist.'.format(geolayer_id)
            recommendation = 'Specify a valid GeoLayerID.'
            self.logger.error(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # If the input GeoLayer does exist, continue with the checks.
        else:
            # Get the existing attribute names of the input GeoLayer.
            list_of_existing_attributes = input_geolayer.get_attribute_field_names()

            # Create a list of invalid input attribute names. An invalid attribute name is an input attribute name
            # that is not matching any of the existing attribute names of the GeoLayer.
            invalid_attrs = (attr_name for attr_name in attribute_names
                             if attr_name not in list_of_existing_attributes)

            # Iterate over the invalid input attribute names and raise a FAILURE for each.
            for invalid_attr in invalid_attrs:
                remove_attribute = False
                self.warning_count += 1
                message = 'The attribute name ({}) is not valid.'.format(invalid_attr)
                recommendation = 'Specify a valid attribute name. Valid attributes for this layer are' \
                                 ' as follows: {}'.format(list_of_existing_attributes)
                self.logger.error(message)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Return the Boolean to determine if the attribute should be removed. If TRUE, all checks passed. If FALSE,
        # one or many checks failed.
        return remove_attribute

    def run_command(self):
        """
        Run the command. Remove the attribute(s) from the GeoLayer.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """
        # Obtain the parameter values.
        pv_GeoLayerID = self.get_parameter_value("GeoLayerID")
        pv_AttributeNames = self.get_parameter_value("AttributeNames")

        # Convert the AttributeNames parameter from a string to a list.
        attribute_names_list = string_util.delimited_string_to_list(pv_AttributeNames)

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.__should_attribute_be_removed(pv_GeoLayerID, attribute_names_list):
            try:
                # Get the input GeoLayer.
                input_geolayer = self.command_processor.get_geolayer(pv_GeoLayerID)

                # Remove each requested attribute from the GeoLayer.
                for attribute_name in attribute_names_list:
                    input_geolayer.remove_attribute(attribute_name)

            # Raise an exception if an unexpected error occurs during the process.
            except Exception:
                self.warning_count += 1
                message = "Unexpected error removing attribute(s) ({}) from GeoLayer {}.".format(
                    pv_AttributeNames, pv_GeoLayerID)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise RuntimeError if any errors occurred.
        if self.warning_count > 0:
            # Fixed typo: "proceeding" -> "processing".
            message = "There were {} warnings processing this command.".format(self.warning_count)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(
                CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class SetGeoLayerProperty(AbstractCommand):
    """
    The SetGeoLayerProperty command sets a GeoLayer property.

    These properties are useful for controlling processing logic, for example selecting only layers
    that have a specific property value, tracking the state of processing, and using for quality
    control on the layer.  The property values may not be able to be persisted because a layer
    format may not support user-defined properties.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("GeoLayerID", type("")),
        CommandParameterMetadata("PropertyName", type("")),
        CommandParameterMetadata("PropertyType", type("")),
        CommandParameterMetadata("PropertyValue", type(""))]

    def __init__(self):
        """
        Initialize a command instance, including the metadata used by the command editor.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "SetGeoLayerProperty"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Set the value of a GeoLayer property."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # GeoLayerID (fixed typo "GoeLayer" in the editor description)
        self.parameter_input_metadata['GeoLayerID.Description'] = "GeoLayer identifier"
        self.parameter_input_metadata['GeoLayerID.Label'] = "GeoLayerID"
        self.parameter_input_metadata['GeoLayerID.Required'] = True
        self.parameter_input_metadata['GeoLayerID.Tooltip'] = \
            "The GeoLayer identifier, can use ${Property}."
        # PropertyName
        self.parameter_input_metadata['PropertyName.Description'] = "property name"
        self.parameter_input_metadata['PropertyName.Label'] = "Property name"
        self.parameter_input_metadata['PropertyName.Required'] = True
        self.parameter_input_metadata['PropertyName.Tooltip'] = "The property name."
        # PropertyType
        self.parameter_input_metadata['PropertyType.Description'] = "property type"
        self.parameter_input_metadata['PropertyType.Label'] = "Property type"
        self.parameter_input_metadata['PropertyType.Required'] = True
        self.parameter_input_metadata['PropertyType.Tooltip'] = \
            "The property type as bool, float, int, or str."
        self.parameter_input_metadata['PropertyType.Values'] = ['bool', 'float', 'int', 'str']
        # PropertyValue
        self.parameter_input_metadata['PropertyValue.Description'] = "property value"
        self.parameter_input_metadata['PropertyValue.Label'] = "Property value"
        self.parameter_input_metadata['PropertyValue.Required'] = True
        self.parameter_input_metadata['PropertyValue.Tooltip'] = \
            "The property value, as a string that can convert to the given type."

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            Nothing.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""
        logger = logging.getLogger(__name__)

        # GeoLayerID is required
        # - non-empty, non-None string.
        # - existence of the GeoLayer will also be checked in run_command().
        pv_GeoLayerID = self.get_parameter_value(
            parameter_name='GeoLayerID', command_parameters=command_parameters)

        if not validators.validate_string(pv_GeoLayerID, False, False):
            message = "GeoLayerID parameter has no value."
            recommendation = "Specify the GeoLayerID parameter to indicate the GeoLayer to process."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # PropertyName is required
        pv_PropertyName = self.get_parameter_value(
            parameter_name='PropertyName', command_parameters=command_parameters)

        if not validators.validate_string(pv_PropertyName, False, False):
            message = "PropertyName parameter has no value."
            recommendation = "Specify a property name."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # PropertyType is required.
        # "long" is accepted for compatibility with TSTool commands; it is treated as int at runtime.
        pv_PropertyType = self.get_parameter_value(
            parameter_name='PropertyType', command_parameters=command_parameters)
        property_types = ["bool", "float", "int", "long", "str"]

        if not validators.validate_string_in_list(pv_PropertyType, property_types, False, False):
            # str() guards against concatenating None; also fixed the stray extra quote ('""').
            message = 'The requested property type "' + str(pv_PropertyType) + '" is invalid.'
            recommendation = "Specify a valid property type: " + str(property_types)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # PropertyValue is required
        pv_PropertyValue = self.get_parameter_value(
            parameter_name='PropertyValue', command_parameters=command_parameters)

        if not validators.validate_string(pv_PropertyValue, False, False):
            # TODO smalers 2017-12-28 add other parameters similar to TSTool to set special values
            message = "PropertyValue parameter is not specified."
            recommendation = "Specify a property value."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty
        # triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception
        if len(warning) > 0:
            logger.warning(warning)
            raise ValueError(warning)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(
            CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def run_command(self):
        """
        Run the command.  Set a GeoLayer property value.

        Returns:
            Nothing.

        Raises:
            RuntimeError if any exception occurs running the command.
        """
        warning_count = 0
        logger = logging.getLogger(__name__)
        pv_GeoLayerID = self.get_parameter_value("GeoLayerID")
        pv_PropertyName = self.get_parameter_value('PropertyName')
        pv_PropertyType = self.get_parameter_value('PropertyType')
        pv_PropertyValue = self.get_parameter_value('PropertyValue')

        # Expand the property value string before converting to the requested type
        pv_PropertyValue_expanded = self.command_processor.expand_parameter_value(pv_PropertyValue)

        try:
            # Convert the property value string to the requested type.
            pv_PropertyValue2 = None
            if pv_PropertyType == 'bool':
                # bool("False") evaluates True (any non-empty string does), so parse the
                # common true-strings explicitly; everything else is False.
                pv_PropertyValue2 = pv_PropertyValue_expanded.strip().lower() in ('true', 'yes', '1')
            elif pv_PropertyType == 'float':
                pv_PropertyValue2 = float(pv_PropertyValue_expanded)
            elif pv_PropertyType in ('int', 'long'):
                # Validation accepts 'long' for TSTool compatibility; previously it fell through
                # and silently set nothing.  Python 3 ints are unbounded, so int covers long.
                pv_PropertyValue2 = int(pv_PropertyValue_expanded)
            elif pv_PropertyType == 'str':
                pv_PropertyValue2 = str(pv_PropertyValue_expanded)

            # Now set the object as a property, will be the requested type.
            # NOTE(review): this also sets the property on the processor (not just the layer),
            # matching the original behavior -- confirm whether that is intentional.
            if pv_PropertyValue2 is not None:
                self.command_processor.set_property(pv_PropertyName, pv_PropertyValue2)

            # Get the GeoLayer object
            geolayer = self.command_processor.get_geolayer(pv_GeoLayerID)
            if geolayer is None:
                message = 'Unable to find GeoLayer for GeoLayerID="' + pv_GeoLayerID + '"'
                warning_count += 1
                logger.error(message)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message,
                                     "Check the log file for details."))
            else:
                geolayer.set_property(pv_PropertyName, pv_PropertyValue2)
        except Exception:
            warning_count += 1
            message = 'Unexpected error setting GeoLayer property "' + pv_PropertyName + '"'
            traceback.print_exc(file=sys.stdout)
            # logger.exception() appends the stack trace itself; the old call passed the
            # exception object as a lazy %-argument, which breaks logging's formatting.
            logger.exception(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 "Check the log file for details."))

        if warning_count > 0:
            message = "There were " + str(warning_count) + " warnings processing the command."
            raise RuntimeError(message)

        self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class CloseDataStore(AbstractCommand):
    """
    Close an existing open database connection.

    Command Parameters
    * DataStoreID (str, required): the DataStore identifier of the DataStore to close.
      ${Property} syntax enabled.
    * StatusMessage (str, optional): A status message to display when the DataStore information is
      viewed.  The status may be reset if the connection is automatically restored, for example
      when a subsequent database interaction occurs.
      NOTE(review): the documented default here previously disagreed with the editor metadata
      default ("DataStore [DataStoreID] has been closed") -- confirm the intended default.
      Note that this is a placeholder parameter ported over from the TSTool command
      (CloseDataStore).  It currently has no effect of the GeoProcessor environment.  In future
      development this message could be hooked into the log or the UI.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("DataStoreID", type("")),
        CommandParameterMetadata("StatusMessage", type(""))]

    def __init__(self):
        """
        Initialize the command, including the metadata used by the command editor.
        """
        # AbstractCommand data
        super().__init__()

        # Name of command for menu and window title
        self.command_name = "CloseDataStore"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "This command closes a DataStore connection."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # DataStoreID
        self.parameter_input_metadata['DataStoreID.Description'] = \
            "the identifier of the DataStore to close"
        self.parameter_input_metadata['DataStoreID.Label'] = "DataStoreID"
        self.parameter_input_metadata['DataStoreID.Required'] = True
        self.parameter_input_metadata['DataStoreID.Tooltip'] = \
            "The ID of the DataStore to close. Can be specified using ${Property}."
        # StatusMessage
        self.parameter_input_metadata['StatusMessage.Description'] = "a status message to display"
        self.parameter_input_metadata['StatusMessage.Label'] = "Status Message"
        self.parameter_input_metadata['StatusMessage.Tooltip'] = \
            "A status message to display when the datastore information is viewed.\nThe status may be reset if the " \
            "connection is automatically restored, for example when a subsequent database interaction occurs.\n" \
            "Can be specified using ${Property}."
        self.parameter_input_metadata['StatusMessage.Value.Default'] = \
            "DataStore [DataStoreID] has been closed"

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that the DataStoreID is a non-empty, non-None string.
        pv_DataStoreID = self.get_parameter_value(
            parameter_name="DataStoreID", command_parameters=command_parameters)

        if not validators.validate_string(pv_DataStoreID, False, False):
            message = "DataStoreID parameter has no value."
            recommendation = "Specify a valid DataStore ID."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(
            CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_close_datastore(self, datastore_id):
        """
        Checks the following:
        * the DataStore ID is an existing DataStore ID

        Args:
            datastore_id (str): the ID of the DataStore to close

        Returns:
            Boolean. If TRUE, the process should be run. If FALSE, it should not be run.
        """
        # List of Boolean values. The Boolean values correspond to the results of the following tests. If TRUE, the
        # test confirms that the command should be run.
        should_run_command = []

        # If the DataStore ID is not an existing DataStore ID, raise a FAILURE.
        should_run_command.append(
            validators.run_check(self, "IsDataStoreIdExisting", "DataStoreID", datastore_id, "FAIL"))

        # Return the Boolean to determine if the process should be run.
        return False not in should_run_command

    def run_command(self):
        """
        Run the command. Close an existing DataStore and remove it from the GeoProcessor.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """
        # Obtain the DataStoreID parameter value and expand for ${Property} syntax.
        pv_DataStoreID = self.get_parameter_value("DataStoreID")
        pv_DataStoreID = self.command_processor.expand_parameter_value(pv_DataStoreID, self)

        # Obtain the StatusMessage parameter value and expand for ${Property} syntax.
        pv_StatusMessage = self.get_parameter_value("StatusMessage")
        pv_StatusMessage = self.command_processor.expand_parameter_value(pv_StatusMessage, self)

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.__should_close_datastore(pv_DataStoreID):
            try:
                # Get the DataStore object
                datastore_obj = self.command_processor.get_datastore(pv_DataStoreID)

                # Close the database connection.
                datastore_obj.close_db_connection()

                # Update the DataStore's status message.
                datastore_obj.update_status_message(pv_StatusMessage)

            # Raise an exception if an unexpected error occurs during the process.
            except Exception:
                self.warning_count += 1
                message = "Unexpected error closing DataStore {}.".format(pv_DataStoreID)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise RuntimeError if any errors occurred.
        if self.warning_count > 0:
            # Fixed typo: "proceeding" -> "processing".
            message = "There were {} warnings processing this command.".format(self.warning_count)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(
                CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class CreatePointsAlongLine(AbstractCommand):
    """
    Creates points along the line features of a GeoLayer.

    NOTE(review): this command is a work in progress.  run_command() is not yet implemented, and
    the docstring/parameter scaffolding was copied from the CopyGeoLayer command (hence the
    CopiedGeoLayerID parameter name) -- the previous docstring incorrectly described the command
    as copying a GeoLayer.  Confirm the final parameter list before further documentation.

    Command Parameters
    * GeoLayerID (str, required): The ID of the existing input GeoLayer.
    * CopiedGeoLayerID (str, optional): The ID of the output GeoLayer.
      Default "{}_copy".format(GeoLayerID)
    * IfGeoLayerIDExists (str, optional): This parameter determines the action that occurs if the
      output GeoLayer ID already exists within the GeoProcessor.  Available options are:
      `Replace`, `ReplaceAndWarn`, `Warn` and `Fail` (Refer to user documentation for detailed
      description.)  Default value is `Replace`.
    """

    # Define command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("GeoLayerID", type("")),
        CommandParameterMetadata("CopiedGeoLayerID", type("")),
        CommandParameterMetadata("IfGeoLayerIDExists", type(""))]

    def __init__(self):
        """
        Initialize the command.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "CreatePointsAlongLine"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that parameter GeoLayerID is a non-empty, non-None string.
        pv_GeoLayerID = self.get_parameter_value(
            parameter_name='GeoLayerID', command_parameters=command_parameters)

        if not validators.validate_string(pv_GeoLayerID, False, False):
            message = "GeoLayerID parameter has no value."
            recommendation = "Specify the GeoLayerID parameter to indicate the GeoLayer to copy."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that optional parameter IfGeoLayerIDExists is either `Replace`, `ReplaceAndWarn`, `Warn`, `Fail`, None.
        pv_IfGeoLayerIDExists = self.get_parameter_value(
            parameter_name="IfGeoLayerIDExists", command_parameters=command_parameters)
        acceptable_values = ["Replace", "ReplaceAndWarn", "Warn", "Fail"]

        if not validators.validate_string_in_list(pv_IfGeoLayerIDExists,
                                                  acceptable_values,
                                                  none_allowed=True,
                                                  empty_string_allowed=True,
                                                  ignore_case=True):
            message = "IfGeoLayerIDExists parameter value ({}) is not recognized.".format(
                pv_IfGeoLayerIDExists)
            recommendation = "Specify one of the acceptable values ({}) for the IfGeoLayerIDExists parameter.".format(
                acceptable_values)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)
        else:
            # Refresh the phase severity
            self.command_status.refresh_phase_severity(
                CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_create_points(self, input_geolayer_id, output_geolayer_id):
        """
        Checks the following:
        * the ID of the input GeoLayer is an existing GeoLayer ID
        * the output GeoLayer ID does not conflict with an existing ID (honoring IfGeoLayerIDExists)

        TODO(review): the original docstring also listed "geometry is a LineString" and
        "distance is greater than 0" checks, but no such checks are implemented below.

        Args:
            input_geolayer_id: the ID of the input GeoLayer
            output_geolayer_id: the ID of the output GeoLayer

        Returns:
            run_copy: Boolean. If TRUE, the process should be run. If FALSE, it should not be run.
        """
        # Boolean to determine if the process should be run. Set to true until an error occurs.
        run_copy = True

        # If the input GeoLayer ID is not an existing GeoLayer ID, raise a FAILURE.
        if not self.command_processor.get_geolayer(input_geolayer_id):
            run_copy = False
            self.warning_count += 1
            message = 'The GeoLayerID ({}) is not a valid GeoLayer ID.'.format(input_geolayer_id)
            recommendation = 'Specify a valid GeoLayerID.'
            self.logger.error(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # If the output GeoLayer ID is the same as an already-registered GeoLayerID, react according to the
        # pv_IfGeoLayerIDExists value.
        elif self.command_processor.get_geolayer(output_geolayer_id):

            # Get the IfGeoLayerIDExists parameter value.
            pv_IfGeoLayerIDExists = self.get_parameter_value(
                "IfGeoLayerIDExists", default_value="Replace")

            # Warnings/recommendations if the OutputGeolayerID is the same as a registered GeoLayerID.
            message = 'The CopiedGeoLayerID ({}) value is already in use as a GeoLayer ID.'.format(
                output_geolayer_id)
            recommendation = 'Specify a new GeoLayerID.'

            # The options below are mutually exclusive, so use a single if/elif chain
            # (the original used a separate `if` for the WARN case; behavior is identical).
            # The registered GeoLayer should be replaced with the new GeoLayer (with warnings).
            if pv_IfGeoLayerIDExists.upper() == "REPLACEANDWARN":
                self.warning_count += 1
                self.logger.warning(message)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.WARNING, message, recommendation))

            # The registered GeoLayer should not be replaced. A warning should be logged.
            elif pv_IfGeoLayerIDExists.upper() == "WARN":
                run_copy = False
                self.warning_count += 1
                self.logger.warning(message)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.WARNING, message, recommendation))

            # The matching IDs should cause a FAILURE.
            elif pv_IfGeoLayerIDExists.upper() == "FAIL":
                run_copy = False
                self.warning_count += 1
                self.logger.error(message)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Return the Boolean to determine if the process should be run. If TRUE, all checks passed. If FALSE,
        # one or many checks failed.
        return run_copy

    def run_command(self):
        """
        Run the command.

        TODO(review): not yet implemented -- this command is a stub; the body intentionally
        does nothing until the points-along-line processing is written.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """
        pass
class WriteCommandSummaryToFile(AbstractCommand):
    """
    The WriteCommandSummaryToFile command writes a summary of command run information.
    CommandLogRecord instances are output to a simple HTML file.

    Command Parameters
    * OutputFile (str, required): the relative or absolute pathname of the summary
      HTML file to write.  ${Property} syntax is recognized.
    """

    # TODO smalers 2018-01-28 in the future allow "Format", with "HTML" as default.
    __command_parameter_metadata = [
        CommandParameterMetadata("OutputFile", type(""))
    ]

    def __init__(self):
        """
        Initialize a new instance of the command.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "WriteCommandSummaryToFile"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Write command logging messages to a summary file."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # OutputFile
        self.parameter_input_metadata['OutputFile.Description'] = "output file"
        self.parameter_input_metadata['OutputFile.Label'] = "Output File"
        self.parameter_input_metadata['OutputFile.Required'] = True
        self.parameter_input_metadata['OutputFile.Tooltip'] = (
            "The output file to write, as an absolute path or relative to the command file.\n"
            "Can use ${Property}.")
        self.parameter_input_metadata['OutputFile.FileSelector.Type'] = "Write"
        self.parameter_input_metadata['OutputFile.FileSelector.Title'] = "Select the output file"

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning_message = ""
        logger = logging.getLogger(__name__)

        # OutputFile is required
        pv_OutputFile = self.get_parameter_value(
            parameter_name='OutputFile', command_parameters=command_parameters)
        if not validators.validate_string(pv_OutputFile, False, False):
            message = "The OutputFile must be specified."
            recommendation = "Specify the output file."
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty
        # triggers an exception below.
        warning_message = command_util.validate_command_parameter_names(
            self, warning_message)

        # If any warnings were generated, throw an exception
        if len(warning_message) > 0:
            logger.warning(warning_message)
            raise ValueError(warning_message)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(
            CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def run_command(self):
        """
        Run the command.  Create a file summarizing command log messages, status, etc.

        Returns:
            None.

        Raises:
            RuntimeError: if a runtime input error occurs.
        """
        warning_count = 0
        logger = logging.getLogger(__name__)

        # Get data for the command
        pv_OutputFile = self.get_parameter_value('OutputFile')

        # Runtime checks on input:
        # expand ${Property} notation and convert to an absolute path for the operating system.
        pv_OutputFile_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(
                self.command_processor.get_property('WorkingDir'),
                self.command_processor.expand_parameter_value(
                    pv_OutputFile, self)))

        if warning_count > 0:
            message = "There were " + str(
                warning_count) + " warnings about command parameters."
            logger.warning(message)
            raise ValueError(message)

        # Create the command summary file
        try:
            # Open the file and write command summary.
            # A context manager guarantees the file is closed even if a write fails.
            logger.info('Writing summary to "' + pv_OutputFile_absolute + '"')
            with open(pv_OutputFile_absolute, "w") as fp:
                self.write_file_header(fp)
                self.write_command_summary(fp)
                self.write_file_footer(fp)
        except Exception:
            # Catch only Exception (not BaseException) so SystemExit/KeyboardInterrupt
            # are not swallowed; details go to the log via exc_info.
            warning_count += 1
            message = 'Unexpected error writing file "' + pv_OutputFile_absolute + '"'
            logger.error(message, exc_info=True)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 "See the log file for details."))

        if warning_count > 0:
            message = "There were " + str(
                warning_count) + " warnings processing the command."
            logger.warning(message)
            raise RuntimeError(message)

        self.command_status.refresh_phase_severity(CommandPhaseType.RUN,
                                                   CommandStatusType.SUCCESS)

    def write_command_summary(self, fp):
        """
        Write the command summary table.

        Args:
            fp: Open file object.

        Returns:
            None.
        """
        nl = os.linesep
        fp.write('<h1>Command Summary</h1>' + nl)
        fp.write('<table class="table-style-hover">' + nl)
        fp.write('<thead>' + nl)
        fp.write('<tr>' + nl)
        fp.write('<th>Command #</th>' + nl)
        fp.write('<th>Status (all)</th>' + nl)
        fp.write('<th>Status (init)</th>' + nl)
        # fp.write('<th>Max Status, discovery<th>' + nl)
        fp.write('<th>Status (run)</th>' + nl)
        fp.write('<th>Command</th>' + nl)
        fp.write('</tr>' + nl)
        fp.write('</thead>' + nl)

        # Output a list of all commands with the most severe status shown
        i = 0
        for command in self.command_processor.commands:
            i += 1
            fp.write('<tr>' + nl)
            fp.write('<td><a href="#' + str(i) + '">' + str(i) + '</a></td>' +
                     nl)
            # TODO smalers 2018-01-29 Why don't the commented calls work?
            # overall_status = command_util.get_highest_command_status_severity(self.command_status)
            # initialization_status = self.command_status.get_command_status_for_phase(CommandPhaseType.INITIALIZATION)
            initialization_status = command.command_status.initialization_status
            # run_status = self.command_status.get_command_status_for_phase(CommandPhaseType.RUN)
            run_status = command.command_status.run_status
            # Overall status is the more severe of the initialization and run status.
            overall_status = initialization_status
            if run_status.value > overall_status.value:
                overall_status = run_status
            fp.write('<td class="' + str(overall_status) + '">' +
                     str(overall_status) + '</td>' + nl)
            fp.write('<td class="' + str(initialization_status) + '">' +
                     str(initialization_status) + '</td>' + nl)
            fp.write('<td class="' + str(run_status) + '">' + str(run_status) +
                     '</td>' + nl)
            # Bug fix: close the <code> element with </code> (was a second opening tag).
            fp.write('<td><code>' + command.command_string + '</code></td>' + nl)
            fp.write('</tr>' + nl + nl)
        fp.write('</table>' + nl)

        # Output a table for each command
        i = 0
        for command in self.command_processor.commands:
            i += 1
            fp.write('<p>' + nl)
            initialization_status = command.command_status.initialization_status
            run_status = command.command_status.run_status
            fp.write('<a name="' + str(i) + '"></a>' + str(i) + ":" +
                     '<span class="' + str(initialization_status) +
                     '" style="border: solid; border-width: 1px;">' +
                     'Initialization</span> <span class="' + str(run_status) +
                     '" style="border: solid; border-width: 1px;">' +
                     'Run</span><code> ' + str(command.command_string) +
                     '</code>' + nl)
            fp.write('<table class="table-style-hover">' + nl)
            fp.write('<thead>' + nl)
            fp.write('<tr>' + nl)
            fp.write('<th>#</th>' + nl)
            fp.write('<th>#</th>' + nl)
            fp.write('<th>Phase</th>' + nl)
            fp.write('<th>Severity</th>' + nl)
            fp.write('<th>Problem</th>' + nl)
            fp.write('<th>Recommendation</th>' + nl)
            fp.write('</tr>' + nl)
            fp.write('</thead>' + nl)
            # Output initialization log records
            j = 0
            for log_record in command.command_status.initialization_log_list:
                j += 1
                fp.write('<tr>' + nl)
                # Bug fix: the cell was closed with </th> instead of </td>.
                fp.write('<td>' + str(j) + '</td>' + nl)
                fp.write('<td></td>' + nl)
                fp.write('<td>' + str(CommandPhaseType.INITIALIZATION) +
                         '</td>' + nl)
                fp.write('<td class="' + str(log_record.severity) + '">' +
                         str(log_record.severity) + '</td>' + nl)
                fp.write('<td>' + log_record.problem + '</td>' + nl)
                fp.write('<td>' + log_record.recommendation + '</td>' + nl)
                fp.write('</tr>' + nl)
            # Output discovery log records
            j = 0
            for log_record in command.command_status.discovery_log_list:
                j += 1
                fp.write('<tr>' + nl)
                fp.write('<td>' + str(j) + '</td>' + nl)
                fp.write('<td></td>' + nl)
                fp.write('<td>' + str(CommandPhaseType.DISCOVERY) + '</td>' +
                         nl)
                fp.write('<td class="' + str(log_record.severity) + '">' +
                         str(log_record.severity) + '</td>' + nl)
                fp.write('<td>' + log_record.problem + '</td>' + nl)
                fp.write('<td>' + log_record.recommendation + '</td>' + nl)
                fp.write('</tr>' + nl)
            # Output run log records
            j = 0
            for log_record in command.command_status.run_log_list:
                j += 1
                fp.write('<tr>' + nl)
                fp.write('<td>' + str(j) + '</td>' + nl)
                fp.write('<td></td>' + nl)
                fp.write('<td>' + str(CommandPhaseType.RUN) + '</td>' + nl)
                fp.write('<td class="' + str(log_record.severity) + '">' +
                         str(log_record.severity) + '</td>' + nl)
                fp.write('<td>' + log_record.problem + '</td>' + nl)
                fp.write('<td>' + log_record.recommendation + '</td>' + nl)
                fp.write('</tr>' + nl)
            # TODO smalers 2018-01-28 Need to figure out how to show records from original commands
            # when a command file is run by RunCommands() command
            fp.write('</table>' + nl)
            fp.write('<hr>' + nl)
            fp.write('</p>' + nl)

    def write_file_footer(self, fp):
        """
        Write the file footer (closes the body and html elements).

        Args:
            fp: Open file object.

        Returns:
            None.
        """
        nl = os.linesep
        fp.write('</body>' + nl)
        fp.write('</html>' + nl)

    def write_file_header(self, fp):
        """
        Write the file header, including the embedded CSS used to color status cells.

        Args:
            fp: Open file object.

        Returns:
            None.
        """
        nl = os.linesep
        fp.write('<!DOCTYPE html>' + nl)
        fp.write('<html>' + nl)
        fp.write('<head>' + nl)
        fp.write('<meta charset="utf-8">' + nl)
        fp.write('<style>' + nl)
        fp.write('   html * {' + nl)
        fp.write('     font-family:  "Arial", Helvetica, sans-serif;' + nl)
        fp.write('   }' + nl)
        fp.write('   code {' + nl)
        fp.write('     font-family:  monospace;' + nl)
        fp.write('     font-style:  normal;' + nl)
        fp.write('     font-size:  large;' + nl)
        fp.write('   }' + nl)
        fp.write('   table.table-style-hover {' + nl)
        fp.write(
            '     border-width: 1px; border-style: solid; border-color: #3A3A3A; '
            + 'border-collapse: collapse; padding: 4px;' + nl)
        fp.write('   }' + nl)
        fp.write('   table.table-style-hover th {' + nl)
        fp.write(
            '     border-width: 1px; border-style: solid; border-color: #3A3A3A; padding: 4px;'
            + nl)
        fp.write('   }' + nl)
        fp.write('   table.table-style-hover td {' + nl)
        fp.write(
            '     border-width: 1px; border-style: solid; border-color: #3A3A3A; padding: 4px;'
            + nl)
        fp.write('   }' + nl)
        # One CSS class per CommandStatusType; the class name must match str(status).
        fp.write('   .UNKNOWN {' + nl)
        fp.write('     background-color: rgb(200,200,200);' + nl)
        fp.write('   }' + nl)
        fp.write('   .SUCCESS {' + nl)
        fp.write('     background-color: rgb(0,255,0);' + nl)
        fp.write('   }' + nl)
        fp.write('   .WARNING {' + nl)
        fp.write('     background-color: rgb(255,255,0);' + nl)
        fp.write('   }' + nl)
        fp.write('   .FAILURE {' + nl)
        fp.write('     background-color: rgb(255,0,0);' + nl)
        fp.write('   }' + nl)
        fp.write('</style>' + nl)
        fp.write('<title>Command Summary</title>' + nl)
        fp.write('</head>' + nl)
        fp.write('<body>' + nl)
class RunSql(AbstractCommand):
    """
    Executes a Structured Query Language (SQL) statement on a DataStore.

    Command Parameters
    * DataStoreID (str, required): the DataStore identifier to run the SQL statement on.
      ${Property} notation enabled.
    * Sql (str, optional): The SQL statement text that will be executed, optionally using
      ${Property} notation to insert processor property values.  If specified, do not
      specify SqlFile or DataStoreProcedure.
    * SqlFile (str, optional): The name of the file containing an SQL statement to execute,
      optionally using ${Property} notation in the SQL file contents to insert processor
      property values.  If specified, do not specify Sql or DataStoreProcedure.
    * DataStoreProcedure (str, optional): The name of the database procedure to run.
      Currently, only procedures that do not require parameters can be run.  If specified,
      do not specify Sql or SqlFile.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("DataStoreID", type("")),
        CommandParameterMetadata("Sql", type("")),
        CommandParameterMetadata("SqlFile", type("")),
        CommandParameterMetadata("DataStoreProcedure", type(""))]

    def __init__(self):
        """
        Initialize the command.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "RunSql"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that the DataStoreID is a non-empty, non-None string.
        pv_DataStoreID = self.get_parameter_value(parameter_name="DataStoreID",
                                                  command_parameters=command_parameters)

        if not validators.validate_string(pv_DataStoreID, False, False):
            message = "DataStoreID parameter has no value."
            recommendation = "Specify a valid DataStore ID."
            warning += "\n" + message
            self.command_status.add_to_log(CommandPhaseType.INITIALIZATION,
                                           CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that one (and only one) sql method is a non-empty and non-None string.
        is_string_list = []
        sql_method_parameter_list = ["Sql", "SqlFile", "DataStoreProcedure"]

        for parameter in sql_method_parameter_list:
            parameter_value = self.get_parameter_value(parameter_name=parameter,
                                                       command_parameters=command_parameters)
            is_string_list.append(validators.validate_string(parameter_value, False, False))

        if not is_string_list.count(True) == 1:
            message = "Must enable one (and ONLY one) of the following parameters: {}".format(
                sql_method_parameter_list)
            recommendation = "Specify the value for one (and ONLY one) of the following parameters: {}".format(
                sql_method_parameter_list)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # TEMPORARY CHECK: Check that the DataStoreProcedure method is not being used.  Currently disabled until
        # future development.  Once developed, this check can be removed.
        else:
            pv_DataStoreProcedure = self.get_parameter_value(parameter_name="DataStoreProcedure",
                                                             command_parameters=command_parameters)
            if validators.validate_string(pv_DataStoreProcedure, none_allowed=False, empty_string_allowed=False):
                message = "DataStoreProcedure is not currently enabled."
                recommendation = "Specify the Sql method or the SqlFile method. "
                warning += "\n" + message
                self.command_status.add_to_log(
                    CommandPhaseType.INITIALIZATION,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_run_sql(self, datastore_id):
        """
        Checks the following:
        * the DataStore ID is an existing DataStore ID

        Args:
            datastore_id (str): the ID of the DataStore to close

        Returns:
            Boolean. If TRUE, the process should be run. If FALSE, it should not be run.
        """
        # List of Boolean values. The Boolean values correspond to the results of the following tests.
        # If TRUE, the test confirms that the command should be run.
        should_run_command = []

        # If the DataStore ID is not an existing DataStore ID, raise a FAILURE.
        should_run_command.append(validators.run_check(self, "IsDataStoreIdExisting", "DataStoreID",
                                                       datastore_id, "FAIL"))

        # Return the Boolean to determine if the process should be run.
        if False in should_run_command:
            return False
        else:
            return True

    def run_command(self):
        """
        Run the command.  Execute the Sql statement on the DataStore.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """
        # Obtain the parameter values.
        pv_DataStoreID = self.get_parameter_value("DataStoreID")
        pv_Sql = self.get_parameter_value("Sql")
        pv_SqlFile = self.get_parameter_value("SqlFile")
        pv_DataStoreProcedure = self.get_parameter_value("DataStoreProcedure")

        # Expand for ${Property} syntax.
        pv_DataStoreID = self.command_processor.expand_parameter_value(pv_DataStoreID, self)
        pv_Sql = self.command_processor.expand_parameter_value(pv_Sql, self)
        if pv_SqlFile:
            # Convert the SqlFile parameter to an absolute path for the operating system.
            pv_SqlFile = io_util.verify_path_for_os(io_util.to_absolute_path(
                self.command_processor.get_property('WorkingDir'),
                self.command_processor.expand_parameter_value(pv_SqlFile, self)))

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.__should_run_sql(pv_DataStoreID):
            try:
                # Get the DataStore object
                datastore_obj = self.command_processor.get_datastore(pv_DataStoreID)

                # If using the Sql method, the sql_statement is the user-provided sql statement.
                if pv_Sql:
                    sql_statement = pv_Sql

                # If using the SqlFile method, the sql_statement is read from the provided file.
                elif pv_SqlFile:
                    # Get the SQL statement from the file.
                    # Bug fix: use a context manager so the file handle is always closed.
                    with open(pv_SqlFile, 'r') as f:
                        sql_statement = f.read().strip()

                # If using the DataStoreProcedure method, ... .
                else:
                    sql_statement = None

                # Execute and commit the SQL statement.
                datastore_obj.run_sql(sql_statement)

            # Raise an exception if an unexpected error occurs during the process
            # (details are captured in the log via exc_info).
            except Exception:
                self.warning_count += 1
                message = "Unexpected error executing the Sql statement on the {} DataStore.".format(pv_DataStoreID)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(CommandPhaseType.RUN,
                                               CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise Runtime Error if any errors occurred
        if self.warning_count > 0:
            # Fixed message typo: "proceeding" -> "processing" (matches sibling commands).
            message = "There were {} warnings processing this command.".format(self.warning_count)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class EndIf(AbstractCommand):
    """
    The EndIf command indicates the end of an If block.
    """

    __command_parameter_metadata = [CommandParameterMetadata("Name", type(""))]

    def __init__(self):
        """
        Initialize the command instance.
        """
        super().__init__()
        self.command_name = "EndIf"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = (
            "This command ends a block of commands that start with an If command.\n"
            "The If and EndIf commands must have the same value for the Name parameter to "
            "allow the command processor to determine the start and end of the block."
        )
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # Name
        self.parameter_input_metadata[
            'Name.Description'] = "the name that will be matched with name of an If command"
        self.parameter_input_metadata['Name.Label'] = "Name"
        self.parameter_input_metadata['Name.Required'] = True
        self.parameter_input_metadata['Name.Tooltip'] = (
            "The name that will be matched with the name of an If command to indicate the block of commands in the "
            "if condition.")

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            Nothing.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""
        logger = logging.getLogger(__name__)

        # Name is required
        pv_Name = self.get_parameter_value(
            parameter_name='Name', command_parameters=command_parameters)
        if not validators.validate_string(pv_Name, False, False):
            message = "A name for the EndIf block must be specified"
            recommendation = "Specify the Name."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty
        # triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception
        if len(warning) > 0:
            # Bug fix: logger.warn() is a deprecated alias; use logger.warning()
            # (consistent with the other commands in this file).
            logger.warning(warning)
            raise ValueError(warning)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(
            CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def get_name(self):
        """
        Return the name of the EndIf (will match name of corresponding If).

        Returns:
            The name of the EndIf (will match name of corresponding If).
        """
        return self.command_parameters.get("Name", None)

    def run_command(self):
        """
        Run the command.  Does not do anything since the command is just a place-holder to match If().

        Returns:
            Nothing.
        """
        pass
class RunProgram(AbstractCommand): """ The RunProgram command runs a command file. """ __command_parameter_metadata = [ CommandParameterMetadata("CommandLine", type("")), CommandParameterMetadata("UseCommandShell", type("")), CommandParameterMetadata("IncludeParentEnvVars", type("")), CommandParameterMetadata("IncludeEnvVars", type("")), CommandParameterMetadata( "IncludeEnvVarName1", type("")), # Used for complex values difficult to parse CommandParameterMetadata("IncludeEnvVarValue1", type("")), CommandParameterMetadata("IncludeEnvVarName2", type("")), CommandParameterMetadata("IncludeEnvVarValue2", type("")), CommandParameterMetadata("IncludeEnvVarName3", type("")), CommandParameterMetadata("IncludeEnvVarValue3", type("")), CommandParameterMetadata("IncludeEnvVarName4", type("")), CommandParameterMetadata("IncludeEnvVarValue4", type("")), CommandParameterMetadata("IncludeEnvVarName5", type("")), CommandParameterMetadata("IncludeEnvVarValue5", type("")), CommandParameterMetadata("ExcludeEnvVars", type("")), CommandParameterMetadata("OutputFiles", type("")) ] # Choices for UseCommandShell, used to validate parameter and display in editor __choices_UseCommandShell = ["False", "True"] # Choices for IncludeParentEnvVars, used to validate parameter and display in editor __choices_IncludeParentEnvVars = ["False", "True"] def __init__(self): """ Initialize a new instance of the command. """ # AbstractCommand data super().__init__() self.command_name = "RunProgram" self.command_parameter_metadata = self.__command_parameter_metadata # Command metadata for command editor display self.command_metadata = dict() self.command_metadata['Description'] = ( "Run an external program, given the full command line, " "and wait until the program is finished before processing additional commands. 
" ) self.command_metadata['EditorType'] = "Simple" # Parameter metadata self.parameter_input_metadata = dict() # CommandLine self.parameter_input_metadata[ 'CommandLine.Description'] = "command line with arguments" self.parameter_input_metadata['CommandLine.Label'] = "Command to run" self.parameter_input_metadata['CommandLine.Tooltip'] = "" self.parameter_input_metadata['CommandLine.Required'] = True # UseCommandShell self.parameter_input_metadata[ 'UseCommandShell.Description'] = "use command shell" self.parameter_input_metadata[ 'UseCommandShell.Label'] = "Use command shell?" self.parameter_input_metadata['UseCommandShell.Tooltip'] = "" self.parameter_input_metadata['UseCommandShell.Values'] = [ "", "False", "True" ] self.parameter_input_metadata[ 'UseCommandShell.Value.Default'] = "False" # IncludeParentEnvVars self.parameter_input_metadata['IncludeParentEnvVars.Description'] = "" self.parameter_input_metadata[ 'IncludeParentEnvVars.Label'] = "Include parent environment variables" self.parameter_input_metadata['IncludeParentEnvVars.Tooltip'] = ( "Indicate whether the parent environment variables should be passed to the program run environment." 
) self.parameter_input_metadata['IncludeParentEnvVars.Values'] = [ "", "True", "False" ] self.parameter_input_metadata[ 'IncludeParentEnvVars.Value.Default'] = "True" # IncludeEnvVars self.parameter_input_metadata['IncludeEnvVars.Description'] = "" self.parameter_input_metadata[ 'IncludeEnvVars.Label'] = "Include environment variables" self.parameter_input_metadata['IncludeEnvVars.Tooltip'] = ( "Specify environment variables to be defined for the program run environment in format:" "VAR1=Value1,VAR2=Value2.") # IncludeEnvVarName1 self.parameter_input_metadata['IncludeEnvVarName1.Description'] = "" self.parameter_input_metadata[ 'IncludeEnvVarName1.Label'] = 'Include environment variable name 1' self.parameter_input_metadata['IncludeEnvVarName1.Tooltip'] = ( "Specify the name of as single environment variable to be defined for the program run environment." ) # IncludeEnvVarValue1 self.parameter_input_metadata['IncludeEnvVarValue1.Description'] = "" self.parameter_input_metadata[ 'IncludeEnvVarValue1.Label'] = 'Include environment variable value 1' self.parameter_input_metadata['IncludeEnvVarValue1.Tooltip'] = ( "Specify the value of as single environment variable to be defined for the program run environment. " ) # IncludeEnvVarName2 self.parameter_input_metadata['IncludeEnvVarName2.Description'] = "" self.parameter_input_metadata[ 'IncludeEnvVarName2.Label'] = 'Include environment variable name 2' self.parameter_input_metadata['IncludeEnvVarName2.Tooltip'] = ( "Specify the name of as single environment variable to be defined for the program run environment. " ) # IncludeEnvVarValue2 self.parameter_input_metadata['IncludeEnvVarValue2.Description'] = "" self.parameter_input_metadata[ 'IncludeEnvVarValue2.Label'] = 'Include environment variable value 2' self.parameter_input_metadata['IncludeEnvVarValue2.Tooltip'] = ( "Specify the value of as single environment variable to be defined for the program run environment. 
" ) # IncludeEnvVarName3 self.parameter_input_metadata['IncludeEnvVarName3.Description'] = "" self.parameter_input_metadata[ 'IncludeEnvVarName3.Label'] = 'Include environment variable name 3' self.parameter_input_metadata['IncludeEnvVarName3.Tooltip'] = ( "Specify the name of as single environment variable to be defined for the program run environment. " ) # IncludeEnvVarValue3 self.parameter_input_metadata['IncludeEnvVarValue3.Description'] = "" self.parameter_input_metadata[ 'IncludeEnvVarValue3.Label'] = 'Include environment variable value 3' self.parameter_input_metadata['IncludeEnvVarValue3.Tooltip'] = ( "Specify the value of as single environment variable to be defined for the program run environment. " ) # IncludeEnvVarName4 self.parameter_input_metadata['IncludeEnvVarName4.Description'] = "" self.parameter_input_metadata[ 'IncludeEnvVarName4.Label'] = 'Include environment variable name 4' self.parameter_input_metadata['IncludeEnvVarName4.Tooltip'] = ( "Specify the name of as single environment variable to be defined for the program run environment. " ) # IncludeEnvVarValue4 self.parameter_input_metadata['IncludeEnvVarValue4.Description'] = "" self.parameter_input_metadata[ 'IncludeEnvVarValue4.Label'] = 'Include environment variable value 4' self.parameter_input_metadata['IncludeEnvVarValue4.Tooltip'] = ( "Specify the value of as single environment variable to be defined for the program run environment. " ) # IncludeEnvVarName5 self.parameter_input_metadata['IncludeEnvVarName5.Description'] = "" self.parameter_input_metadata[ 'IncludeEnvVarName5.Label'] = 'Include environment variable name 5' self.parameter_input_metadata['IncludeEnvVarName5.Tooltip'] = ( "Specify the name of as single environment variable to be defined for the program run environment. 
" ) # IncludeEnvVarValue5 self.parameter_input_metadata['IncludeEnvVarValue5.Description'] = "" self.parameter_input_metadata[ 'IncludeEnvVarValue5.Label'] = 'Include environment variable value 5' self.parameter_input_metadata['IncludeEnvVarValue5.Tooltip'] = ( "Specify the value of as single environment variable to be defined for the program run environment. " ) # ExcludeEnvVars self.parameter_input_metadata['ExcludeEnvVars.Description'] = "" self.parameter_input_metadata[ 'ExcludeEnvVars.Label'] = 'Exclude environment variables' self.parameter_input_metadata['ExcludeEnvVars.Tooltip'] = ( "Specify environment variables to be removed from the program run environment, separated by commas." ) # OutputFiles self.parameter_input_metadata['OutputFiles.Description'] = "" self.parameter_input_metadata['OutputFiles.Label'] = "Output files" self.parameter_input_metadata['OutputFiles.Tooltip'] = ( "Specify the output files, separated by commas. Can specify with ${Property}." ) def check_command_parameters(self, command_parameters): """ Check the command parameters for validity. Args: command_parameters: the dictionary of command parameters to check (key:string_value) Returns: Nothing. Raises: ValueError if any parameters are invalid or do not have a valid value. The command status messages for initialization are populated with validation messages. """ warning_message = "" logger = logging.getLogger(__name__) # CommandLine is required, pending other options pv_CommandLine = self.get_parameter_value( parameter_name='CommandLine', command_parameters=command_parameters) if not validators.validate_string(pv_CommandLine, False, False): message = "The CommandLine must be specified." recommendation = "Specify the command line." 
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # IncludeParentEnvVars is optional, will default to True at runtime
        pv_IncludeParentEnvVars = self.get_parameter_value(
            parameter_name='IncludeParentEnvVars',
            command_parameters=command_parameters)
        if not validators.validate_string_in_list(
                pv_IncludeParentEnvVars, self.__choices_IncludeParentEnvVars,
                True, True):
            message = "IncludeParentEnvVars parameter is invalid."
            recommendation = "Specify the IncludeParentEnvVars parameter as blank or one of " + \
                             str(self.__choices_IncludeParentEnvVars)
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # UseCommandShell is optional, will default to False at runtime
        pv_UseCommandShell = self.get_parameter_value(
            parameter_name='UseCommandShell',
            command_parameters=command_parameters)
        if not validators.validate_string_in_list(
                pv_UseCommandShell, self.__choices_UseCommandShell, True,
                True):
            message = "UseCommandShell parameter is invalid."
            recommendation = "Specify the UseCommandShell parameter as blank or one of " + \
                             str(self.__choices_UseCommandShell)
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # TODO smalers 2018-12-16 need to make sure IncludeEnvVars and ExcludeEnvVars are valid lists
        # - for now allow any string to be specified

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty
        # triggers an exception below.
        warning_message = command_util.validate_command_parameter_names(
            self, warning_message)

        # If any warnings were generated, throw an exception
        if len(warning_message) > 0:
            logger.warning(warning_message)
            raise ValueError(warning_message)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(
            CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def create_env_dict(self, include_parent_env_vars, include_env_vars_dict,
                        exclude_env_vars_list):
        """
        Create the environment variable dictionary for the called program.

        Args:
            include_parent_env_vars: If True, include the parent environment variables.
                If include_env_vars_dict and exclude_env_vars_list are None, return None since None
                will cause the parent environment to be passed by default.
                If include_env_vars_dict or exclude_env_vars_list are not None, copy the parent
                environment variables and then modify as per include_env_vars_dict and
                exclude_env_vars_list.
            include_env_vars_dict: A dictionary of environment variables to include in the environment.
                Any variables found in the environment variable list will be reset to the given value.
            exclude_env_vars_list: A list of environment variable names to not include in the
                environment. Variables will be removed from the returned dictionary.

        Returns:
            A dictionary of environment variables to use when calling the program,
            or None to default to the entire parent environment.
        """
        if include_env_vars_dict is not None or exclude_env_vars_list is not None:
            # Need to process the dictionary specifically
            env_dict = {}
            # First, if the parent environment variables are to be used, create a dictionary that has
            # a copy of all parent environment variables.
            if include_parent_env_vars:
                for key, value in os.environ.items():
                    env_dict[key] = value
            # Add the environment variables to be included.
            # (Included variables override any parent values copied above.)
            if include_env_vars_dict is not None:
                for key, value in include_env_vars_dict.items():
                    env_dict[key] = value
            # Remove the environment variables to be excluded.
            if exclude_env_vars_list is not None:
                for key in exclude_env_vars_list:
                    try:
                        del env_dict[key]
                    except KeyError:
                        # OK to ignore because may not exist in the dictionary
                        pass
            # Return the environment variable dictionary
            return env_dict
        else:
            # No granular handling of environment variables occurs
            if include_parent_env_vars:
                # All of the parent environment variables should be used
                # - return None since the default is to use the parent environment
                return None
            else:
                # Don't want the parent environment to be visible to called program.
                # - because None would return the default, create an empty dictionary
                # - also add SystemRoot as per Python documentation, to find DLLs.
                env_dict = {}
                if os_util.is_windows_os():
                    env_dict['SystemRoot'] = os.environ['SystemRoot']
                return env_dict

    def run_command(self):
        """
        Run the command. Run the program, which can generate output files.

        Returns:
            None.

        Raises:
            ValueError: if a runtime input error occurs.
            RuntimeError: if a runtime error occurs.
        """
        warning_count = 0
        logger = logging.getLogger(__name__)
        logger.info('In RunProgram.run_command')

        # Get data for the command
        # NOTE(review): this print() writes debug output to stdout on every run — presumably
        # leftover debugging; consider logger.debug() instead. TODO confirm before removing.
        print("command parameters=" +
              string_util.format_dict(self.command_parameters))
        pv_CommandLine = self.get_parameter_value('CommandLine')
        pv_UseCommandShell = self.get_parameter_value('UseCommandShell')
        use_command_shell = False  # Default
        if pv_UseCommandShell is not None and pv_UseCommandShell == 'True':
            use_command_shell = True
        pv_IncludeParentEnvVars = self.get_parameter_value(
            'IncludeParentEnvVars')
        include_parent_env_vars = True  # Default
        if pv_IncludeParentEnvVars is not None and pv_IncludeParentEnvVars == 'False':
            include_parent_env_vars = False
        pv_IncludeEnvVars = self.get_parameter_value('IncludeEnvVars')
        include_env_vars_dict = None
        if pv_IncludeEnvVars is not None and pv_IncludeEnvVars != "":
            # Have specified environment variables to include
            # - expand the environment variable value using processor properties
            include_env_vars_dict = \
                string_util.delimited_string_to_dictionary_one_value(
                    pv_IncludeEnvVars, key_value_delimiter="=", trim=True)
            # Reassigning existing keys while iterating is safe here because the key set
            # does not change.
            for key, value in include_env_vars_dict.items():
                include_env_vars_dict[
                    key] = self.command_processor.expand_parameter_value(
                        value, self)

        # Add environment variables individually by name
        # - these are used when a list of parameters is difficult to parse
        # - this is kind of ugly but meets requirements in the short term
        pv_IncludeEnvVarName1 = self.get_parameter_value('IncludeEnvVarName1')
        pv_IncludeEnvVarValue1 = self.get_parameter_value(
            'IncludeEnvVarValue1')
        if pv_IncludeEnvVarName1 is not None and pv_IncludeEnvVarName1 != "":
            if include_env_vars_dict is None:
                include_env_vars_dict = {}
            include_env_vars_dict[
                pv_IncludeEnvVarName1] = pv_IncludeEnvVarValue1
        pv_IncludeEnvVarName2 = self.get_parameter_value('IncludeEnvVarName2')
        pv_IncludeEnvVarValue2 = self.get_parameter_value(
            'IncludeEnvVarValue2')
        if pv_IncludeEnvVarName2 is not None and pv_IncludeEnvVarName2 != "":
            if include_env_vars_dict is None:
                include_env_vars_dict = {}
            include_env_vars_dict[
                pv_IncludeEnvVarName2] = pv_IncludeEnvVarValue2
        pv_IncludeEnvVarName3 = self.get_parameter_value('IncludeEnvVarName3')
        pv_IncludeEnvVarValue3 = self.get_parameter_value(
            'IncludeEnvVarValue3')
        if pv_IncludeEnvVarName3 is not None and pv_IncludeEnvVarName3 != "":
            if include_env_vars_dict is None:
                include_env_vars_dict = {}
            include_env_vars_dict[
                pv_IncludeEnvVarName3] = pv_IncludeEnvVarValue3
        pv_IncludeEnvVarName4 = self.get_parameter_value('IncludeEnvVarName4')
        pv_IncludeEnvVarValue4 = self.get_parameter_value(
            'IncludeEnvVarValue4')
        if pv_IncludeEnvVarName4 is not None and pv_IncludeEnvVarName4 != "":
            if include_env_vars_dict is None:
                include_env_vars_dict = {}
            include_env_vars_dict[
                pv_IncludeEnvVarName4] = pv_IncludeEnvVarValue4
        pv_IncludeEnvVarName5 = self.get_parameter_value('IncludeEnvVarName5')
        pv_IncludeEnvVarValue5 = self.get_parameter_value(
            'IncludeEnvVarValue5')
        if pv_IncludeEnvVarName5 is not None \
                and pv_IncludeEnvVarName5 != "":
            if include_env_vars_dict is None:
                include_env_vars_dict = {}
            include_env_vars_dict[
                pv_IncludeEnvVarName5] = pv_IncludeEnvVarValue5
        pv_ExcludeEnvVars = self.get_parameter_value('ExcludeEnvVars')
        exclude_env_vars_list = None
        if pv_ExcludeEnvVars is not None and pv_ExcludeEnvVars != "":
            # Have specified environment variables to exclude
            exclude_env_vars_list = string_util.delimited_string_to_list(
                pv_ExcludeEnvVars, trim=True)
        pv_OutputFiles = self.get_parameter_value('OutputFiles')
        output_files_list = None
        if pv_OutputFiles is not None and pv_OutputFiles != "":
            # Have specified output files to add to command output files
            output_files_list = string_util.delimited_string_to_list(
                pv_OutputFiles, trim=True)
            # Expand each output file (relative paths are resolved against WorkingDir and
            # ${Property} syntax is expanded).
            ifile = -1
            for output_file in output_files_list:
                ifile = ifile + 1
                output_files_list[ifile] = io_util.verify_path_for_os(
                    io_util.to_absolute_path(
                        self.command_processor.get_property('WorkingDir'),
                        self.command_processor.expand_parameter_value(
                            output_file, self)))
        logger.info('Command line before expansion="' + pv_CommandLine + '"')

        # Runtime checks on input
        command_line_expanded = self.command_processor.expand_parameter_value(
            pv_CommandLine, self)

        if warning_count > 0:
            message = "There were " + str(
                warning_count) + " warnings about command parameters."
            logger.warning(message)
            raise ValueError(message)

        # Run the program as a subprocess
        try:
            logger.info('Running command line "' + command_line_expanded +
                        '"')
            # Create the environment dictionary
            env_dict = self.create_env_dict(include_parent_env_vars,
                                            include_env_vars_dict,
                                            exclude_env_vars_list)
            # NOTE(review): debug print to stdout — consider logger.debug(). TODO confirm.
            print("env_dict=" + string_util.format_dict(env_dict))
            # TODO smalers 2018-12-16 evaluate using shlex.quote() to handle command string
            # TODO smalers 2018-12-16 handle standard input and output
            p = subprocess.Popen(command_line_expanded,
                                 shell=use_command_shell,
                                 env=env_dict)
            # Wait for the process to terminate since need it to be done before other commands do their work
            # with the command output.
            p.wait()
            # poll() after wait() returns the final exit status of the child.
            return_status = p.poll()
            if return_status != 0:
                warning_count += 1
                message = 'Nonzero return status running program "' + command_line_expanded + '"'
                logger.error(message, exc_info=True)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message,
                                     "See the log file for details."))
        except Exception as e:
            warning_count += 1
            message = 'Unexpected error running program "' + command_line_expanded + '"'
            logger.error(message, exc_info=True)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 "See the log file for details."))
        # NOTE(review): this bare except is only reached for BaseException subclasses
        # (e.g. KeyboardInterrupt, SystemExit) since Exception is caught above — it also
        # swallows interpreter-exit requests. Looks redundant/overbroad; confirm intent.
        except:
            warning_count += 1
            message = 'Unexpected error running program "' + command_line_expanded + '"'
            logger.error(message, exc_info=True)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 "See the log file for details."))

        # If any output files were indicated, add to the command output if they exist
        if output_files_list is not None and len(output_files_list) > 0:
            for output_file in output_files_list:
                if os.path.isfile(output_file):
                    # Add the log file to output
                    self.command_processor.add_output_file(output_file)

        if warning_count > 0:
            message = "There were " + str(
                warning_count) + " warnings processing the command."
            logger.warning(message)
            raise RuntimeError(message)

        self.command_status.refresh_phase_severity(CommandPhaseType.RUN,
                                                   CommandStatusType.SUCCESS)
class ReadGeoLayerFromDelimitedFile(AbstractCommand):
    """
    Reads a GeoLayer from a delimited spatial data file.

    This command reads a layer from a delimited file and creates a GeoLayer object within the
    geoprocessor. The GeoLayer can then be accessed in the geoprocessor by its identifier and further
    processed.

    GeoLayers read from a delimited file hold point features. It is required that the delimited file has a column
    representing each feature's x coordinates and a column representing each feature's y coordinates. The other
    columns within the delimited file, if any, are included in the GeoLayer's attribute tables as individual
    attributes.

    In order for the geoprocessor to use and manipulate spatial data files, GeoLayers are instantiated as
    `QgsVectorLayer <https://qgis.org/api/classQgsVectorLayer.html>`_ objects.

    Command Parameters
    * DelimitedFile (str, required): The path (relative or absolute) to the delimited file to be read.
    * GeometryFormat (str, required): The geometry representation used within the delimited file. Must either be
        `XY` or `WKT`.
    * XColumn (str, required if GeometryFormat is `XY`): The name of the delimited file column that holds the
        x coordinate data.
    * YColumn (str, required if GeometryFormat is `XY`): The name of the delimited file column that holds the
        y coordinate data.
    * WKTColumn (str, required if GeometryFormat is `WKT`): The name of the delimited file column that holds the
        WKT formatted geometries.
    * CRS (str, required): The coordinate reference system associated with the X and Y coordinates (in EPSG code).
    * Delimiter (str, optional): The delimiter symbol used in the delimited file. Defaulted to comma.
    * GeoLayerID (str, optional): the GeoLayer identifier. If None, the spatial data filename (without the
        .geojson extension) will be used as the GeoLayer identifier. For example: If GeoLayerID is None and the
        absolute pathname to the spatial data file is C:/Desktop/Example/example_file.geojson, then the
        GeoLayerID will be `example_file`.
    * IfGeoLayerIDExists (str, optional): This parameter determines the action that occurs if the GeoLayerID
        already exists within the GeoProcessor. Available options are: `Replace`, `ReplaceAndWarn`, `Warn` and
        `Fail` (Refer to user documentation for detailed description.) Default value is `Replace`.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("DelimitedFile", type("")),
        CommandParameterMetadata("GeometryFormat", type("")),
        CommandParameterMetadata("XColumn", type("")),
        CommandParameterMetadata("YColumn", type("")),
        CommandParameterMetadata("WKTColumn", type("")),
        CommandParameterMetadata("CRS", type("")),
        CommandParameterMetadata("Delimiter", type("")),
        CommandParameterMetadata("GeoLayerID", type("")),
        CommandParameterMetadata("IfGeoLayerIDExists", type(""))
    ]

    def __init__(self):
        """
        Initialize the command.
        """

        # AbstractCommand data
        super().__init__()
        self.command_name = "ReadGeoLayerFromDelimitedFile"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "This command reads a GeoLayer from a delimited file."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # DelimitedFile
        self.parameter_input_metadata['DelimitedFile.Description'] = "delimited file to read"
        self.parameter_input_metadata['DelimitedFile.Label'] = "Delimited file"
        self.parameter_input_metadata['DelimitedFile.Required'] = True
        self.parameter_input_metadata['DelimitedFile.Tooltip'] = \
            "The delimited file to read (relative or absolute path). ${Property} syntax is recognized."
        self.parameter_input_metadata['DelimitedFile.FileSelector.Type'] = "Read"
        self.parameter_input_metadata['DelimitedFile.FileSelector.Title'] = "Select a delimited file to read."
        # CRS
        self.parameter_input_metadata['CRS.Description'] = "coordinate reference system"
        self.parameter_input_metadata['CRS.Label'] = "CRS"
        self.parameter_input_metadata['CRS.Required'] = True
        self.parameter_input_metadata['CRS.Tooltip'] = (
            "The coordinate reference system of the geometry in the delimited file.\n"
            "EPSG or ESRI code format required (e.g. EPSG:4326, EPSG:26913, ESRI:102003).")
        # GeometryFormat
        self.parameter_input_metadata['GeometryFormat.Description'] = \
            "the geometry representation in the delimited file"
        self.parameter_input_metadata['GeometryFormat.Label'] = "Geometry format"
        self.parameter_input_metadata['GeometryFormat.Required'] = True
        self.parameter_input_metadata['GeometryFormat.Tooltip'] = (
            "The geometry representation in the delimited file. "
            "Must be one of the following options: \n"
            "XY: The geometry is stored in two columns of the delimited file. "
            "One column holds the X coordinates and the other column holds the Y coordinates. \n"
            "WKT: The geometry is stored in one column of the delimited file."
            "The geometry is in Well Known Text representation.")
        # Fix: removed the stray "Y" entry — check_command_parameters() only accepts "XY" and "WKT",
        # so offering "Y" in the editor guaranteed an initialization failure.
        self.parameter_input_metadata['GeometryFormat.Values'] = ["", "XY", "WKT"]
        # XColumn
        self.parameter_input_metadata['XColumn.Description'] = "column name for X coordinate"
        self.parameter_input_metadata['XColumn.Label'] = "X column"
        self.parameter_input_metadata['XColumn.Tooltip'] = \
            "The name of the column in the delimited file that holds the X coordinate data."
        self.parameter_input_metadata['XColumn.Value.Default.Description'] = \
            "If GeometryFormat is XY, this parameter is required. Otherwise this parameter is ignored."
        # YColumn
        self.parameter_input_metadata['YColumn.Description'] = "column name for Y coordinate"
        self.parameter_input_metadata['YColumn.Label'] = "Y column"
        self.parameter_input_metadata['YColumn.Tooltip'] = \
            "The name of the column in the delimited file that holds the Y coordinate data."
        # Fix: use the 'Value.Default.Description' key, consistent with XColumn and WKTColumn
        # (the text is a description of the default behavior, not an actual default value).
        self.parameter_input_metadata['YColumn.Value.Default.Description'] = \
            "If GeometryFormat is XY, this parameter is required. Otherwise, this parameter is ignored."
        # WKTColumn
        self.parameter_input_metadata['WKTColumn.Description'] = "column name for WKT geometry data"
        self.parameter_input_metadata['WKTColumn.Label'] = "WKT column"
        self.parameter_input_metadata['WKTColumn.Tooltip'] = \
            "The name of the column in the delimited file that holds the WKT geometry data."
        self.parameter_input_metadata['WKTColumn.Value.Default.Description'] = \
            "If GeometryFormat is WKT, this parameter is required. Otherwise, this parameter is ignored."
        # Delimiter
        self.parameter_input_metadata['Delimiter.Description'] = "delimiter character"
        self.parameter_input_metadata['Delimiter.Label'] = "Delimiter"
        self.parameter_input_metadata['Delimiter.Tooltip'] = \
            "The delimiter used to separate the columns of the delimited file."
        self.parameter_input_metadata['Delimiter.Value.Default'] = ","
        # GeoLayerID
        self.parameter_input_metadata['GeoLayerID.Description'] = "output GeoLayer identifier"
        self.parameter_input_metadata['GeoLayerID.Label'] = "GeoLayerID"
        self.parameter_input_metadata['GeoLayerID.Tooltip'] = \
            "A GeoLayer identifier. Formatting characters are recognized."
        self.parameter_input_metadata['GeoLayerID.Value.Default'] = (
            "The delimited filename without the leading path and without the file extension "
            "(equivalent to formatting character %f).")
        # IfGeoLayerIDExists
        self.parameter_input_metadata['IfGeoLayerIDExists.Description'] = "action if GeoLayerID exists"
        self.parameter_input_metadata['IfGeoLayerIDExists.Label'] = "If GeoLayerID exists"
        self.parameter_input_metadata['IfGeoLayerIDExists.Tooltip'] = (
            "The action that occurs if the OutputGeoLayerID already exists within the GeoProcessor.\n"
            "Replace : The existing GeoLayer within the GeoProcessor is overwritten with the new GeoLayer. "
            "No warning is logged.\n"
            "ReplaceAndWarn: The existing GeoLayer within the GeoProcessor is overwritten with the new GeoLayer. "
            "A warning is logged. \nWarn : The new GeoLayer is not created. A warning is logged. \n"
            "Fail : The new GeoLayer is not created. A fail message is logged.")
        self.parameter_input_metadata['IfGeoLayerIDExists.Values'] = [
            "", "Replace", "ReplaceAndWarn", "Warn", "Fail"
        ]
        self.parameter_input_metadata['IfGeoLayerIDExists.Value.Default'] = "Replace"

        # Class data
        # warning_count accumulates RUN-phase warnings; run_command() raises if it is nonzero.
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that the required parameters have a non-empty, non-None string value.
        # Fix: Delimiter and GeoLayerID were previously in this list even though they are
        # documented as optional and default at runtime (',' and '%f' respectively in
        # run_command()), which forced users to always specify them.
        for parameter in ['DelimitedFile', 'CRS']:

            # Get the parameter value.
            parameter_value = self.get_parameter_value(
                parameter_name=parameter, command_parameters=command_parameters)

            # Check that the parameter value is a non-empty, non-None string. If not, raise a FAILURE.
            if not validators.validate_string(parameter_value, False, False):
                message = "{} parameter has no value.".format(parameter)
                recommendation = "Specify the {} parameter.".format(parameter)
                warning += "\n" + message
                self.command_status.add_to_log(
                    CommandPhaseType.INITIALIZATION,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that the GeometryFormat is either `XY` or `WKT`.
        pv_GeometryFormat = self.get_parameter_value(
            parameter_name="GeometryFormat", command_parameters=command_parameters)
        acceptable_values = ["WKT", "XY"]
        if not validators.validate_string_in_list(pv_GeometryFormat,
                                                  acceptable_values,
                                                  none_allowed=False,
                                                  empty_string_allowed=False,
                                                  ignore_case=True):
            message = "GeometryFormat parameter value ({}) is not recognized.".format(pv_GeometryFormat)
            recommendation = "Specify one of the acceptable values ({}) for the GeometryFormat parameter.".format(
                acceptable_values)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # The GeometryFormat is valid: check that the corresponding column-name parameters are specified.
        else:

            # If the pv_GeometryFormat is "WKT" then check that the WKTColumn has a string value.
            if pv_GeometryFormat is not None and pv_GeometryFormat.upper() == "WKT":

                # Fix: the original code validated the literal string "WKTColumn" instead of the
                # parameter's value, so a missing WKTColumn was never flagged.
                pv_WKTColumn = self.get_parameter_value(
                    parameter_name="WKTColumn", command_parameters=command_parameters)

                # Check that the parameter value is a non-None string. If not, raise a FAILURE.
                if not validators.validate_string(pv_WKTColumn, False, True):
                    message = "WKTColumn parameter has no value."
                    recommendation = "Specify the WKTColumn parameter."
                    warning += "\n" + message
                    self.command_status.add_to_log(
                        CommandPhaseType.INITIALIZATION,
                        CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

            else:

                # GeometryFormat is "XY": both coordinate column names must have a string value.
                for parameter in ['XColumn', 'YColumn']:

                    # Get the parameter value.
                    parameter_value = self.get_parameter_value(
                        parameter_name=parameter, command_parameters=command_parameters)

                    # Check that the parameter value is a non-None string. If not, raise a FAILURE.
                    if not validators.validate_string(parameter_value, False, True):
                        message = "{} parameter has no value.".format(parameter)
                        recommendation = "Specify the {} parameter.".format(parameter)
                        warning += "\n" + message
                        self.command_status.add_to_log(
                            CommandPhaseType.INITIALIZATION,
                            CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that optional parameter IfGeoLayerIDExists is either `Replace`, `ReplaceAndWarn`,
        # `Warn`, `Fail` or None.
        pv_IfGeoLayerIDExists = self.get_parameter_value(
            parameter_name="IfGeoLayerIDExists", command_parameters=command_parameters)
        acceptable_values = ["Replace", "ReplaceAndWarn", "Warn", "Fail"]
        if not validators.validate_string_in_list(pv_IfGeoLayerIDExists,
                                                  acceptable_values,
                                                  none_allowed=True,
                                                  empty_string_allowed=True,
                                                  ignore_case=True):
            message = "IfGeoLayerIDExists parameter value ({}) is not recognized.".format(pv_IfGeoLayerIDExists)
            recommendation = "Specify one of the acceptable values ({}) for the IfGeoLayerIDExists parameter.".format(
                acceptable_values)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)

        else:
            # Refresh the phase severity
            self.command_status.refresh_phase_severity(
                CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_read_geolayer(self, delimited_file, delimiter, geom_format,
                               x_col, y_col, wkt_col, crs, geolayer_id):
        """
        Checks the following:
        * the DelimitedFile (absolute) is a valid file
        * if the CSV is using XY coordinates
        * -- > the XColumn is an actual field name
        * -- > the YColumn is an actual field name
        * if the CSV is using WKT geometries
        * -- > the WKTColumn is an actual field name
        * the CRS code is a valid code
        * the ID of the output GeoLayer is unique (not an existing GeoLayer ID)

        Args:
            * delimited_file (str, required): The absolute path to the delimited file to be read.
            * delimiter (str, required): The delimiter symbol used in the delimited file. Often times a comma.
            * geom_format (str): The format of the geometry representation in the delimited file.
                Either `WKT` or `XY`.
            * x_col (str): The name of the delimited file column that holds the x coordinate data.
            * y_col (str): The name of the delimited file column that holds the y coordinate data.
            * wkt_col (str): The name of the delimited file column that holds the WKT geometry data.
            * crs (str, EPSG format): The coordinate reference system code associated with the X and Y coordinates.
            * geolayer_id (str): the GeoLayer identifier.

        Returns:
            Boolean. If TRUE, the geolayer should be read. If FALSE, the geolayer should not be read.
        """

        # List of Boolean values. The Boolean values correspond to the results of the following tests.
        # If TRUE, the test confirms that the command should be run.
        should_run_command = []

        # If the input DelimitedFile is not a valid file path, raise a FAILURE.
        should_run_command.append(
            validators.run_check(self, "IsFilePathValid", "DelimitedFile", delimited_file, "FAIL"))

        # If the Delimited File exists, continue with the following checks.
        if should_run_command[0] is True:

            # If the geometry format is "XY", continue.
            if geom_format.upper() == "XY":

                # If the XColumn is not an existing column name in the delimited file, raise a FAILURE.
                should_run_command.append(
                    validators.run_check(self, "IsDelimitedFileColumnNameValid", "XColumn", x_col, "FAIL",
                                         other_values=[delimited_file, delimiter]))

                # If the YColumn is not an existing column name in the delimited file, raise a FAILURE.
                should_run_command.append(
                    validators.run_check(self, "IsDelimitedFileColumnNameValid", "YColumn", y_col, "FAIL",
                                         other_values=[delimited_file, delimiter]))

            # If the geometry format is "WKT", continue.
            else:

                # If the WKTColumn is not an existing column name in the delimited file, raise a FAILURE.
                should_run_command.append(
                    validators.run_check(self, "IsDelimitedFileColumnNameValid", "WKTColumn", wkt_col, "FAIL",
                                         other_values=[delimited_file, delimiter]))

        # If the input CRS code is not a valid coordinate reference code, raise a FAILURE.
        should_run_command.append(
            validators.run_check(self, "IsCrsCodeValid", "CRS", crs, "FAIL"))

        # If the GeoLayer ID is the same as an already-existing GeoLayerID, raise a WARNING or FAILURE
        # (depends on the value of the IfGeoLayerIDExists parameter.) If required, the IfGeoLayerIDExists
        # parameter value is retrieved inside the run_check function.
        should_run_command.append(
            validators.run_check(self, "IsGeoLayerIdUnique", "GeoLayerID", geolayer_id, None))

        # Return the Boolean to determine if the process should be run.
        if False in should_run_command:
            return False
        else:
            return True

    def run_command(self):
        """
        Run the command. Read the layer file from a delimited file, create a GeoLayer object,
        and add to the GeoProcessor's geolayer list.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """

        # Obtain the parameter values.
        pv_DelimitedFile = self.get_parameter_value("DelimitedFile")
        pv_Delimiter = self.get_parameter_value("Delimiter", default_value=',')
        pv_GeometryFormat = self.get_parameter_value("GeometryFormat")
        pv_XColumn = self.get_parameter_value("XColumn", default_value=None)
        pv_YColumn = self.get_parameter_value("YColumn", default_value=None)
        pv_WKTColumn = self.get_parameter_value("WKTColumn", default_value=None)
        pv_CRS = self.get_parameter_value("CRS")
        pv_GeoLayerID = self.get_parameter_value("GeoLayerID", default_value='%f')

        # Convert the DelimitedFile parameter value relative path to an absolute path and expand for
        # ${Property} syntax.
        delimited_file_abs = io_util.verify_path_for_os(
            io_util.to_absolute_path(
                self.command_processor.get_property('WorkingDir'),
                self.command_processor.expand_parameter_value(pv_DelimitedFile, self)))

        # If the pv_GeoLayerID is a valid %-formatter, assign the pv_GeoLayerID the corresponding value.
        if pv_GeoLayerID in ['%f', '%F', '%E', '%P', '%p']:
            pv_GeoLayerID = io_util.expand_formatter(delimited_file_abs, pv_GeoLayerID)

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.__should_read_geolayer(delimited_file_abs, pv_Delimiter, pv_GeometryFormat,
                                       pv_XColumn, pv_YColumn, pv_WKTColumn, pv_CRS, pv_GeoLayerID):

            try:
                if pv_GeometryFormat.upper() == "XY":
                    # Create a QGSVectorLayer object with the delimited file.
                    qgs_vector_layer = qgis_util.read_qgsvectorlayer_from_delimited_file_xy(
                        delimited_file_abs, pv_Delimiter, pv_CRS, pv_XColumn, pv_YColumn)
                else:
                    # Create a QGSVectorLayer object with the delimited file.
                    qgs_vector_layer = qgis_util.read_qgsvectorlayer_from_delimited_file_wkt(
                        delimited_file_abs, pv_Delimiter, pv_CRS, pv_WKTColumn)

                # Create a GeoLayer and add it to the geoprocessor's GeoLayers list.
                geolayer_obj = GeoLayer(pv_GeoLayerID, qgs_vector_layer, delimited_file_abs)
                self.command_processor.add_geolayer(geolayer_obj)

            # Raise an exception if an unexpected error occurs during the process.
            # (Exception details are captured by logger.error(..., exc_info=True).)
            except Exception:
                self.warning_count += 1
                message = "Unexpected error reading GeoLayer {} from delimited file {}.".format(
                    pv_GeoLayerID, pv_DelimitedFile)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise Runtime Error if any errors occurred.
        if self.warning_count > 0:
            message = "There were {} warnings processing this command.".format(self.warning_count)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(
                CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class ReadTableFromExcel(AbstractCommand):
    """
    Reads a Table from an Excel file.

    This command reads a table from an Excel file and creates a Table object within the
    geoprocessor. The Table can then be accessed in the geoprocessor by its identifier and further
    processed.

    Command Parameters
    * InputFile (str, required): the relative pathname to the excel data file (known as a workbook)
    * Worksheet (str, optional): the name of the worksheet to read. Default: the first worksheet is read.
    * TableID (str, optional): the Table identifier. Default: the Worksheet's name.
    * IfTableIDExists (str, optional): This parameter determines the action that occurs if the TableID already
        exists within the GeoProcessor. Available options are: `Replace`, `ReplaceAndWarn`, `Warn` and `Fail`
        (Refer to user documentation for detailed description.) Default value is `Replace`.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("InputFile", type("")),
        CommandParameterMetadata("Worksheet", type("")),
        CommandParameterMetadata("TableID", type("")),
        CommandParameterMetadata("IfTableIDExists", type(""))
    ]

    def __init__(self):
        """
        Initialize the command.
        """

        # AbstractCommand data
        super().__init__()
        self.command_name = "ReadTableFromExcel"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Read a table from an Excel worksheet."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # InputFile
        self.parameter_input_metadata['InputFile.Description'] = "Excel file to read"
        self.parameter_input_metadata['InputFile.Label'] = "Input file"
        self.parameter_input_metadata['InputFile.Required'] = True
        self.parameter_input_metadata['InputFile.Tooltip'] = (
            "The Excel workbook file (.xls or .xlsx) with the Excel worksheet to read (relative or absolute path).\n"
            "${Property} syntax is recognized.")
        self.parameter_input_metadata['InputFile.FileSelector.Type'] = "Read"
        self.parameter_input_metadata['InputFile.FileSelector.Title'] = "Select an Excel file to read"
        # Worksheet
        self.parameter_input_metadata['Worksheet.Description'] = "Excel worksheet"
        self.parameter_input_metadata['Worksheet.Label'] = "Worksheet"
        self.parameter_input_metadata['Worksheet.Tooltip'] = \
            "The name of the Excel worksheet within the Excel workbook to read."
        self.parameter_input_metadata['Worksheet.Value.Default'] = \
            "The first worksheet in the Excel workbook."
        # TODO jurentie 01/22/2019 is this a read file?
        # TableID
        self.parameter_input_metadata['TableID.Description'] = "output table identifier"
        self.parameter_input_metadata['TableID.Label'] = "TableID"
        self.parameter_input_metadata['TableID.Tooltip'] = "A Table identifier"
        self.parameter_input_metadata['TableID.Value.Default.Description'] = "worksheet name"
        # IfTableIDExists
        self.parameter_input_metadata['IfTableIDExists.Description'] = "action if the TableID exists"
        self.parameter_input_metadata['IfTableIDExists.Label'] = "If table exists"
        self.parameter_input_metadata['IfTableIDExists.Tooltip'] = (
            "The action that occurs if the TableID already exists within the GeoProcessor.\n"
            "Replace : The existing Table within the GeoProcessor is overwritten with the new Table. "
            "No warning is logged.\n"
            "ReplaceAndWarn: The existing Table within the GeoProcessor is overwritten with the new Table. "
            "A warning is logged.\n"
            "Warn : The new Table is not created. A warning is logged.\n"
            "Fail : The new Table is not created. A fail message is logged.")
        self.parameter_input_metadata['IfTableIDExists.Values'] = [
            "", "Replace", "ReplaceAndWarn", "Warn", "Fail"
        ]
        self.parameter_input_metadata['IfTableIDExists.Value.Default'] = "Replace"

        # Class data
        # warning_count accumulates RUN-phase warnings; run_command() raises if it is nonzero.
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that parameter InputFile is a non-empty, non-None string.
        # - existence of the file will also be checked in run_command().
        pv_InputFile = self.get_parameter_value(
            parameter_name='InputFile', command_parameters=command_parameters)

        if not validators.validate_string(pv_InputFile, False, False):
            message = "InputFile parameter has no value."
            recommendation = "Specify the InputFile parameter to indicate the input Excel data file."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that optional parameter IfTableIDExists is either `Replace`, `ReplaceAndWarn`,
        # `Warn`, `Fail` or None.
        pv_IfTableIDExists = self.get_parameter_value(
            parameter_name="IfTableIDExists", command_parameters=command_parameters)
        acceptable_values = ["Replace", "ReplaceAndWarn", "Warn", "Fail"]
        if not validators.validate_string_in_list(pv_IfTableIDExists,
                                                  acceptable_values,
                                                  none_allowed=True,
                                                  empty_string_allowed=True,
                                                  ignore_case=True):
            message = "IfTableIDExists parameter value ({}) is not recognized.".format(pv_IfTableIDExists)
            recommendation = "Specify one of the acceptable values ({}) for the IfTableIDExists parameter.".format(
                acceptable_values)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)

        else:
            # Refresh the phase severity
            self.command_status.refresh_phase_severity(
                CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_read_table(self, file_abs, sheet_name, table_id):
        """
        Checks the following:
        * the InputFile (absolute) is a valid file
        * the Worksheet is a valid sheet in the Excel workbook
        * the ID of the Table is unique (not an existing Table ID)

        Args:
            file_abs (str): the full pathname to the input data file (Excel workbook)
            sheet_name (str): the name of the Excel worksheet to read
            table_id (str): the ID of the output Table

        Returns:
            Boolean. If TRUE, the Table should be read. If FALSE, at least one check failed and the Table
            should not be read.
        """

        # List of Boolean values. The Boolean values correspond to the results of the following tests.
        # If TRUE, the test confirms that the command should be run.
        should_run_command = []

        # If the input file is not a valid file path, raise a FAILURE.
        should_run_command.append(
            validators.run_check(self, "IsFilePathValid", "InputFile", file_abs, "FAIL"))

        # If the input file is valid, continue with the checks.
        if False not in should_run_command:

            # If the Worksheet parameter is None, assign it with the name of the first worksheet in the excel file.
            if sheet_name is None:
                sheet_name = pandas_util.create_excel_workbook_obj(file_abs).sheet_names[0]

            # If the input sheet name is not a valid sheet name in the excel workbook file, raise a FAILURE.
            should_run_command.append(
                validators.run_check(self, "IsExcelSheetNameValid", "Worksheet", sheet_name, "FAIL",
                                     other_values=[file_abs]))

            # If the TableID parameter is None, assign the parameter with the sheet name.
            if table_id is None:
                table_id = sheet_name

            # If the TableID is the same as an already-existing TableID, raise a WARNING or FAILURE (depends on the
            # value of the IfTableIDExists parameter.)
            should_run_command.append(
                validators.run_check(self, "IsTableIdUnique", "TableID", table_id, None))

        # Return the Boolean to determine if the process should be run.
        if False in should_run_command:
            return False
        else:
            return True

    def run_command(self):
        """
        Run the command. Read the tabular data from the Excel workbook/worksheet. Create a Table object,
        and add to the GeoProcessor's tables list.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """

        # Obtain the parameter values.
        pv_InputFile = self.get_parameter_value("InputFile")
        pv_Worksheet = self.get_parameter_value("Worksheet")
        pv_TableID = self.get_parameter_value("TableID")

        # Convert the InputFile parameter value relative path to an absolute path and expand for ${Property} syntax.
        file_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(
                self.command_processor.get_property('WorkingDir'),
                self.command_processor.expand_parameter_value(pv_InputFile, self)))

        # If the pv_TableID is a valid %-formatter, assign the pv_TableID the corresponding value.
        if pv_TableID in ['%f', '%F', '%E', '%P', '%p']:
            pv_TableID = io_util.expand_formatter(file_absolute, pv_TableID)

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.__should_read_table(file_absolute, pv_Worksheet, pv_TableID):

            try:
                # Assign the Worksheet parameter to the name of the first Excel worksheet, if it was not specified.
                if pv_Worksheet is None:
                    pv_Worksheet = pandas_util.create_excel_workbook_obj(file_absolute).sheet_names[0]

                # Assign the TableID parameter to the name of the Excel worksheet, if it was not specified.
                if pv_TableID is None:
                    pv_TableID = pv_Worksheet

                # Create a Pandas Data Frame object.
                df = pandas_util.create_data_frame_from_excel(file_absolute, pv_Worksheet)

                # Create a Table and add it to the geoprocessor's Tables list.
                table_obj = Table(pv_TableID, df, file_absolute)
                self.command_processor.add_table(table_obj)

            # Raise an exception if an unexpected error occurs during the process.
            # (Exception details are captured by logger.error(..., exc_info=True).)
            except Exception:
                self.warning_count += 1
                message = "Unexpected error reading Table {} from Excel file {}.".format(
                    pv_TableID, pv_InputFile)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise Runtime Error if any errors occurred.
        if self.warning_count > 0:
            message = "There were {} warnings processing this command.".format(self.warning_count)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(
                CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class For(AbstractCommand):
    """
    The For command starts a For block.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("Name", type("")),
        CommandParameterMetadata("IteratorProperty", type("")),
        # Use strings for sequence because could be integer, decimal, or string list.
        CommandParameterMetadata("SequenceStart", type("")),
        CommandParameterMetadata("SequenceEnd", type("")),
        CommandParameterMetadata("SequenceIncrement", type("")),
        # Specify the list property to use for iteration
        CommandParameterMetadata("ListProperty", type("")),
        # Specify the following to iterate over a table.
        CommandParameterMetadata("TableID", type("")),
        CommandParameterMetadata("TableColumn", type("")),
        CommandParameterMetadata("TablePropertyMap", type("")),
    ]

    def __init__(self):
        """
        Initialize an instance of the command.
        """
        super().__init__()

        # AbstractCommand data
        self.command_name = "For"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = (
            "This command repeatedly runs commands between For and EndFor being executed."
            "The For loop is exited when input is completely processed.\n"
            "The loop can iterate over one of the following:\n"
            " - sequence of numbers\n"
            " - list of strings from a property containing the list of strings\n"
            " - values from a column in a table (optionally, while setting other values as properties)")
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # Name
        self.parameter_input_metadata['Name.Description'] = "the name of the For loop"
        self.parameter_input_metadata['Name.Label'] = "Name"
        self.parameter_input_metadata['Name.Required'] = True
        self.parameter_input_metadata['Name.Tooltip'] = (
            "The name of the for loop, which will be matched with the name of an\n"
            "EndFor command to indicate the block of commands in the loop.")
        # IteratorProperty
        self.parameter_input_metadata['IteratorProperty.Description'] = \
            "the processor property that will be set to the iterator property"
        self.parameter_input_metadata['IteratorProperty.Label'] = "Iterator property"
        self.parameter_input_metadata['IteratorProperty.Tooltip'] = (
            "The processor property that will be set to the iterator property. The object type will depend on that\n"
            "used to provide the iteration property list. For example, if a sequence of integers is being iterated,\n"
            "the property will contain an integer.")
        self.parameter_input_metadata['IteratorProperty.Value.Default.Description'] = "same as Name"
        # TODO jurentie 01/23/19 how to handle these different cases? maybe needs the tabbed options
        # If iterating over a LIST ...
        # ListProperty
        self.parameter_input_metadata['ListProperty.Description'] = "property containing list of strings"
        self.parameter_input_metadata['ListProperty.Label'] = "List property"
        self.parameter_input_metadata['ListProperty.Tooltip'] = \
            "Specify if the list is iterating over a property that contains a list of strings."
        self.parameter_input_metadata['ListProperty.Value.Default.Description'] = \
            "specify this or 'Sequence*' parameters"
        # If iterating over a SEQUENCE ...
        # SequenceStart
        self.parameter_input_metadata['SequenceStart.Description'] = \
            "starting value when a sequence is specified for iteration"
        self.parameter_input_metadata['SequenceStart.Label'] = "Sequence start"
        self.parameter_input_metadata['SequenceStart.Tooltip'] = (
            "Starting value when a sequence is specified for iteration, an integer or floating-point number "
            "(with decimal).")
        self.parameter_input_metadata['SequenceStart.Value.Default'] = "No default if sequence is used"
        # SequenceEnd
        self.parameter_input_metadata['SequenceEnd.Description'] = \
            "ending value when a sequence is specified for iteration"
        self.parameter_input_metadata['SequenceEnd.Label'] = "Sequence end"
        self.parameter_input_metadata['SequenceEnd.Tooltip'] = (
            "Ending value when a sequence is specified for iteration, an integer or floating-point number "
            "(with decimal).")
        self.parameter_input_metadata['SequenceEnd.Value.Default.Description'] = "No default if sequence is used"
        # SequenceIncrement
        self.parameter_input_metadata['SequenceIncrement.Description'] = "increment for sequence iterator"
        self.parameter_input_metadata['SequenceIncrement.Label'] = "Sequence increment"
        self.parameter_input_metadata['SequenceIncrement.Tooltip'] = "Increment for sequence iterator."
        self.parameter_input_metadata['SequenceIncrement.Value.Default.Description'] = \
            "1 or 1.0 depending on type for SequenceStart"
        # If iterating over TABLE...
        # TableID
        self.parameter_input_metadata['TableID.Description'] = "the table identifier"
        self.parameter_input_metadata['TableID.Label'] = "TableID"
        self.parameter_input_metadata['TableID.Tooltip'] = \
            "The table identifier, when specifying the iterator as a column from a table. ${Property} syntax is " \
            "recognized."
        self.parameter_input_metadata['TableID.Value.Default'] = "No default if table is used"
        # TableColumn
        self.parameter_input_metadata['TableColumn.Description'] = "the table column name"
        self.parameter_input_metadata['TableColumn.Label'] = "Table column"
        self.parameter_input_metadata['TableColumn.Tooltip'] = (
            "The table column name, when specifying the iterator as a column from a table. ${Property} syntax is "
            "recognized.")
        self.parameter_input_metadata['TableColumn.Value.Default.Description'] = "No default if table is used"
        # TablePropertyMap
        self.parameter_input_metadata['TablePropertyMap.Description'] = \
            "use to set properties from table data"
        self.parameter_input_metadata['TablePropertyMap.Label'] = "Table property map"
        self.parameter_input_metadata['TablePropertyMap.Tooltip'] = (
            "Specify the names of column names and corresponding processor property names to set.\n"
            "This allows other commands to access the values of those properties using ${Property} syntax.\n\n"
            "Specify using format:\n"
            "ColumnName1:PropertyName1,ColumnName2:PropertyName2")
        self.parameter_input_metadata['TablePropertyMap.Value.Default.Description'] = \
            "None - only the iterator column value will be set as a property using IteratorProperty"

        # Local data
        self.for_initialized = False  # For loop is not initialized, will be initialized in first next() call

        # Iterator core data
        # - used with all iterator types
        self.iterator_object = None  # The current object for the iterator
        self.iterator_object_list_index = None  # The index in the iterator object list, 0-reference
        self.iterator_property = None  # The name of the property that set for the iterator variable

        # Utility data to simplify checks for type of iteration (only one should be set to True)
        self.iterator_is_list = False
        self.iterator_is_sequence = False
        self.iterator_is_table = False

        # Used with sequence iterator
        self.iterator_sequence_start = None
        self.iterator_sequence_end = None
        self.iterator_sequence_increment = None

        # Used with list iterator
        self.iterator_list = None  # The list of objects, typically str, being iterated over

        # Used with table iterator
        self.table = None
        self.table_column = None
        self.table_property_map = None

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""
        logger = logging.getLogger(__name__)

        # Unlike most commands, set internal data here because it is needed by initial call to next()
        # before calls to run_command

        # Options for iterating, will be changed based on parameters that are set
        self.iterator_is_list = False
        self.iterator_is_sequence = False
        self.iterator_is_table = False
        option_count = 0  # How many iteration options are specified (should only be 1)

        # Name is required
        pv_Name = self.get_parameter_value(parameter_name='Name', command_parameters=command_parameters)
        if not validators.validate_string(pv_Name, False, False):
            message = "A name for the For block must be specified"
            recommendation = "Specify the Name."
warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # -------------------------------- # Iterator option 1 - use a sequence # SequenceStart is currently required since no other iteration types are implemented pv_SequenceStart = self.get_parameter_value( parameter_name='SequenceStart', command_parameters=command_parameters) if pv_SequenceStart is not None and pv_SequenceStart != "": self.iterator_is_sequence = True # Will be checked below to make sure only one option is used option_count += 1 if not validators.validate_number(pv_SequenceStart, False, False): message = "The SequenceStart value must be specified as a number" recommendation = "Specify the SequenceStart as a number." warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) else: if pv_SequenceStart.find(".") >= 0: # Decimal self.iterator_sequence_start = float(pv_SequenceStart) else: # Assume integer self.iterator_sequence_start = int(pv_SequenceStart) # The other sequence parameters only make sense if the start is specified # SequenceEnd is currently required since no other iteration types are implemented pv_SequenceEnd = self.get_parameter_value( parameter_name='SequenceEnd', command_parameters=command_parameters) if not validators.validate_number(pv_SequenceEnd, False, False): message = "The SequenceEnd value must be specified as a number" recommendation = "Specify the SequenceEnd as a number." 
warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) else: if pv_SequenceEnd.find(".") >= 0: # Decimal self.iterator_sequence_end = float(pv_SequenceEnd) else: # Assume integer self.iterator_sequence_end = int(pv_SequenceEnd) # SequenceIncrement is currently required since no other iteration types are implemented pv_SequenceIncrement = self.get_parameter_value( parameter_name='SequenceIncrement', command_parameters=command_parameters) if not validators.validate_number(pv_SequenceIncrement, False, False): message = "The SequenceIncrement value must be specified as a number" recommendation = "Specify the SequenceIncrement as a number." warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) else: if pv_SequenceIncrement.find(".") >= 0: # Decimal self.iterator_sequence_increment = float( pv_SequenceIncrement) else: # Assume integer self.iterator_sequence_increment = int( pv_SequenceIncrement) # -------------------------------- # Iterator option 2 - use a processor property that contains a list pv_ListProperty = self.get_parameter_value( parameter_name='ListProperty', command_parameters=command_parameters) if pv_ListProperty is not None and pv_ListProperty != "": self.iterator_is_list = True # Will be checked below to make sure only one option is used option_count += 1 # No further validation is done - ListProperty property must be defined at run time # -------------------------------- # Iterator option 3 - use a table pv_TableID = self.get_parameter_value( parameter_name='TableID', command_parameters=command_parameters) if pv_TableID is not None and pv_TableID != "": self.iterator_is_table = True option_count += 1 # TableColumn is required pv_TableColumn = self.get_parameter_value( parameter_name='TableColumn', command_parameters=command_parameters) if not 
validators.validate_string(pv_TableColumn, False, False): message = "The TableColumn parameter must be specified" recommendation = "Specify the TableColumn." warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # -------------------------------- # Only allow one of the iteration properties to be specified because otherwise the command will be confused. if option_count > 1: message = "Parameters for multiple iterator types have been specified." recommendation = "Specify parameters for only one iteration type." warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Allow multiple iteration types to be set since they will be processed in order when running # and the preferred approach will take precedent # Check for unrecognized parameters. # This returns a message that can be appended to the warning, which if non-empty # triggers an exception below. warning = command_util.validate_command_parameter_names(self, warning) # If any warnings were generated, throw an exception if len(warning) > 0: logger.warn(warning) raise ValueError(warning) # Refresh the phase severity self.command_status.refresh_phase_severity( CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS) def get_name(self): """ Return the name of the For (will match name of corresponding EndFor). Returns: The name of the For (will match name of corresponding EndFor). """ return self.get_parameter_value("Name") def next(self): """ Increment the loop counter. If called the first time, initialize. This may be called before run_commands() in the processor so process properties here. Returns: If the increment will go past the end, return False. Otherwise, return True. 
""" debug = True logger = logging.getLogger(__name__) if not self.for_initialized: # Initialize the loop logger.info("Initializing For() command") # Set the initialization here because exceptions could cause it to not get set and then # an infinite loop results self.for_initialized = True if self.iterator_is_list: logger.info("Initializing For() command for a list.") # Initialize the loop self.__set_iterator_property_value(None) self.command_status.clear_log(CommandPhaseType.RUN) try: # This would normally be done in run_command(), but that function is not called like other commands self.iterator_object_list_index = 0 pv_ListProperty = self.get_parameter_value('ListProperty') self.iterator_list = self.command_processor.get_property( pv_ListProperty) if self.iterator_list is None: message = 'For command iterator list property "' + pv_ListProperty + '" is not defined.' recommendation = "Confirm that the list property has values." logger.warning(message) self.command_status.add_to_log( CommandPhaseType.RUN, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) else: self.iterator_object = self.iterator_list[ self.iterator_object_list_index] # if ( Message.isDebugOn ) if debug: logger.info( "Initialized iterator object to first item in list: " + str(self.iterator_object)) return True except: # message = "Error initializing For() iterator to initial value in list (" + e + ")."; message = "Error initializing For() iterator initial value to first item in list" logger.warning(message, exc_info=True) raise ValueError(message) elif self.iterator_is_sequence: # Iterating on a sequence logger.info("Initializing For() command for a sequence.") # Initialize the loop self.__set_iterator_property_value(None) self.command_status.clear_log(CommandPhaseType.RUN) try: self.iterator_object_list_index = 0 self.iterator_object = self.iterator_sequence_start if self.iterator_sequence_increment is None: # Default increment is 1 or 1.0 if type(self.iterator_sequence_start) 
== 'int': self.iterator_sequence_increment = 1 elif type(self.iterator_sequence_start) == 'float': self.iterator_sequence_increment = 1.0 # if ( Message.isDebugOn ) if debug: logger.info( "Initialized iterator object to sequence start: " + str(self.iterator_object)) return True except: # message = "Error initializing For() iterator to initial value (" + e + ")."; message = "Error initializing For() iterator initial value to sequence start" logger.warning(message, exc_info=True) raise ValueError(message) elif self.iterator_is_table: # Iterating over a table logger.info("Initializing For() command for a table.") # Initialize the loop self.__set_iterator_property_value(None) self.command_status.clear_log(CommandPhaseType.RUN) try: # Get TableID parameter value. If required, expand for ${Property} syntax. pv_TableID = self.get_parameter_value( parameter_name='TableID') pv_TableID = self.command_processor.expand_parameter_value( pv_TableID, self) # Get TableColumn parameter value. If required, expand for ${Property} syntax. pv_TableColumn = self.get_parameter_value( parameter_name='TableColumn') pv_TableColumn = self.command_processor.expand_parameter_value( pv_TableColumn, self) # Get the table pandas data frame object self.table = self.command_processor.get_table(pv_TableID) # Get the TablePropertyMap pv_TablePropertyMap = self.get_parameter_value( parameter_name='TablePropertyMap') # Assign as class variable after converting from string to dictionary self.table_property_map = string_util.delimited_string_to_dictionary_one_value( pv_TablePropertyMap, entry_delimiter=",", key_value_delimiter=":", trim=False) # Get the values of the input column as a list self.iterator_object_list_index = 0 self.iterator_list = self.table.get_column_values_as_list( pv_TableColumn) if self.iterator_list is None: message = 'For command iterator table column "' + pv_TableColumn + '" is not defined.' recommendation = "Confirm that the table column has values." 
logger.warning(message) self.command_status.add_to_log( CommandPhaseType.RUN, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) else: self.iterator_object = self.iterator_list[ self.iterator_object_list_index] # Set the other property values if configured. if self.table_property_map: self.__next_set_properties_from_table() # if ( Message.isDebugOn ) if debug: logger.info( "Initialized iterator object to first item in list: " + str(self.iterator_object)) return True except: # message = "Error initializing For() iterator to initial value (" + e + ")."; message = "Error initializing For() iterator initial value to sequence start" logger.warning(message, exc_info=True) raise ValueError(message) else: # TODO smalers 2017-12-21 need to throw exception pass else: # Increment the iterator property # - optionally set additional properties from other table columns (tables not yet supported) if self.iterator_is_list: # If the iterator object is already at or will exceed the list length if incremented, then done iterating # - len() is 1-based, index is 0-based if self.iterator_object_list_index >= ( len(self.iterator_list) - 1): logger.info( "Iterator has reached list end. Returning False from next()." ) return False else: # Iterate by incrementing list index and returning corresponding list object self.iterator_object_list_index += 1 self.iterator_object = self.iterator_list[ self.iterator_object_list_index] logger.info("Iterator value is now " + str(self.iterator_object) + ". 
Returning True from next().") return True elif self.iterator_is_sequence: # If the iterator object is already at or will exceed the maximum, then done iterating # TODO smalers 2017-12-21 verify that Python handles typing automatically for integers and doubles # if (((type(self.iterator_sequence_start) == 'int') && if self.iterator_object >= self.iterator_sequence_end or \ (self.iterator_object + self.iterator_sequence_increment) > self.iterator_sequence_end: logger.info( "Iterator has reached end value. Returning False from next()." ) return False else: # Iterate by adding increment to iterator object # TODO smalers 2017-12-21 verify that Python handles typing automatically for integers and doubles self.iterator_object = self.iterator_object + self.iterator_sequence_increment logger.info("Iterator value is now " + str(self.iterator_object) + ". Returning True from next().") return True elif self.iterator_is_table: # If the iterator object is already at or will exceed the list length if incremented, then done iterating # - len() is 1-based, index is 0-based if self.iterator_object_list_index >= ( len(self.iterator_list) - 1): logger.info( "Iterator has reached list end. Returning False from next()." ) return False else: # Iterate by incrementing list index and returning corresponding list object self.iterator_object_list_index += 1 self.iterator_object = self.iterator_list[ self.iterator_object_list_index] logger.info("Iterator value is now " + str(self.iterator_object) + ". Returning True from next().") # Set the other property values if configured. if self.table_property_map: self.__next_set_properties_from_table() return True else: # Iteration type not recognized so jump out right away to avoid infinite loop return True def reset_command(self): """ Reset the command to uninitialized state. This is needed to ensure that re-executing commands will restart the loop on the first call to next(). 
""" logger = logging.getLogger(__name__) self.for_initialized = False logger.info('Reset For loop to uninitialized') def run_command(self): """ Run the command. This initializes the iterator data for use when next() is called by the processor. Returns: None. """ logger = logging.getLogger(__name__) logger.info("In For.run_command") pv_Name = self.get_parameter_value('Name') pv_IteratorProperty = self.get_parameter_value('IteratorProperty') if pv_IteratorProperty is None or pv_IteratorProperty == "": # Default to same as Name pv_IteratorProperty = pv_Name self.iterator_property = pv_IteratorProperty # ------------------------------------------------------------------------------- # Properties used when iterating over a sequence of integers or decimal numbers # ------------------------------------------------------------------------------- pv_SequenceStart = self.get_parameter_value('SequenceStart') if pv_SequenceStart is not None and pv_SequenceStart != "": if pv_SequenceStart.find(".") >= 0: # Decimal self.iterator_sequence_start = float(pv_SequenceStart) else: # Assume integer self.iterator_sequence_start = int(pv_SequenceStart) pv_SequenceEnd = self.get_parameter_value('SequenceEnd') if pv_SequenceEnd.find(".") >= 0: # Decimal self.iterator_sequence_end = float(pv_SequenceEnd) else: # Assume integer self.iterator_sequence_end = int(pv_SequenceEnd) pv_SequenceIncrement = self.get_parameter_value( 'SequenceIncrement') if pv_SequenceIncrement is None or pv_SequenceIncrement == "": pv_SequenceIncrement = "1" # Default if pv_SequenceIncrement.find(".") >= 0: # Decimal self.iterator_sequence_increment = float(pv_SequenceIncrement) else: # Assume integer self.iterator_sequence_increment = int(pv_SequenceIncrement) self.iterator_is_list = False self.iterator_is_sequence = True self.iterator_is_table = False # ------------------------------------------------------------------------------- # Properties used when iterating over a list of values # - initially str is used in 
testing but may support list of numbers # ------------------------------------------------------------------------------- pv_ListProperty = self.get_parameter_value('ListProperty') if pv_ListProperty is not None: # Iterating over a list, given by the property self.iterator_list = self.command_processor.get_property( pv_ListProperty) if self.iterator_list is None: message = 'For command iterator list property "' + pv_ListProperty + '" is not defined.' recommendation = "Confirm that the list property has values." logger.warning(message) self.command_status.add_to_log( CommandPhaseType.RUN, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) self.iterator_is_list = True self.iterator_is_sequence = False self.iterator_is_table = False # ------------------------------------------------------------------------------- # Properties used when iterating over a table # ------------------------------------------------------------------------------- # next() will have been called by the command processor so at this point just set the processor property. # Set the basic property as well as the property with 0 and 1 at end of name indicating zero and 1 offset. self.command_processor.set_property(pv_IteratorProperty, self.iterator_object) def __set_iterator_property_value(self, iterator_property_value): """ Set the value of the iterator property (index), used when iterating over a list. Args: iterator_property_value: Returns: None. """ self.iterator_object = iterator_property_value def __next_set_properties_from_table(self): # Get the iterator object list index index = self.iterator_object_list_index # Get the pandas data frame object for the table being iterated df = self.table.df # Get the pandas row that is currently being iterated over. row = df.iloc[index] # Iterate over the entries in the table_property_map dictionary. 
# key is the property name and value is the corresponding column name for column, property in self.table_property_map.items(): # Get the value for the given column and the current row. property_val = row[column] # Assign the geoprocessor property the corresponding value. self.command_processor.set_property(property, property_val)
class SplitGeoLayerByAttribute(AbstractCommand):
    """
    Splits a GeoLayer into multiple GeoLayers based on the GeoLayer's attribute values.

    This command takes a GeoLayer and an attribute and generates a set of GeoLayers. Each new GeoLayer contains
    all features from the input GeoLayer with the same value for the specified attribute. The number of GeoLayers
    generated is equal to the number of unique values found for the specified attribute.

    Command Parameters
    * InputGeoLayerID (str, required): the ID of the input GeoLayer, the layer to be split.
    * AttributeName (str, required): the name of the attribute to split on. Must be a unique attribute name of the
      GeoLayer.
    * OutputGeoLayerIDs (str, optional): the IDs of the GeoLayers created as the output split layers. By default
      the GeoLayerID of the output layers will be {}_splitBy_{} where the first variable is the InputGeoLayerID
      and the second variable is the AttributeName value.
    * IfGeoLayerIDExists (str, optional): This parameter determines the action that occurs if the OutputGeoLayerIDs
      already exist within the GeoProcessor. Available options are: `Replace`, `ReplaceAndWarn`, `Warn` and `Fail`
      (Refer to user documentation for detailed description.) Default value is `Replace`.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("InputGeoLayerID", type("")),
        CommandParameterMetadata("AttributeName", type("")),
        CommandParameterMetadata("OutputGeoLayerIDs", type("")),
        CommandParameterMetadata("IfGeoLayerIDExists", type("")),
        CommandParameterMetadata("TemporaryFolder", type(""))
    ]
    # CommandParameterMetadata("RemoveTemporaryFiles", type(""))]

    def __init__(self):
        """
        Initialize the command, including the editor metadata for every parameter.
        """

        # AbstractCommand data
        super().__init__()
        self.command_name = "SplitGeoLayerByAttribute"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "This command splits an input GeoLayer by unique values " \
                                               "of an attribute."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # InputGeoLayerID
        self.parameter_input_metadata['InputGeoLayerID.Description'] = 'input GeoLayer identifier'
        self.parameter_input_metadata['InputGeoLayerID.Label'] = "Input GeoLayerID"
        self.parameter_input_metadata['InputGeoLayerID.Required'] = True
        self.parameter_input_metadata['InputGeoLayerID.Tooltip'] = "A GeoLayer identifier"
        # AttributeName
        self.parameter_input_metadata['AttributeName.Description'] = "attribute name to split by"
        self.parameter_input_metadata['AttributeName.Label'] = "Attribute Name"
        self.parameter_input_metadata['AttributeName.Required'] = True
        self.parameter_input_metadata['AttributeName.Tooltip'] = "The attribute name that will be used to " \
                                                                 "split the input GeoLayer."
        # OutputGeoLayerIDs
        self.parameter_input_metadata['OutputGeoLayerIDs.Description'] = "the identifiers of the output GeoLayers"
        self.parameter_input_metadata['OutputGeoLayerIDs.Label'] = "Output GeoLayerIDs"
        self.parameter_input_metadata['OutputGeoLayerIDs.Tooltip'] = "The identifiers of the output GeoLayers."
        self.parameter_input_metadata['OutputGeoLayerIDs.Value.Default'] = \
            "Default QGIS output file names for layers"
        # IfGeoLayerIDExists
        self.parameter_input_metadata['IfGeoLayerIDExists.Description'] = "action if exists"
        self.parameter_input_metadata['IfGeoLayerIDExists.Label'] = "If GeoLayerID exists"
        # NOTE: a trailing space was missing after "new" in the first line below, which rendered
        # as "newGeoLayer" in the editor tooltip - fixed.
        self.parameter_input_metadata['IfGeoLayerIDExists.Tooltip'] = (
            "The action that occurs if the GeoLayerID already exists within the GeoProcessor.\n"
            "Replace : The existing GeoLayer within the GeoProcessor is overwritten with the new "
            "GeoLayer. No warning is logged.\n"
            "ReplaceAndWarn: The existing GeoLayer within the GeoProcessor is overwritten with the new "
            "GeoLayer. A warning is logged. \n"
            "Warn : The new GeoLayer is not created. A warning is logged. \n"
            "Fail : The new GeoLayer is not created. A fail message is logged.")
        self.parameter_input_metadata['IfGeoLayerIDExists.Values'] = ["", "Replace", "ReplaceAndWarn", "Warn", "Fail"]
        self.parameter_input_metadata['IfGeoLayerIDExists.Value.Default'] = "Replace"
        # TemporaryFolder
        self.parameter_input_metadata['TemporaryFolder.Description'] = "temporary location for output files"
        self.parameter_input_metadata['TemporaryFolder.Label'] = "Temporary Folder"
        # NOTE: a trailing space was missing after "would be", which rendered as "betaken" - fixed.
        self.parameter_input_metadata['TemporaryFolder.Tooltip'] = \
            "sets the temporary folder for files and the default would be whatever QGIS uses. " \
            "This approach would be taken for similar commands."
        self.parameter_input_metadata['TemporaryFolder.Value.Default'] = "default temporary folder directory"
        self.parameter_input_metadata['TemporaryFolder.FileSelector.Title'] = \
            "select the directory to save temp files to"
        self.parameter_input_metadata['TemporaryFolder.FileSelector.SelectFolder'] = True
        self.parameter_input_metadata['TemporaryFolder.FileSelector.Type'] = "Write"
        # RemoveTemporaryFiles - planned parameter, not yet enabled.
        # self.parameter_input_metadata['RemoveTemporaryFiles.Description'] = "remove temporary files"
        # self.parameter_input_metadata['RemoveTemporaryFiles.Label'] = "Remove Temporary Files"
        # self.parameter_input_metadata['RemoveTemporaryFiles.Tooltip'] = \
        #     "True (default): remove the temporary files created behind the scenes.\n" \
        #     "False: leave the files so that they can be reviewed."
        # self.parameter_input_metadata['RemoveTemporaryFiles.Value.Default'] = "True"
        # self.parameter_input_metadata['RemoveTemporaryFiles.Values'] = ["", "True", "False"]

        # Class data
        self.warning_count = 0  # accumulated run-time warnings; run_command raises RuntimeError if > 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that parameter InputGeoLayerID is a non-empty, non-None string.
        pv_InputGeoLayerID = self.get_parameter_value(parameter_name='InputGeoLayerID',
                                                      command_parameters=command_parameters)
        if not validators.validate_string(pv_InputGeoLayerID, False, False):
            message = "InputGeoLayerID parameter has no value."
            recommendation = "Specify the InputGeoLayerID parameter to indicate the input GeoLayer."
            warning += "\n" + message
            self.command_status.add_to_log(CommandPhaseType.INITIALIZATION,
                                           CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that parameter AttributeName is a non-empty, non-None string.
        pv_AttributeName = self.get_parameter_value(parameter_name='AttributeName',
                                                    command_parameters=command_parameters)
        if not validators.validate_string(pv_AttributeName, False, False):
            message = "AttributeName parameter has no value."
            recommendation = "Specify the AttributeName parameter to indicate the attribute to split on."
            warning += "\n" + message
            self.command_status.add_to_log(CommandPhaseType.INITIALIZATION,
                                           CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that optional parameter IfGeoLayerIDExists is either `Replace`, `ReplaceAndWarn`, `Warn`,
        # `Fail` or None.
        pv_IfGeoLayerIDExists = self.get_parameter_value(parameter_name="IfGeoLayerIDExists",
                                                         command_parameters=command_parameters)
        acceptable_values = ["Replace", "Warn", "Fail", "ReplaceAndWarn"]
        if not validators.validate_string_in_list(pv_IfGeoLayerIDExists, acceptable_values, none_allowed=True,
                                                  empty_string_allowed=True, ignore_case=True):
            message = "IfGeoLayerIDExists parameter value ({}) is not recognized.".format(pv_IfGeoLayerIDExists)
            recommendation = "Specify one of the acceptable values ({}) for the IfGeoLayerIDExists parameter.".format(
                acceptable_values)
            warning += "\n" + message
            self.command_status.add_to_log(CommandPhaseType.INITIALIZATION,
                                           CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)
        else:
            # Refresh the phase severity
            self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def check_command_input(self, input_geolayer_id, attribute_name, output_geolayer_ids):
        """
        Checks the following:
        * the ID of the input GeoLayer is an existing GeoLayer ID
        * the attribute name is a valid name for the GeoLayer (if not, log an error message & do not continue.)
        * the IDs of the output GeoLayers are unique (not an existing GeoLayer ID)

        Args:
            input_geolayer_id: the ID of the input GeoLayer
            attribute_name (str): the name of the attribute in which to split the GeoLayer
            output_geolayer_ids: the IDs of the output GeoLayers

        Returns:
            Boolean. If TRUE, the GeoLayer should be split. If FALSE, at least one check failed and the GeoLayer
            should not be split.
        """
        logger = logging.getLogger(__name__)

        # List of Boolean values. The Boolean values correspond to the results of the following tests. If TRUE, the
        # test confirms that the command should be run.
        should_run_command = []

        # If the input GeoLayerID is not an existing GeoLayerID, raise a FAILURE.
        should_run_command.append(validators.run_check(self, "IsGeoLayerIDExisting", "InputGeoLayerID",
                                                       input_geolayer_id, "FAIL"))

        # If the input GeoLayer exists, continue with the checks.
        if False not in should_run_command:
            # Get the input GeoLayer object and its attribute names.
            input_geolayer = self.command_processor.get_geolayer(input_geolayer_id)
            list_of_attributes = input_geolayer.get_attribute_field_names()
            for i_attribute_name in list_of_attributes:
                logger.info('Input layer has attribute "' + str(i_attribute_name) + '"')

            if attribute_name not in list_of_attributes:
                # BUG FIX: previously an invalid attribute logged a FAILURE but the command still ran,
                # and the "Found attribute" message was logged unconditionally.
                self.warning_count += 1
                message = 'The attribute name ({}) is not valid.'.format(attribute_name)
                recommendation = 'Specify a valid attribute name. Valid attributes for this layer are as follows: ' \
                                 '{}'.format(list_of_attributes)
                self.logger.error(message)
                self.command_status.add_to_log(CommandPhaseType.RUN,
                                               CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))
                should_run_command.append(False)
            else:
                logger.info('Found attribute "' + attribute_name + '" in input layer attributes')

            # If the OutputGeoLayerID is the same as an already-existing GeoLayerID, raise a WARNING or FAILURE
            # (depends on the value of the IfGeoLayerIDExists parameter.)
            should_run_command.append(validators.run_check(self, "IsGeoLayerIdUnique", "OutputGeoLayerID",
                                                           output_geolayer_ids, None))

        # BUG FIX: previously returned True when the input GeoLayer did not exist; now the command only
        # runs if every check passed.
        return False not in should_run_command

    def run_command(self):
        """
        Run the command. Split the input GeoLayer by the selected Attribute. Create new GeoLayers based on
        unique attribute values.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """
        logger = logging.getLogger(__name__)

        # Obtain the parameter values.
        pv_InputGeoLayerID = self.get_parameter_value("InputGeoLayerID")
        pv_AttributeName = self.get_parameter_value("AttributeName")

        # OutputGeoLayerIDs is a comma-separated list, e.g. 'output1,output2,output3'.
        # None means the default output GeoLayerIDs (attributeName_attributeValue) are used below.
        # BUG FIX: was `.split(',')` inside a bare except which silently hid the AttributeError
        # raised when the parameter is unset (get_parameter_value returns None).
        pv_OutputGeoLayerIDs = self.get_parameter_value("OutputGeoLayerIDs")
        if pv_OutputGeoLayerIDs is not None:
            pv_OutputGeoLayerIDs = pv_OutputGeoLayerIDs.split(',')

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.check_command_input(pv_InputGeoLayerID, pv_AttributeName, pv_OutputGeoLayerIDs):
            try:
                # Get the Input GeoLayer (wraps a QgsVectorLayer, https://qgis.org/api/classQgsVectorLayer.html).
                input_geolayer = self.command_processor.get_geolayer(pv_InputGeoLayerID)
                logger.info('Input GeoLayer [GeoLayerID: ' + pv_InputGeoLayerID + '] has been read in successfully')

                # If the input GeoLayer is an in-memory GeoLayer, make it an on-disk GeoLayer.
                if input_geolayer.source_path is None or input_geolayer.source_path.upper() in ["", "MEMORY"]:
                    # Get the absolute path of the GeoLayer to write to disk.
                    geolayer_disk_abs_path = os.path.join(self.command_processor.get_property('TempDir'),
                                                          input_geolayer.id)
                    logger.info('GeoLayer path ' + geolayer_disk_abs_path)
                    # Write the GeoLayer to disk. Overwrite the (memory) GeoLayer in the geoprocessor with the
                    # on-disk GeoLayer.
                    input_geolayer = input_geolayer.write_to_disk(geolayer_disk_abs_path)
                    self.command_processor.add_geolayer(input_geolayer)

                attribute_name = pv_AttributeName
                working_dir = self.command_processor.properties['WorkingDir']

                # Determine the temporary output folder: either the user-specified TemporaryFolder
                # (relative to the working directory) or a dedicated subfolder of the system temp
                # directory so the generated files are easy to remove.
                # BUG FIX: was a bare except around string concatenation with a possibly-None parameter.
                pv_TemporaryFolder = self.get_parameter_value("TemporaryFolder")
                if pv_TemporaryFolder is not None:
                    temp_directory = working_dir + "/" + pv_TemporaryFolder
                else:
                    temp_directory = tempfile.gettempdir() + "/qgissplitvectorlayer-outputfiles"

                # Perform the QGIS split vector layer function.
                # INPUT = input QgsVectorLayer, FIELD = attribute name to split by,
                # OUTPUT = folder where one file per unique attribute value is written, named
                # attributeName_attributeValue.shp (plus .dbf/.prj/.qpj/.shx sidecars).
                alg_parameters = {
                    "INPUT": input_geolayer.qgs_vector_layer,
                    "FIELD": attribute_name,
                    "OUTPUT": temp_directory
                }
                self.command_processor.qgis_processor.runAlgorithm("qgis:splitvectorlayer", alg_parameters)

                # Create new GeoLayers from the files written above and add them to the GeoProcessor.
                features = input_geolayer.qgs_vector_layer.getFeatures()
                filename_extension = ".shp"
                # TODO: only unique attribute values should be processed; duplicate values re-read the same file.
                for i, feature in enumerate(features):
                    attribute = feature[attribute_name]
                    path = temp_directory + "/" + attribute_name + "_" + str(attribute) + filename_extension
                    layer = QgsVectorLayer(path, "layer" + str(attribute), "ogr")
                    # Use the user-supplied OutputGeoLayerID when one is available for this index,
                    # otherwise fall back to the default attributeName_attributeValue ID.
                    # BUG FIX: was an index expression inside a bare except.
                    if pv_OutputGeoLayerIDs is not None and i < len(pv_OutputGeoLayerIDs):
                        geolayer_id = pv_OutputGeoLayerIDs[i]
                    else:
                        geolayer_id = attribute_name + "_" + str(attribute)
                    self.command_processor.add_geolayer(GeoLayer(geolayer_id, layer, path))

                # TODO: honor a future RemoveTemporaryFiles parameter and delete temp_directory contents here.

            # Raise an exception if an unexpected error occurs during the process.
            except Exception:
                self.warning_count += 1
                message = "Unexpected error splitting GeoLayer {}.".format(pv_InputGeoLayerID)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(CommandPhaseType.RUN,
                                               CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise Runtime Error if any errors occurred.
        if self.warning_count > 0:
            message = "There were {} warnings proceeding this command.".format(self.warning_count)
            raise RuntimeError(message)
        else:
            # Set command status type as SUCCESS if there are no errors.
            self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class WriteTableToDelimitedFile(AbstractCommand):
    """
    Writes a Table to a delimiter-separated file.

    Command Parameters
    * TableID (str, required): the identifier of the Table to be written to the delimited file
    * OutputFile (str, required): the relative pathname of the output delimited file.
    * Delimiter (str, optional): the delimiter of the output file. Default is `,`
        Must be a one-character string (limitation is built into the Pandas to_csv command).
    * IncludeColumns (str, optional): A list of glob-style patterns to determine the table columns to include in the
        output delimited file. Default: * (All columns are written).
    * ExcludeColumns (str, optional): A list of glob-style patterns to determine the table columns to exclude in the
        output delimited file. Default: '' (No columns are excluded from the output delimited file).
    * WriteHeaderRow (bool, optional): If TRUE, the header row is written. If FALSE, the header row is excluded.
        Default: True
    * WriteIndexColumn (bool, optional): If TRUE, the index column is written. If FALSE, the index column is
        excluded. Default: True
    * SortColumns (str, optional): The names of the Table columns, separated by commas, used to sort the order that
        the table records are written to the delimited file. Default: The first Table column.
    * SortOrder (str, optional): The sort order for the columns specified by SortColumns, using the syntax:
        SortColumn1:Ascending,SortColumn2:Descending
        Default: Ascending
    * ArrayFormat (str, optional): If SquareBrackets, table column array values are written as a string with square
        brackets. If CurlyBrackets, table column array values are written as a string with curly brackets.
        Default: SquareBrackets
    * NullValueFormat (str, optional): If `NULL`, table column array values with None items are changed to NULL.
        If `None`, table column array values with None items remain None. Default: NULL
    """

    # Define the command parameters.
__command_parameter_metadata = [ CommandParameterMetadata("TableID", type("")), CommandParameterMetadata("OutputFile", type("")), CommandParameterMetadata("Delimiter", type("")), CommandParameterMetadata("IncludeColumns", type("")), CommandParameterMetadata("ExcludeColumns", type("")), CommandParameterMetadata("WriteHeaderRow", type("")), CommandParameterMetadata("WriteIndexColumn", type("")), CommandParameterMetadata("SortColumns", type("")), CommandParameterMetadata("SortOrder", type("")), CommandParameterMetadata("ArrayFormat", type("")), CommandParameterMetadata("NullValueFormat", type("")) ] # Choices for parameters, used to validate parameter and display in editor __choices_ArrayFormat = ["SquareBrackets", "CurlyBrackets"] __choices_NullValueFormat = ["Null", "None"] def __init__(self): """ Initialize the command. """ # AbstractCommand data super().__init__() self.command_name = "WriteTableToDelimitedFile" self.command_parameter_metadata = self.__command_parameter_metadata # Command metadata for command editor display self.command_metadata = dict() self.command_metadata[ 'Description'] = "Write a table to a delimited file." 
self.command_metadata['EditorType'] = "Simple" # Command Parameter Metadata self.parameter_input_metadata = dict() # TableID self.parameter_input_metadata[ 'TableID.Description'] = "table identifier" self.parameter_input_metadata['TableID.Label'] = "TableID" self.parameter_input_metadata['TableID.Required'] = True self.parameter_input_metadata[ 'TableID.Tooltip'] = "A Table identifier to write" # OutputFile self.parameter_input_metadata[ 'OutputFile.Description'] = "output delimited file" self.parameter_input_metadata['OutputFile.Label'] = "Output file" self.parameter_input_metadata['OutputFile.Required'] = True self.parameter_input_metadata['OutputFile.Tooltip'] = ( "The output delimited file (relative or absolute path).\n" "${Property} syntax is recognized.") self.parameter_input_metadata['OutputFile.FileSelector.Type'] = 'Write' self.parameter_input_metadata[ 'OutputFile.FileSelector.Title'] = 'Select delimited file' # Delimiter self.parameter_input_metadata[ 'Delimiter.Description'] = "delimiter for file" self.parameter_input_metadata['Delimiter.Label'] = "Delimiter" self.parameter_input_metadata['Delimiter.Tooltip'] = \ "The delimiter of the output delimited file. Must be a single character." self.parameter_input_metadata['Delimiter.Value.Default'] = "," # IncludeColumns self.parameter_input_metadata[ 'IncludeColumns.Description'] = "columns to include" self.parameter_input_metadata[ 'IncludeColumns.Label'] = "Include columns" self.parameter_input_metadata['IncludeColumns.Tooltip'] = \ "A comma-separated list of the glob-style patterns filtering which columns to write." 
self.parameter_input_metadata[ 'IncludeColumns.Value.Default.Description'] = "* - all columns" # ExcludeColumns self.parameter_input_metadata[ 'ExcludeColumns.Description'] = "columns to exclude " self.parameter_input_metadata[ 'ExcludeColumns.Label'] = "Exclude columns" self.parameter_input_metadata['ExcludeColumns.Tooltip'] = \ "A comma-separated list of the glob-style patterns filtering which columns to write. " self.parameter_input_metadata[ 'ExcludeColumns.Value.Default.Description'] = "no columns are excluded" # WriteHeaderRow self.parameter_input_metadata[ 'WriteHeaderRow.Description'] = "how to write headers" self.parameter_input_metadata[ 'WriteHeaderRow.Label'] = "Write header row" self.parameter_input_metadata['WriteHeaderRow.Tooltip'] = ( "If TRUE, the Table's header row is included in the output delimited file.\n" "If FALSE, the Table's header row is not included in the output delimited file." ) self.parameter_input_metadata['WriteHeaderRow.Value.Default'] = "TRUE" self.parameter_input_metadata['WriteHeaderRow.Values'] = [ "", "TRUE", "FALSE" ] # WriteIndexColumn self.parameter_input_metadata[ 'WriteIndexColumn.Description'] = "write index column" self.parameter_input_metadata[ 'WriteIndexColumn.Label'] = "Write index column" self.parameter_input_metadata['WriteIndexColumn.Tooltip'] = ( "If TRUE, the Table's index column is included in the output delimited file. " "The index column header is an empty string.\n" "If FALSE, the Table's index column is not included in the output delimited file." 
) self.parameter_input_metadata[ 'WriteIndexColumn.Value.Default'] = "FALSE" self.parameter_input_metadata['WriteIndexColumn.Values'] = [ "", "TRUE", "FALSE" ] # SortColumns self.parameter_input_metadata[ 'SortColumns.Description'] = "columns to sort data" self.parameter_input_metadata['SortColumns.Label'] = "Sort columns" self.parameter_input_metadata['SortColumns.Tooltip'] = ( "The names of the Table columns, separated by columns, used to sort the order that the table records " "are written to the delimited file") self.parameter_input_metadata[ 'SortColumns.Value.Default'] = "the first table column" # SortOrder self.parameter_input_metadata[ 'SortOrder.Description'] = "sort order for columns" self.parameter_input_metadata['SortOrder.Label'] = "Sort order" self.parameter_input_metadata['SortOrder.Tooltip'] = ( "The sort order for columns specified by SortColumns, using the syntax:\n\n" "SortColumn1:Ascending,SortColumn2:Descending\n\n" "As indicated in the above example, the sort order must be specified as one of " "the following: Ascending or Descending.") self.parameter_input_metadata['SortOrder.Value.Default'] = "Ascending" # ArrayFormat self.parameter_input_metadata[ 'ArrayFormat.Description'] = "how column array values are written" self.parameter_input_metadata['ArrayFormat.Label'] = "Array format" self.parameter_input_metadata['ArrayFormat.Tooltip'] = ( "If SquareBrackets, table column array values are written as a string with square brackets ([]) " "and comma delimiter.\n" "If CurlyBrackets, table column array values are written as a string with curly brackets ({}) " "and comma delimiter.") self.parameter_input_metadata[ 'ArrayFormat.Value.Default'] = "SquareBrackets" self.parameter_input_metadata['ArrayFormat.Values'] = [ "", "SqaureBrackets", "CurlyBrackets" ] # NullValueFormat self.parameter_input_metadata[ 'NullValueFormat.Description'] = "specify how NONE values should be written" self.parameter_input_metadata[ 'NullValueFormat.Label'] = "Null value 
format" self.parameter_input_metadata['NullValueFormat.Tooltip'] = ( "If NULL, None items in table column array values are written as NULL. ex: '[NULL, 4, NULL]'\n" "If None, None items in table column array values are written as None. ex: '[None, 4, None]'" ) self.parameter_input_metadata['NullValueFormat.Value.Default'] = "NULL" self.parameter_input_metadata['NullValueFormat.Values'] = [ "", "NULL", "None" ] # Class data self.warning_count = 0 self.logger = logging.getLogger(__name__) def check_command_parameters(self, command_parameters): """ Check the command parameters for validity. Args: command_parameters: the dictionary of command parameters to check (key:string_value) Returns: None. Raises: ValueError if any parameters are invalid or do not have a valid value. The command status messages for initialization are populated with validation messages. """ warning = "" # Check that parameter TableID is a non-empty, non-None string. pv_TableID = self.get_parameter_value( parameter_name='TableID', command_parameters=command_parameters) if not validators.validate_string(pv_TableID, False, False): message = "TableID parameter has no value." recommendation = "Specify the TableID parameter to indicate the Table to write." warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Check that parameter OutputFile is a non-empty, non-None string. pv_OutputFile = self.get_parameter_value( parameter_name='OutputFile', command_parameters=command_parameters) if not validators.validate_string(pv_OutputFile, False, False): message = "OutputFile parameter has no value." recommendation = "Specify the OutputFile parameter (relative or absolute pathname) to indicate the " \ "location and name of the output delimited file." 
warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Check that the required parameters are valid Boolean values or None. parameters = ['WriteIndexColumn', 'WriteHeaderRow'] for parameter in parameters: parameter_value = self.get_parameter_value( parameter_name=parameter, command_parameters=command_parameters) if not validators.validate_bool(parameter_value, True, False): message = "{} parameter ({}) is not a valid Boolean value.".format( parameter, parameter_value) recommendation = "Specify a valid Boolean value for the {} parameter.".format( parameter) warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Check that optional parameter ArrayFormat is one of the acceptable values or is None. pv_ArrayFormat = self.get_parameter_value( parameter_name="ArrayFormat", command_parameters=command_parameters) if not validators.validate_string_in_list(pv_ArrayFormat, self.__choices_ArrayFormat, none_allowed=True, empty_string_allowed=False, ignore_case=True): message = "ArrayFormat parameter value ({}) is not recognized.".format( pv_ArrayFormat) recommendation = "Specify one of the acceptable values ({}) for the ArrayFormat parameter.".format( self.__choices_ArrayFormat) warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Check that optional parameter NullValueFormat is one of the acceptable values or is None. 
pv_NullValueFormat = self.get_parameter_value( parameter_name="NullValueFormat", command_parameters=command_parameters) if not validators.validate_string_in_list( pv_NullValueFormat, self.__choices_NullValueFormat, none_allowed=True, empty_string_allowed=False, ignore_case=True): message = "NullValueFormat parameter value ({}) is not recognized.".format( pv_NullValueFormat) recommendation = "Specify one of the acceptable values ({}) for the NullValueFormat parameter.".format( self.__choices_NullValueFormat) warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Check for unrecognized parameters. # This returns a message that can be appended to the warning, which if non-empty triggers an exception below. warning = command_util.validate_command_parameter_names(self, warning) # If any warnings were generated, throw an exception. if len(warning) > 0: self.logger.warning(warning) raise ValueError(warning) # Refresh the phase severity self.command_status.refresh_phase_severity( CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS) def __should_write_table(self, table_id, output_file_abs, delimiter, sort_columns): """ Checks the following: * the ID of the Table is an existing Table ID * the output folder is a valid folder * check that the delimiter is only one character * check that the columns within the SortColumns are existing columns Args: table_id: the ID of the Table to be written output_file_abs: the full pathname to the output file delimiter: the delimiter string that will separate each column in the output file sort_columns: a list of table columns used to sort the records Returns: run_write: Boolean. If TRUE, the writing process should be run. If FALSE, it should not be run. """ # List of Boolean values. The Boolean values correspond to the results of the following tests. If TRUE, the # test confirms that the command should be run. 
should_run_command = [] # If the Table ID is not an existing Table ID, raise a FAILURE. should_run_command.append( validators.run_check(self, "IsTableIdExisting", "TableID", table_id, "FAIL")) # If the Table ID does exist and the sort_columns is not None, continue with checks. if True in should_run_command and sort_columns is not None: # Get the Table object table = self.command_processor.get_table(table_id) # Get a list of the columns in the table. columns = table.return_fieldnames() # If one of the SortingColumns does not exist in the Table, raise a FAILURE. invalid_columns = [] for sort_column in sort_columns: if sort_column not in columns: invalid_columns.append(sort_column) if invalid_columns: message = 'The SortColumns ({}) are not columns in the table ({}).'.format( invalid_columns, table_id) recommendation = 'Specify columns within the Table. \nValid columns: {}'.format( columns) self.warning_count += 1 self.logger.error(message) self.command_status.add_to_log( CommandPhaseType.RUN, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) should_run_command.append(False) # Get the full path to the output folder output_folder_abs = io_util.get_path(output_file_abs) # If the output folder is not an existing folder, raise a FAILURE. should_run_command.append( validators.run_check(self, "IsFolderPathValid", "OutputFile", output_folder_abs, "FAIL")) # If the delimiter is not 1 character, raise a FAILURE. should_run_command.append( validators.run_check(self, "IsStringLengthCorrect", "Delimiter", delimiter, "FAIL", other_values=[1])) # Return the Boolean to determine if the process should be run. if False in should_run_command: return False else: return True @staticmethod def __write_table_to_delimited_file(path, table_obj, delimiter, cols_to_include_list, cols_to_exclude_list, include_header, include_index, sort_columns, sorting_dic, use_sq_brackets, use_null_values): """ Writes a GeoProcessor table to a delimited file. 
There are many parameters to customize how the table is written to the delimited file. Args: path (str): the full pathname to the output file (can be an existing file or a new file). If it is existing, the file will be overwritten. table_obj (obj): the GeoProcessor Table to write delimiter (str): a single character delimiter to separate each column in the delimited file cols_to_include_list (list): a list of glob-style pattern strings used to select the columns to write cols_to_exclude_list (list): a list of glob-style pattern strings used to select the columns to NOT write include_header (boolean): boolean to determine if the header row should be written. If TRUE, the header row is written. If FALSE, the header row is not written. include_index (boolean): boolean to determine if the index column should be written. If TRUE, the index column is written. If FALSE, the index column is not written. sort_columns (list): the names of the columns to use to sort the table records sorting_dic (dic): a dictionary that relates each sorting column with the its corresponding order Available options: ASCENDING or DESCENDING Key: the name of the sorting column Value: the sorting order use_sq_brackets (boolean): boolean specifying the types of brackets to use around list/array data values. If TRUE, square brackets are used. If FALSE, curly brackets are used. use_null_values (boolean): boolean specifying if None values in arrays should be represented as None or as NULL. If TRUE, NULL is used. If FALSE, None is used. Return: None """ # Get a list of the table's fieldnames. fieldnames = table_obj.return_fieldnames() # Get a list of the table's records. Each record is a list of data values. all_records = [ table_record.items for table_record in table_obj.table_records ] # Sort the records by the values of a field. try: if sort_columns: for i in range(len(sort_columns)): # Get the sort column. sort_column = sort_columns[i] # Get the column index. 
index = fieldnames.index(sort_column) # Get the appropriate sorting order. if sort_column in list(sorting_dic.keys()): sort_order = sorting_dic[sort_column] else: sort_order = "ASCENDING" if sort_order.upper() == "ASCENDING" and i == 0: s = sorted(all_records, key=itemgetter(index)) elif sort_order.upper() == "DESCENDING" and i == 0: s = sorted(all_records, key=itemgetter(index), reverse=True) elif sort_order.upper() == "ASCENDING": s = sorted(s, key=itemgetter(index)) elif sort_order.upper() == "DESCENDING": s = sorted(s, key=itemgetter(index), reverse=True) all_records = s # If a sorting column is not specified, sort the records with the first column. else: # Sort the records in ascending order of the first table column. all_records = sorted(all_records, key=itemgetter(0)) # Try to sort but do not throw an error if the sort fails. Instead keep the records in the original order. except: all_records = all_records # If an index column is specified to be written, add the index column to each record. if include_index: # Iterate over each record and insert the record count as the first item in the record. for i in range(len(all_records)): all_records[i].insert(0, str(i)) # If a header row is specified to be written, continue. if include_header: # If an index column is specified to be written, add an empty string to the first item of the header list. if include_index: fieldnames.insert(0, "") # Insert the header list (fieldnames) as the first item of the all_records list. all_records.insert(0, fieldnames) # Determine the fieldnames of the columns that should NOT be written to the delimited file. cols_to_remove = string_util.filter_list_of_strings( fieldnames, cols_to_include_list, cols_to_exclude_list, return_inclusions=False) # If an index column is specifies to be written, make sure that the first column (the index column) is not # specified to be removed. 
if include_index: del cols_to_remove[0] # Get the indexes of the columns that should NOT be written to the delimited file. cols_to_remove_indexes = [ fieldnames.index(col) for col in cols_to_remove ] # Iterate over each record in the table. for record in all_records: # Iterate over each column index specified NOT to be written to the delimited file. Remove the record's # data value for each column specified NOT to be written to the delimited file.Must iterate over the # indexes in reverse to ensure that the proper values are removed. for index in sorted(cols_to_remove_indexes, reverse=True): del record[index] # Open the output delimited file. Can be an existing or a new file path. with open(path, "w") as f: # Write the records (one record for each row) to the output delimited file. Use the specified delimiter # character. writer = csv.writer(f, delimiter=delimiter, lineterminator='\n') writer.writerows(all_records) # If configured to use NULL values in an array instead of default None values, continue. if use_null_values: # A dictionary to store all of the strings that are to be replaced with different strings. # Key: the string within the delimited file to be replaced # Value: the replacement string replacement_dictionary = {} # Open the output delimited file. with open(path, "r") as f: # Iterate over each row of the output delimited file. reader = csv.reader(f, delimiter=delimiter) for row in reader: # Iterate over each item in the row. for item in row: # If the item represents a list/array, continue. if item.startswith("[") and item.endswith("]"): # Remove the list/array brackets. new_item = item.replace("[", "") new_item = new_item.replace("]", "") # Convert the string into a list. items = new_item.split(",") # The replacement list holds the values that will be used to replace the original array. replacement_list = [] # Iterate over each item in the list/array. for subitem in items: # Remove leading and ending whitespaces from the item. 
subitem = subitem.strip() # If the item represents a None value, add a "NULL" string to the replacement_list. if subitem.upper() == "NONE": replacement_list.append("NULL") # If the item does not represent a None value, add the original value to the # replacement_list. else: replacement_list.append(str(subitem)) # Join the items in the replacement list back into a string. Add the brackets back. replace_str = ",".join(replacement_list).join( ("[", "]")) # Add the replacement string to the master replacement dictionary. replacement_dictionary[item] = replace_str # If there are items to replace, continue. if replacement_dictionary: # Open the output csv file and read the text in as a variable. with open(path, 'r') as f: file_text = f.read() # Iterate over the characters to be replaced. for orig, new in replacement_dictionary.items(): # Replace the text variable with the correct character. file_text = file_text.replace(orig, new) # Open the output csv file and overwrite the content with the updated text. with open(path, "w") as f: f.write(file_text) # If specified to use curly brackets around array/list data values, continue. # Otherwise, the lists and arrays uses the default square brackets. if not use_sq_brackets: # A dictionary to store all of the strings that are to be replaced with different strings. # Key: the string within the delimited file to be replaced # Value: the replacement string replacement_dictionary = {} # A dictionary to determine which characters are to be replaced (and their replacement characters). replacement_dictionary["["] = "{" replacement_dictionary["]"] = "}" # Open the output csv file and read the text in as a variable. with open(path, 'r') as f: file_text = f.read() # Iterate over the characters to be replaced. for orig, new in replacement_dictionary.items(): # Replace the text variable with the correct character. file_text = file_text.replace(orig, new) # Open the output csv file and overwrite the content with the updated text. 
with open(path, "w") as f: f.write(file_text) # If there are items to replace, continue. if replacement_dictionary: # Open the output csv file and read the text in as a variable. with open(path, 'r') as f: file_text = f.read() # Iterate over the characters to be replaced. for orig, new in replacement_dictionary.items(): # Replace the text variable with the correct character. file_text = file_text.replace(orig, new) # Open the output csv file and overwrite the content with the updated text. with open(path, "w") as f: f.write(file_text) def run_command(self): """ Run the command. Write the Table to a delimited file. Returns: None. Raises: RuntimeError if any warnings occurred during run_command method. """ # Obtain the parameter values. pv_TableID = self.get_parameter_value("TableID") pv_OutputFile = self.get_parameter_value("OutputFile") pv_Delimiter = self.get_parameter_value("Delimiter", default_value=",") pv_IncludeColumns = self.get_parameter_value("IncludeColumns", default_value="*") pv_ExcludeColumns = self.get_parameter_value("ExcludeColumns", default_value="") pv_WriteHeaderRow = self.get_parameter_value("WriteHeaderRow", default_value="True") pv_WriteIndexColumn = self.get_parameter_value("WriteIndexColumn", default_value="False") pv_SortColumns = self.get_parameter_value("SortColumns") pv_SortOrder = self.get_parameter_value("SortOrder", default_value="") pv_ArrayFormat = self.get_parameter_value( "ArrayFormat", default_value="SquareBrackets") pv_NullValueFormat = self.get_parameter_value("NullValueFormat", default_value="Null") # Convert the IncludeColumns, ExcludeColumns, and SortColumns parameter values to lists. cols_to_include = string_util.delimited_string_to_list( pv_IncludeColumns) cols_to_exclude = string_util.delimited_string_to_list( pv_ExcludeColumns) sort_cols_list = string_util.delimited_string_to_list(pv_SortColumns) # Convert the SortOrder to a dictionary. 
sort_dictionary = string_util.delimited_string_to_dictionary_one_value( pv_SortOrder, entry_delimiter=",", key_value_delimiter=":", trim=True) # Convert the OutputFile parameter value relative path to an absolute path and expand for ${Property} syntax output_file_absolute = io_util.verify_path_for_os( io_util.to_absolute_path( self.command_processor.get_property('WorkingDir'), self.command_processor.expand_parameter_value( pv_OutputFile, self))) # Covert the Boolean parameters from string to Boolean values. pv_WriteHeaderRow = string_util.str_to_bool(pv_WriteHeaderRow) pv_WriteIndexColumn = string_util.str_to_bool(pv_WriteIndexColumn) # Run the checks on the parameter values. Only continue if the checks passed. if self.__should_write_table(pv_TableID, output_file_absolute, pv_Delimiter, sort_cols_list): try: # Get the Table object table = self.command_processor.get_table(pv_TableID) # Determine if square brackets should be used depending on the user input of the ArrayFormat parameter. use_sq_brackets = False if pv_ArrayFormat.upper() == "SQUAREBRACKETS": use_sq_brackets = True # Determine if the null values should be used depending on the user input of the NullValueFormat # parameter. use_null_value = False if pv_NullValueFormat.upper() == "NULL": use_null_value = True # Write the table to the delimited file. self.__write_table_to_delimited_file( output_file_absolute, table, pv_Delimiter, cols_to_include, cols_to_exclude, pv_WriteHeaderRow, pv_WriteIndexColumn, sort_cols_list, sort_dictionary, use_sq_brackets, use_null_value) # Raise an exception if an unexpected error occurs during the process except Exception as e: self.warning_count += 1 message = "Unexpected error writing Table {} to delimited file {}.".format( pv_TableID, pv_OutputFile) recommendation = "Check the log file for details." 
self.logger.error(message, exc_info=True) self.command_status.add_to_log( CommandPhaseType.RUN, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Determine success of command processing. Raise Runtime Error if any errors occurred if self.warning_count > 0: message = "There were {} warnings proceeding this command.".format( self.warning_count) raise RuntimeError(message) # Set command status type as SUCCESS if there are no errors. else: self.command_status.refresh_phase_severity( CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class ReadTableFromDataStore(AbstractCommand): """ Reads a Table from a DataStore object. Command Parameters * DataStoreID (str, required): The id of a database datastore to read. ${Property} syntax is recognized. * DataStoreTable (str, optional): The name of the database table to read when querying a single table or view. Can use ${Property} notation to insert processor property values. If specified, do not specify Sql or SqlFile. * Sql(str, optional): The SQL string that will be used to query the database, optionally using ${Property} notation to insert processor property values. If specified, do not specify DataStoreTable or SqlFile. * SqlFile(str, optional): The name of the file containing an SQL string to execute, optionally using ${Property} notation in the SQL file contents to insert processor property values. If specified, do not specify DataStoreTable or Sql. * Top (str, optional): Indicate how many rows to return. Default: return all rows. Must be a string representing a positive integer. Only enabled if DataStoreTable is enabled. * IncludeColumns (str, optional): A list of glob-style patterns to determine the DataStore table columns to read. Default: * (All columns are read). * ExcludeColumns (str, optional): A list of glob-style patterns to determine the DataStore table columns to read. Default: '' (No columns are excluded - All columns are read). * TableID (str, required): Identifier to assign to the output table in the GeoProcessor, which allows the table data to be used with other commands. A new table will be created. Can be specified with ${Property}. * IfTableIDExists (str, optional): This parameter determines the action that occurs if the TableID already exists within the GeoProcessor. Available options are: `Replace`, `ReplaceAndWarn`, `Warn` and `Fail` (Refer to user documentation for detailed description.) Default value is `Replace`. """ # Define the command parameters. 
__command_parameter_metadata = [ CommandParameterMetadata("DataStoreID", type("")), CommandParameterMetadata("DataStoreTable", type("")), CommandParameterMetadata("Sql", type("")), CommandParameterMetadata("SqlFile", type("")), CommandParameterMetadata("Top", type("")), CommandParameterMetadata("IncludeColumns", type("")), CommandParameterMetadata("ExcludeColumns", type("")), CommandParameterMetadata("TableID", type("")), CommandParameterMetadata("IfTableIDExists", type("")) ] # Choices for IfTableIDExists, used to validate parameter and display in editor __choices_IfTableIDExists = ["Replace", "ReplaceAndWarn", "Warn", "Fail"] def __init__(self): """ Initialize the command. """ # AbstractCommand data super().__init__() self.command_name = "ReadTableFromDataStore" self.command_parameter_metadata = self.__command_parameter_metadata # Command metadata for command editor display self.command_metadata = dict() self.command_metadata[ 'Description'] = "Read a table from a database DataStore." self.command_metadata['EditorType'] = "Simple" # Command Parameter Metadata self.parameter_input_metadata = dict() # DataStoreID self.parameter_input_metadata[ 'DataStoreID.Description'] = "database datastore to read" self.parameter_input_metadata['DataStoreID.Label'] = "DataStoreID" self.parameter_input_metadata['DataStoreID.Required'] = True self.parameter_input_metadata['DataStoreID.Tooltip'] = \ "The ID of a database DataStore to read. ${Property} syntax is recognized." # DataStoreTable self.parameter_input_metadata[ 'DataStoreTable.Description'] = "database table or view to read" self.parameter_input_metadata[ 'DataStoreTable.Label'] = "DataStore table" self.parameter_input_metadata['DataStoreTable.Tooltip'] = ( "The name of the database table to read when querying a single table. " \ "${Property} syntax is recognized. \n" \ "If specified, do not specify Sql or SqlFile.") # TODO @jurentie 01/22/19 do these need to be read file selector type? 
self.parameter_input_metadata[ 'DataStoreTable.FileSelector.Type'] = "Read" self.parameter_input_metadata[ 'DataStoreTable.FileSelector.Title'] = "Select DataStore Table to read" # Sql self.parameter_input_metadata[ 'Sql.Description'] = "SQL to query the database" self.parameter_input_metadata['Sql.Label'] = "SQL" self.parameter_input_metadata['Sql.Tooltip'] = ( "The SQL string that will be used to query the database. ${Property} syntax is recognized.\n" "If specified, do not specify DataStoreTable or SqlFile.") # SqlFile self.parameter_input_metadata[ 'SqlFile.Description'] = "name of the file containing SQL string" self.parameter_input_metadata['SqlFile.Label'] = "SQL File" self.parameter_input_metadata['SqlFile.Tooltip'] = ( "The name of the file containing an SQL string to execute. ${Property} syntax is recognized.\n" "If specified, do not specify DataStoreTable or Sql.") self.parameter_input_metadata['SqlFile.FileSelector.Type'] = "Read" self.parameter_input_metadata[ 'SqlFile.FileSelector.Title'] = "Select the SQL file" # TableID self.parameter_input_metadata[ 'TableID.Description'] = "output table identifier" self.parameter_input_metadata['TableID.Label'] = "TableID" self.parameter_input_metadata['TableID.Required'] = True self.parameter_input_metadata['TableID.Tooltip'] = \ "A Table identifier for the table to be created to contain results. ${Property} syntax is recognized." # Top self.parameter_input_metadata[ 'Top.Description'] = "number of rows to read" self.parameter_input_metadata['Top.Label'] = "Top" self.parameter_input_metadata['Top.Tooltip'] = \ "An integer to indicate the number of rows that should be returned. Must be a positive integer. " self.parameter_input_metadata[ 'Top.Value.Default.Description'] = "All rows are returned." 
# IncludeColumns self.parameter_input_metadata['IncludeColumns.Description'] =\ "list of patterns to determine the columns to read" self.parameter_input_metadata[ 'IncludeColumns.Label'] = "Include columns" self.parameter_input_metadata['IncludeColumns.Tooltip'] = \ "A list of glob-style patterns to determine the DataStore table columns to read." self.parameter_input_metadata[ 'IncludeColumns.Value.Default.Description'] = "* - all columns read" # ExcludeColumns self.parameter_input_metadata['ExcludeColumns.Description'] =\ "list of patterns to determine the columns to NOT read" self.parameter_input_metadata[ 'ExcludeColumns.Label'] = "Exclude columns" self.parameter_input_metadata['ExcludeColumns.Tooltip'] = \ "A list of glob-style patterns to determine the DataStore table columns to NOT read. " self.parameter_input_metadata[ 'ExcludeColumns.Value.Default'] = "No columns are excluded" # IfTableIDExists self.parameter_input_metadata[ 'IfTableIDExists.Description'] = "action if TableID already exists" self.parameter_input_metadata[ 'IfTableIDExists.Label'] = "If table exists" self.parameter_input_metadata['IfTableIDExists.Tooltip'] = ( "The action that occurs if the TableID already exists within the GeoProcessor. \n" "Replace : The existing Table within the GeoProcessor is overwritten with the new Table. " "No warning is logged." "ReplaceAndWarn: The existing Table within the GeoProcessor is overwritten with the new Table. " "A warning is logged.\n" "Warn : The new Table is not created. A warning is logged.\n" "Fail : The new Table is not created. A fail message is logged.") self.parameter_input_metadata['IfTableIDExists.Values'] = [ "", "Replace", "ReplaceAndWarn", "Warn", "Fail" ] self.parameter_input_metadata[ 'IfTableIDExists.Value.Default'] = "Replace" # Class data self.warning_count = 0 self.logger = logging.getLogger(__name__) def check_command_parameters(self, command_parameters): """ Check the command parameters for validity. 
Args: command_parameters: the dictionary of command parameters to check (key:string_value) Returns: None. Raises: ValueError if any parameters are invalid or do not have a valid value. The command status messages for initialization are populated with validation messages. """ warning = "" # Check that parameter TableID is a non-empty, non-None string. pv_TableID = self.get_parameter_value( parameter_name='TableID', command_parameters=command_parameters) if not validators.validate_string(pv_TableID, False, False): message = "TableID parameter has no value." recommendation = "Specify the TableID parameter to indicate the Table to write." warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Check that parameter DataStoreID is a non-empty, non-None string. pv_DataStoreID = self.get_parameter_value( parameter_name='DataStoreID', command_parameters=command_parameters) if not validators.validate_string(pv_DataStoreID, False, False): message = "DataStoreID parameter has no value." recommendation = "Specify the DataStoreID parameter (relative or absolute pathname) to indicate the " \ "location and name of the output delimited file." warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Check that one (and only one) selection method is a non-empty and non-None string. 
is_string_list = [] selection_method_parameter_list = ["Sql", "SqlFile", "DataStoreTable"] for parameter in selection_method_parameter_list: parameter_value = self.get_parameter_value( parameter_name=parameter, command_parameters=command_parameters) is_string_list.append( validators.validate_string(parameter_value, False, False)) if not is_string_list.count(True) == 1: message = "Must enable one (and ONLY one) of the following parameters: {}".format( selection_method_parameter_list) recommendation = "Specify the value for one (and ONLY one) of the following parameters: {}".format( selection_method_parameter_list) warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Run the checks for the Top parameter. pv_Top = self.get_parameter_value( parameter_name='Top', command_parameters=command_parameters) pv_DataStoreTable = self.get_parameter_value( parameter_name="DataStoreTable", command_parameters=command_parameters) if pv_Top: # Check that the Top parameter is only used with the DataStoreTable selection. if is_string_list.count(True) == 1 and not pv_DataStoreTable: message = "The Top parameter is only valid when the DataStoreTable is enabled. The Top parameter" \ " value ({}) will be ignored.".format(pv_Top) recommendation = "To use the Top parameter, specify a value for the DataStoreTable parameter." self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.WARNING, message, recommendation)) # If the DataStoreTable parameter is enabled, check that the Top parameter is an integer or None. if pv_DataStoreTable and not validators.validate_int( pv_Top, True, False): message = "Top parameter value ({}) is not a valid integer value.".format( pv_Top) recommendation = "Specify a positive integer for the Top parameter to specify how many rows to return." 
warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # If the DataStoreTable parameter is enabled, check that the Top parameter is positive. elif pv_DataStoreTable and not int(pv_Top) > 0: message = "Top parameter value ({}) is not a positive, non-zero integer value.".format( pv_Top) recommendation = "Specify a positive integer for the Top parameter to specify how many rows to return." warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Check that optional parameter IfTableIDExists is one of the acceptable values or is None. pv_IfTableIDExists = self.get_parameter_value( parameter_name="IfTableIDExists", command_parameters=command_parameters) if not validators.validate_string_in_list( pv_IfTableIDExists, self.__choices_IfTableIDExists, none_allowed=True, empty_string_allowed=False, ignore_case=True): message = "IfTableIDExists parameter value ({}) is not recognized.".format( pv_IfTableIDExists) recommendation = "Specify one of the acceptable values ({}) for the IfTableIDExists parameter.".format( self.__choices_IfTableIDExists) warning += "\n" + message self.command_status.add_to_log( CommandPhaseType.INITIALIZATION, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Check for unrecognized parameters. # This returns a message that can be appended to the warning, which if non-empty triggers an exception below. warning = command_util.validate_command_parameter_names(self, warning) # If any warnings were generated, throw an exception. 
if len(warning) > 0: self.logger.warning(warning) raise ValueError(warning) # Refresh the phase severity self.command_status.refresh_phase_severity( CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS) def __should_read_table(self, sql_file_abs, table_id, datastore_id): """ Checks the following: * the SqlFile (absolute) is a valid file, if not None * the ID of the Table is unique (not an existing Table ID) * the DataStore exists Args: sql_file_abs (str): the full pathname to the sql file table_id (str): the ID of the output Table datastore_id (str): the ID of the DataStore to read Returns: Boolean. If TRUE, the reading process should be run. If FALSE, it should not be run. """ # List of Boolean values. The Boolean values correspond to the results of the following tests. If TRUE, the # test confirms that the command should be run. should_run_command = [] # Only run following check if SqlFile method is being used. if sql_file_abs: # If the SqlFile is not a valid file path, raise a FAILURE. should_run_command.append( validators.run_check(self, "IsFilePathValid", "SqlFile", sql_file_abs, "FAIL")) # If the TableID is the same as an already-existing TableID, raise a WARNING or FAILURE (depends on the # value of the IfTableIDExists parameter.) should_run_command.append( validators.run_check(self, "IsTableIdUnique", "TableID", table_id, None)) # If the DataStore ID is not an existing DataStore ID, raise a FAILURE. should_run_command.append( validators.run_check(self, "IsDataStoreIdExisting", "DataStoreID", datastore_id, "FAIL")) # Return the Boolean to determine if the process should be run. if False in should_run_command: return False else: return True @staticmethod def __read_table_from_datastore(ds, table_name, table_id, top, sql, cols_to_include, cols_to_exclude): """ Creates a GeoProcessor table object from a DataStore table. 
Args: ds (obj): the DataStore object that contains the DataStore table to read table_name (str): the name of the DataStore table to read Can be None if using the Sql method or SqlFile method. table_id (str): the id of the GeoProcessor Table that is to be created top (int): the number of rows from the DataStore Table to read Can be None if using the Sql method or SqlFile method. sql (str): the SQL statement to select out the desired data from the DataStore table. Can be None if using the DataStoreTable method. cols_to_include (list): a list of glob-style patterns representing the DataStore Table columns to read Can be None if using the Sql method or SqlFile method. cols_to_exclude (list): a list of glob-style patterns representing the DataStore Table columns to read Can be None if using the Sql method or SqlFile method. Return: A GeoProcessor Table object. """ # Create a GeoProcessor Table object. table = Table(table_id) # If a SQL statement has been specified, then continue. if sql: # Run the SQL statement result_from_sql = ds.connection.execute(sql) # Get the columns from the sql statement. table_cols = ds.connection.execute(sql).keys() # Get the first row from the result set. row = result_from_sql.fetchone() # An empty list to hold the columns that were included in the result set in response to the user-specified # sql. included_cols = [] # Iterate over all of the available columns in the DataStore table. for table_col in table_cols: # Try to read the value of the DataStore table column. If it does not throw an error, it is known that # the column was included in the result set of the user-specified SQL statement. Add the column name to # the included_cols list. try: value = row[table_col] included_cols.append(table_col) # If an error is thrown, it is known that the column was not included in the result set of the # user-specified SQL statement. Do not add the column name to the included_cols list. 
except: pass # Iterate over the DataStore table columns that do have results from the user-specified SQL statement. for included_col in included_cols: # Create a TableField object and assign the field "name" as the column name. table_field = TableField(included_col) # Run the SQL statement result_from_sql = ds.connection.execute(sql) # Iterate over the rows of the DataStore table data. for row in result_from_sql: # Add the row data for the column to the item list of the TableField. table_field.items.append(row[included_col]) # Determine the data type of the column's data. # A list that holds the data type for each data value in the column. data_types = [] # Iterate over each of the data values in the column. for item in table_field.items: # Add the data type of the item to the data_types list. Ignore data values that are None. if item is not None: data_types.append(type(item)) # If the data_types list is empty, assume that all values in the column are set to None. if not data_types: table_field.data_type = None # Set the data_type attribute of the TableField object to that specified in the data_types list. elif all(x == data_types[0] for x in data_types): table_field.data_type = data_types[0] # All of the data types in the list should be the same value because database columns require that # the data in each column is only one data type. If more than one data type exists in the data_types # list, print an error message. else: print( "There was an error. Not all the data types are the same." ) # Add the TableField object to the Table attributes. table.add_table_field(table_field) # Get the number of row entries in the TableField. This will be the same number for each of the # TableField objects so only the count of the entries in the last TableField object is used in the # remaining code. table.entry_count = len(table_field.items) # If a SQL statement has not been specified, continue. else: # Read the DataStore table into a DataStore Table object. 
ds_table_obj = ds.metadata.tables[table_name] # Query the DataStore table. The allows access to table information. q = ds.session.query(ds_table_obj) # Select all fields and rows of the table. s = sqlalchemy.sql.select([ds_table_obj]) # Get a list of all of the column names. table_cols = [col["name"] for col in q.column_descriptions] # Sort the list of column names to create create a second list that only includes the columns to read. table_cols_to_read = string_util.filter_list_of_strings( table_cols, cols_to_include, cols_to_exclude, True) # Sort the table_cols_to_read list to order in the same order as the table columns in the DataStore table. cols_names = ds.return_col_names(table_name) table_cols_to_read = [ col_name for col_name in cols_names if col_name in table_cols_to_read ] # Iterate over the column names to read. for col in table_cols_to_read: # Create a TableField object and assign the field "name" as the column name. table_field = TableField(col) # Run the SQL query to get the DataStore tables' data. Save as result variable. result = ds.connection.execute(s) # If configured to limit the table read to a specified number of top rows, continue. if top: # Counter to track the number of rows read into the Table Field items. count = 0 # Iterate over the rows of the DataStore table data. for row in result: # If the current row count is less than the desired row count, continue. while count < top: # Add the row data for the column to the item list of the TableField. Increase the counter. table_field.items.append(row[col]) count += 1 # If configured to read all rows of the DataStore table, continue. else: # Iterate over the rows of the DataStore table data. for row in result: # Add the row data for the column to the item list of the TableField. table_field.items.append(row[col]) # Determine the data type of the column's data. # A list that holds the data type for each data value in the column. 
data_types = [] # Iterate over each of the data values in the column. for item in table_field.items: # Add the data type of the item to the data_types list. Ignore data values that are None. if item is not None: data_types.append(type(item)) # If the data_types list is empty, assume that all values in the column are set to None. if not data_types: table_field.data_type = None # Set the data_type attribute of the TableField object to that specified in the data_types list. elif all(x == data_types[0] for x in data_types): table_field.data_type = data_types[0] # All of the data types in the list should be the same value because database columns require that the # data in each column is only one data type. If more than one data type exists in the data_types list, # print an error message. else: print( "There was an error. Not all the data types are the same." ) # Add the TableField object to the Table attributes. table.add_table_field(table_field) # Get the number of rows in the TableField. This will be the same number for each of the TableField # objects so only the count of the entries in the last TableField object is used in the remaining code. table.entry_count = len(table_field.items) # Iterate over the number of row entries. for i_row in range(table.entry_count): # Create a TableRecord object. table_record = TableRecord() # Iterate over the table fields. for i_col in range(len(table.table_fields)): # Get the data value for the specified row and the specified field. new_item = table.table_fields[i_col].items[i_row] # Assign that data value to the items list of the TableRecord. table_record.add_item(new_item) # Add the TableRecord object to the Table attributes. table.table_records.append(table_record) # Return the GeoProcessor Table object. return table def run_command(self): """ Run the command. Read the Table from the DataStore Returns: None. Raises: RuntimeError if any warnings occurred during run_command method. """ # Obtain the parameter values. 
pv_DataStoreID = self.get_parameter_value("DataStoreID") pv_DataStoreTable = self.get_parameter_value("DataStoreTable") pv_Sql = self.get_parameter_value("Sql") pv_SqlFile = self.get_parameter_value("SqlFile") pv_Top = self.get_parameter_value("Top") pv_TableID = self.get_parameter_value("TableID") pv_IncludeColumns = self.get_parameter_value("IncludeColumns", default_value="*") pv_ExcludeColumns = self.get_parameter_value("ExcludeColumns", default_value="") # Expand for ${Property} syntax. pv_DataStoreID = self.command_processor.expand_parameter_value( pv_DataStoreID, self) pv_DataStoreTable = self.command_processor.expand_parameter_value( pv_DataStoreTable, self) pv_Sql = self.command_processor.expand_parameter_value(pv_Sql, self) pv_TableID = self.command_processor.expand_parameter_value( pv_TableID, self) # Convert the IncludeColumns and ExcludeColumns parameter values to lists. cols_to_include = string_util.delimited_string_to_list( pv_IncludeColumns) cols_to_exclude = string_util.delimited_string_to_list( pv_ExcludeColumns) # If available, convert the SqlFile parameter value relative path to an absolute path and expand for # ${Property} syntax. if pv_SqlFile: pv_SqlFile = io_util.verify_path_for_os( io_util.to_absolute_path( self.command_processor.get_property('WorkingDir'), self.command_processor.expand_parameter_value( pv_SqlFile, self))) # Run the checks on the parameter values. Only continue if the checks passed. if self.__should_read_table(pv_SqlFile, pv_TableID, pv_DataStoreID): try: # Get the DataStore object datastore = self.command_processor.get_datastore( pv_DataStoreID) # Set the SQL statement to None until proof that SQL statement exists. sql_statement = None # If using the Sql method, the sql_statement is the user-provided sql statement. if pv_Sql: sql_statement = pv_Sql if '%' in sql_statement: sql_statement = sql_statement.replace('%', '%%') # If using the Sql method, the sql_statement is the user-provided sql statement within a file. 
if pv_SqlFile: # Get the SQL statement from the file. f = open(pv_SqlFile, 'r') sql_statement = f.read().strip() if '%' in sql_statement: sql_statement = sql_statement.replace('%', '%%') # Create the Table from the DataStore. table = self.__read_table_from_datastore( datastore, pv_DataStoreTable, pv_TableID, pv_Top, sql_statement, cols_to_include, cols_to_exclude) # Add the table to the GeoProcessor's Tables list. self.command_processor.add_table(table) # Raise an exception if an unexpected error occurs during the process except Exception as e: self.warning_count += 1 message = "Unexpected error reading Table {} from DataStore ({}).".format( pv_TableID, pv_DataStoreID) recommendation = "Check the log file for details." self.logger.error(message, exc_info=True) self.command_status.add_to_log( CommandPhaseType.RUN, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Determine success of command processing. Raise Runtime Error if any errors occurred if self.warning_count > 0: message = "There were {} warnings proceeding this command.".format( self.warning_count) raise RuntimeError(message) # Set command status type as SUCCESS if there are no errors. else: self.command_status.refresh_phase_severity( CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class RunCommands(AbstractCommand):
    """
    The RunCommands command runs a command file using a separate 'child' command processor.

    Command Parameters
    * CommandFile (str, required): the command file to run, relative to the master command file
      or absolute. ${Property} syntax is recognized.
    * ExpectedStatus (str, optional): the expected maximum severity status from running the
      command file, one of `Unknown`, `Success`, `Warning`, `Failure`. Used for automated
      testing. Default: `Success`.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("CommandFile", type("")),
        CommandParameterMetadata("ExpectedStatus", type(""))]

    # Choices for ExpectedStatus, used to validate parameter and display in editor
    __choices_ExpectedStatus = ["Unknown", "Success", "Warning", "Failure"]

    # Strings written to the regression test report to indicate a passed/failed test.
    __PASS = "PASS"
    __FAIL = "FAIL"

    def __init__(self):
        """
        Initialize a new instance of the command.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "RunCommands"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = (
            "Run a command file using a separate command processor as a 'child' of the main processor.\n"
            "This command can be used to manage workflow where multiple command files are run, "
            "and is also used extensively for testing,\n"
            "where a test suite consists of running separate test case command files.")
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # CommandFile
        self.parameter_input_metadata['CommandFile.Description'] = "the command file to run"
        self.parameter_input_metadata['CommandFile.Label'] = "Command file"
        self.parameter_input_metadata['CommandFile.Required'] = True
        self.parameter_input_metadata['CommandFile.Tooltip'] = \
            "The command file to run. A path relative to the master command file can be specified. Can use ${Property}."
        self.parameter_input_metadata['CommandFile.FileSelector.Type'] = "Read"
        self.parameter_input_metadata['CommandFile.FileSelector.Title'] = "Select command file to run"
        # ExpectedStatus
        self.parameter_input_metadata['ExpectedStatus.Description'] = "used for testing"
        self.parameter_input_metadata['ExpectedStatus.Label'] = "Expected status"
        self.parameter_input_metadata['ExpectedStatus.Tooltip'] = (
            "Used for testing – indicates the expected status from the command, one of:\n\n"
            "Unknown\n"
            "Success\n"
            "Warning\n"
            "Failure\n\n"
            "If this parameter is NOT used, the command log messages from commands that are run will be\n"
            "appended to the RunCommands command log. However, using this parameter will not append those\n"
            "messages – this is used in automated testing to allow a successful test even when there are\n"
            "warning and failure messages.")
        self.parameter_input_metadata['ExpectedStatus.Value.Default'] = "Success"
        self.parameter_input_metadata['ExpectedStatus.Values'] = ["", "Unknown", "Success", "Warning", "Failure"]

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            Nothing.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning_message = ""
        logger = logging.getLogger(__name__)

        # CommandFile is required
        pv_CommandFile = self.get_parameter_value(parameter_name='CommandFile',
                                                  command_parameters=command_parameters)
        if not validators.validate_string(pv_CommandFile, False, False):
            message = "The CommandFile must be specified."
            recommendation = "Specify the command file."
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # ExpectedStatus is optional, will default to Success at runtime
        pv_ExpectedStatus = self.get_parameter_value(parameter_name='ExpectedStatus',
                                                     command_parameters=command_parameters)
        if not validators.validate_string_in_list(pv_ExpectedStatus, self.__choices_ExpectedStatus, True, True):
            message = "ExpectedStatus parameter is invalid."
            recommendation = "Specify the ExpectedStatus parameter as blank or one of " + \
                             str(self.__choices_ExpectedStatus)
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty
        # triggers an exception below.
        warning_message = command_util.validate_command_parameter_names(self, warning_message)

        # If any warnings were generated, throw an exception
        if len(warning_message) > 0:
            logger.warning(warning_message)
            raise ValueError(warning_message)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def run_command(self):
        """
        Run the command.  Run a separate command file and save results to the current processor.

        Returns:
            None.

        Raises:
            ValueError: if a runtime input error occurs.
            RuntimeError: if a runtime error occurs.
        """
        # The following import is deferred until runtime because if included at the top of the module
        # it causes a circular dependency and the GeoProcessor won't load
        from geoprocessor.core.CommandFileRunner import CommandFileRunner

        warning_count = 0
        logger = logging.getLogger(__name__)

        # Get data for the command
        pv_CommandFile = self.get_parameter_value('CommandFile')
        pv_ExpectedStatus = self.get_parameter_value('ExpectedStatus')
        if pv_ExpectedStatus == "":
            pv_ExpectedStatus = None  # Default - was not specified in the command

        # Runtime checks on input.
        # Convert the command file path to absolute once; it is reused below and in the except handler.
        command_file_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(self.command_processor.get_property('WorkingDir'),
                                     self.command_processor.expand_parameter_value(pv_CommandFile, self)))

        if warning_count > 0:
            message = "There were " + str(warning_count) + " warnings about command parameters."
            logger.warning(message)
            raise ValueError(message)

        # Run the command file
        try:
            logger.info('Processing commands from file "' + command_file_absolute + '" using command file runner.')

            runner = CommandFileRunner()
            # This will set the initial working directory of the runner to that of the command file...
            file_found = True
            try:
                runner.read_command_file(command_file_absolute)
            except FileNotFoundError:
                warning_count += 1
                message = 'File does not exist:  "' + command_file_absolute + '"'
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message,
                                     "Confirm that the command file exists."))
                # Set the following to skip code below
                file_found = False

            # If the command file is not enabled, don't need to initialize or process
            # TODO SAM 2013-04-20 Even if disabled, will still run discovery above
            # - need to disable discovery in this case
            is_enabled = runner.is_command_file_enabled()
            expected_status = str(CommandStatusType.SUCCESS)
            if pv_ExpectedStatus is not None:
                expected_status = pv_ExpectedStatus

            if not file_found:
                # Need to add logic to indicate a failed test
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE,
                                     "Command file does not exist.",
                                     "Confirm that the command file exists."))
                # Set the results to fail
                test_pass_fail = self.__FAIL

            elif is_enabled:
                # TODO smalers, 2018-01-26 Java code set datastores here
                # TODO SAM 2010-09-30 Need to evaluate how to share properties - issue is that built-in properties
                # are handled explicitly whereas user-defined properties are in a list that can be easily shared.
                # Also, some properties like the working directory receive special treatment.
                # For now don't bite off the property issue
                runner.run_commands(env_properties=self.command_processor.env_properties)
                logger.info("Done running commands")

                # Total runtime for the commands
                # long run_time_total = TSCommandProcessorUtil.getRunTimeTotal(runner.getProcessor().getCommands());

                # Set the CommandStatus for this command to the most severe status of the
                # commands file that was just run.
                max_severity = command_util.get_command_status_max_severity(runner.command_processor)
                logger.info("Max severity from commands = " + str(max_severity))
                test_pass_fail = "????"  # Status for the test, which is not always the same as max_severity

                if pv_ExpectedStatus is not None:
                    expected_status_type = CommandStatusType.value_of(expected_status)
                    if max_severity is expected_status_type:
                        # Expected status matches the actual so consider this a success.
                        # This should generally be used only when running a test that we expect to fail (e.g., run
                        # obsolete command or testing handling of errors).
                        self.command_status.add_to_log(
                            CommandPhaseType.RUN,
                            CommandLogRecord(CommandStatusType.SUCCESS,
                                             "Severity for RunCommands (" + str(max_severity) +
                                             ") is max of commands in command file that was run - matches expected (" +
                                             expected_status + ") so RunCommands status=Success.",
                                             "Additional status messages are omitted to allow test to be success - " +
                                             "refer to log file if warning/failure."))
                        # TODO SAM 2008-07-09 Need to evaluate how to append all the log messages but still
                        # have a successful status that shows in the displays.
                        # DO NOT append the messages from the command because their status will cause the
                        # error displays to show problem indicators.
                        test_pass_fail = self.__PASS
                    else:
                        # Expected status does NOT match the actual status so this is a failure.
                        # Log the record with FAILURE severity so the command status agrees with the
                        # "status=Failure" message and the FAIL test result.
                        self.command_status.add_to_log(
                            CommandPhaseType.RUN,
                            CommandLogRecord(CommandStatusType.FAILURE,
                                             "Severity for RunCommands (" + str(max_severity) +
                                             ") is max of commands in command file that was run - " +
                                             "does not match expected (" + expected_status +
                                             ") so RunCommands status=Failure.",
                                             "Check the command to confirm the expected status."))
                        # TODO SAM 2008-07-09 Need to evaluate how to append all the log messages but still
                        # have a successful status that shows in the displays.
                        # DO NOT append the messages from the command because their status will cause the
                        # error displays to show problem indicators.
                        test_pass_fail = self.__FAIL
                else:
                    # TODO smalers 2018-01-28 evaluate whether this is needed given that success is default expected
                    # status
                    # Expected status is not specified
                    self.command_status.add_to_log(
                        CommandPhaseType.RUN,
                        CommandLogRecord(max_severity,
                                         "Severity for RunCommands (" + str(max_severity) +
                                         ") is max of commands in command file that was run.",
                                         "Status messages from commands that were run are appended to RunCommand " +
                                         "status messages."))

                    # Append the log records from the command file that was run.
                    # The status contains lists of CommandLogRecord for each run mode.
                    # For RunCommands() the log messages should be associated with the originating command,
                    # not this RunCommand command
                    logger.info("Appending log records")
                    command_util.append_command_status_log_records(
                        self.command_status, runner.command_processor.commands)
                    if max_severity.value >= CommandStatusType.WARNING.value:
                        test_pass_fail = self.__FAIL
                    else:
                        test_pass_fail = self.__PASS

                # Add a record to the regression test report...
                logger.info("Adding record to regression test report")
                run_time_total = 0
                StartRegressionTestResultsReport.append_to_regression_test_report(
                    is_enabled, run_time_total, test_pass_fail, expected_status, max_severity, command_file_absolute)

                # If it was requested to append the results to the calling processor, get
                # the results from the runner and do so...
                # if ( (AppendResults != null) && AppendResults.equalsIgnoreCase("true")) {
                #     TSCommandProcessor processor2 = runner.getProcessor();
                #     Object o_tslist = processor2.getPropContents("TSResultsList");
                #     PropList request_params = new PropList ( "" );
                #     if ( o_tslist != null ) {
                #         @SuppressWarnings("unchecked")
                #         List<TS> tslist = (List<TS>)o_tslist;
                #         int size = tslist.size()
                #         TS ts;
                #         for ( int i = 0; i < size; i++ ) {
                #             ts = tslist.get(i);
                #             request_params.setUsingObject( "TS", ts );
                #             processor.processRequest( "AppendTimeSeries", request_params );
                #         }
                #     }
                # }
                logger.info("...done processing commands from file.")
            else:
                # Add a record to the regression report (the is_enabled value is what is important for the report
                # because the test is not actually run)...
                # TODO smalers 2018-01-26 finish...
                logger.info("Command file is not enabled")
                run_time_total = 0
                test_pass_fail = ""
                max_severity = CommandStatusType.UNKNOWN
                StartRegressionTestResultsReport.append_to_regression_test_report(
                    is_enabled, run_time_total, test_pass_fail, expected_status, max_severity, command_file_absolute)

        except Exception:
            # A single handler replaces the original `except Exception` + bare `except:` pair -
            # the bare clause only caught BaseException (e.g. KeyboardInterrupt), which should propagate.
            warning_count += 1
            message = 'Unexpected error running command file "' + command_file_absolute + '"'
            traceback.print_exc(file=sys.stdout)
            # exc_info=True records the stack trace; do not also pass the exception as a lazy-format
            # argument because the message contains no placeholder for it.
            logger.error(message, exc_info=True)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 "See the log file for details."))

        if warning_count > 0:
            message = "There were " + str(warning_count) + " warnings processing the command."
            logger.warning(message)
            raise RuntimeError(message)

        self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class CreateGeoLayerFromGeometry(AbstractCommand):
    """
    Creates a new GeoLayer. The feature geometry is provided by the parameters.

    Command Parameters
    * NewGeoLayerID (str, required): The ID of the new GeoLayer.
    * GeometryFormat (str, required): The format of the input geometry. Can be `BoundingBox`, `WKT` or `WKB`.
        Refer to user documentation for descriptions of each geometry format.
    * GeometryData (str, required): The geometry data in the format specified by the GeometryFormat parameter.
    * CRS (str, required): The coordinate reference system of the new GeoLayer. The units of the GeometryData
        must match the units of the CRS.
    * IfGeoLayerIDExists (str, optional): This parameter determines the action that occurs if the NewGeoLayerID
        already exists within the GeoProcessor. Available options are: `Replace`, `ReplaceAndWarn`, `Warn` and
        `Fail` (Refer to user documentation for detailed description.) Default value is `Replace`.
    """

    # Define the command parameters.
    # All parameter values are strings, so register each with the str type (type("")).
    # (type(str) would register the `type` metaclass, inconsistent with the other commands.)
    __command_parameter_metadata = [
        CommandParameterMetadata("NewGeoLayerID", type("")),
        CommandParameterMetadata("GeometryFormat", type("")),
        CommandParameterMetadata("GeometryData", type("")),
        CommandParameterMetadata("CRS", type("")),
        CommandParameterMetadata("IfGeoLayerIDExists", type(""))]

    def __init__(self):
        """
        Initialize the command.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "CreateGeoLayerFromGeometry"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Create a new GeoLayer from input geometry data."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # NewGeoLayerID
        self.parameter_input_metadata['NewGeoLayerID.Description'] = "id of the new GeoLayer"
        self.parameter_input_metadata['NewGeoLayerID.Label'] = "New GeoLayerID"
        self.parameter_input_metadata['NewGeoLayerID.Required'] = True
        self.parameter_input_metadata['NewGeoLayerID.Tooltip'] = "The ID of the new GeoLayer."
        # GeometryFormat
        self.parameter_input_metadata['GeometryFormat.Description'] = "format of the geometry data"
        self.parameter_input_metadata['GeometryFormat.Label'] = "Geometry format"
        self.parameter_input_metadata['GeometryFormat.Required'] = True
        self.parameter_input_metadata['GeometryFormat.Tooltip'] = (
            "The format of the GeometryData. Choose from one of the options below.\n"
            "WKT: Well-Known Text is text representing vector geometry.\n"
            "BoundingBox: Bounding Box Coordinates are a list of 4 coordinates representing the minimum and maximum "
            "latitude and longitude of a POLYGON vector.\n"
            "WKB: Well-Known Binary is hexadecimal text representing vector geometry.\n"
            "Only available with QGIS version 3.0 or later.")
        self.parameter_input_metadata['GeometryFormat.Values'] = ["", "WKT", "BoundingBox", "WKB"]
        # GeometryData
        self.parameter_input_metadata['GeometryData.Description'] = "geometry data for the new GeoLayer"
        self.parameter_input_metadata['GeometryData.Label'] = "Geometry data"
        self.parameter_input_metadata['GeometryData.Required'] = True
        self.parameter_input_metadata['GeometryData.Tooltip'] = (
            "The geometry data for the new GeoLayer.\n"
            "The units are the same as the units of the coordinate reference system (CRS).\n"
            "If GeometryFormat is WKT... use the syntax provided in the reference.\n"
            "If GeometryFormat is BoundingBox... specify the coordinates as comma-separated values in the "
            "following order.\n"
            "  the left bound (minimum longitude)\n"
            "  the bottom bound (minimum latitude)\n"
            "  the right bound (maximum longitude)\n"
            "  the top (maximum latitude) bound \n"
            "If GeometryFormat is WKB... use the syntax provided in the reference.")
        # CRS
        self.parameter_input_metadata['CRS.Description'] = "coordinate references system of the new GeoLayer"
        self.parameter_input_metadata['CRS.Label'] = "CRS"
        self.parameter_input_metadata['CRS.Required'] = True
        self.parameter_input_metadata['CRS.Tooltip'] = (
            "The coordinate reference system of the new GeoLayer. EPSG or "
            "ESRI code format required (e.g. EPSG:4326, EPSG:26913, ESRI:102003).")
        # IfGeoLayerIDExists
        self.parameter_input_metadata['IfGeoLayerIDExists.Description'] = "action if output exists"
        self.parameter_input_metadata['IfGeoLayerIDExists.Label'] = "If GeoLayerID exists"
        self.parameter_input_metadata['IfGeoLayerIDExists.Tooltip'] = (
            "The action that occurs if the NewGeoLayerID already exists within the GeoProcessor.\n"
            "Replace: The existing GeoLayer within the GeoProcessor is overwritten with the new GeoLayer. "
            "No warning is logged.\n"
            "ReplaceAndWarn: The existing GeoLayer within the GeoProcessor is overwritten with the new GeoLayer. "
            "A warning is logged. \n"
            "Warn: The new GeoLayer is not created. A warning is logged. \n"
            "Fail: The new GeoLayer is not created. A fail message is logged.")
        self.parameter_input_metadata['IfGeoLayerIDExists.Values'] = ["", "Replace", "ReplaceAndWarn", "Warn", "Fail"]
        self.parameter_input_metadata['IfGeoLayerIDExists.Value.Default'] = "Replace"

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # These parameters must be non-empty, non-None strings
        # (GeometryFormat is validated separately against its choice list below).
        parameters = ["NewGeoLayerID", "GeometryData", "CRS"]

        # Check that the parameters are non-empty, non-None strings.
        for parameter in parameters:
            parameter_value = self.get_parameter_value(parameter_name=parameter,
                                                       command_parameters=command_parameters)
            if not validators.validate_string(parameter_value, False, False):
                message = "{} parameter has no value.".format(parameter)
                recommendation = "Specify the {} parameter.".format(parameter)
                warning += "\n" + message
                self.command_status.add_to_log(
                    CommandPhaseType.INITIALIZATION,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that GeometryFormat parameter is either `BoundingBox`, `WKT` or `WKB`.
        pv_GeometryFormat = self.get_parameter_value(parameter_name="GeometryFormat",
                                                     command_parameters=command_parameters)
        acceptable_values = ["BoundingBox", "WKT", "WKB"]
        if not validators.validate_string_in_list(pv_GeometryFormat, acceptable_values, none_allowed=False,
                                                  empty_string_allowed=False, ignore_case=True):
            message = "GeometryFormat parameter value ({}) is not recognized.".format(pv_GeometryFormat)
            recommendation = "Specify one of the acceptable values ({}) for the GeometryFormat parameter.".format(
                acceptable_values)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that optional IfGeoLayerIDExists param is either `Replace`, `Warn`, `Fail`, `ReplaceAndWarn` or None.
        pv_IfGeoLayerIDExists = self.get_parameter_value(parameter_name="IfGeoLayerIDExists",
                                                         command_parameters=command_parameters)
        acceptable_values = ["Replace", "Warn", "Fail", "ReplaceAndWarn"]
        if not validators.validate_string_in_list(pv_IfGeoLayerIDExists, acceptable_values, none_allowed=True,
                                                  empty_string_allowed=True, ignore_case=True):
            message = "IfGeoLayerIDExists parameter value ({}) is not recognized.".format(pv_IfGeoLayerIDExists)
            recommendation = "Specify one of the acceptable values ({}) for the IfGeoLayerIDExists parameter.".format(
                acceptable_values)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)
        else:
            # Refresh the phase severity
            self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_geolayer_be_created(self, geolayer_id, crs, geometry_format, geometry_data):
        """
        Checks the following:
        * the CRS is a valid CRS
        * the ID of the new GeoLayer is unique (not an existing GeoLayer ID)
        * if the GeometryFormat is BoundingBox, check that the string has 4 items

        Args:
            geolayer_id: the id of the GeoLayer to be created
            crs: the crs code of the GeoLayer to be created
            geometry_format: the format that the geometry data is delivered
            geometry_data: the geometry data (as a string)

        Returns:
            Boolean. If TRUE, the GeoLayer should be created.
            If FALSE, at least one check failed and the GeoLayer should not be created.
        """

        # Each check appends a Boolean. True means the check passed and the command may be run.
        should_run_command = []

        # If the CRS is not a valid coordinate reference system code, raise a FAILURE.
        should_run_command.append(validators.run_check(self, "IsCRSCodeValid", "CRS", crs, "FAIL"))

        # If the new GeoLayerID is the same as an already-existing GeoLayerID, raise a WARNING or FAILURE
        # (depends on the value of the IfGeoLayerIDExists parameter.)
        should_run_command.append(validators.run_check(self, "IsGeoLayerIdUnique", "NewGeoLayerID", geolayer_id,
                                                       None))

        # If the GeometryFormat is BoundingBox, continue with the checks.
        if geometry_format.upper() == "BOUNDINGBOX":
            # If the GeometryData string does not contain 4 items when converted to a list, raise a FAILURE.
            should_run_command.append(validators.run_check(self, "IsListLengthCorrect", "GeometryData",
                                                           geometry_data, "FAIL", other_values=[",", 4]))

        # The process should only be run if every check passed.
        return False not in should_run_command

    def run_command(self):
        """
        Run the command. Create the GeoLayer with the input geometries. Add GeoLayer to the GeoProcessor's
        geolayers list.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """

        # Obtain the parameter values.
        pv_NewGeoLayerID = self.get_parameter_value("NewGeoLayerID")
        pv_GeometryFormat = self.get_parameter_value("GeometryFormat").upper()
        pv_GeometryData = self.get_parameter_value("GeometryData")
        pv_CRS = self.get_parameter_value("CRS")

        if self.__should_geolayer_be_created(pv_NewGeoLayerID, pv_CRS, pv_GeometryFormat, pv_GeometryData):

            try:
                # If the geometry format is bounding box, continue.
                if pv_GeometryFormat == "BOUNDINGBOX":

                    # Convert the geometry input from a string to a list of strings.
                    # Items are in the following order:
                    #   1. Left (West) bound coordinate
                    #   2. Bottom (South) bound coordinate
                    #   3. Right (East) bound coordinate
                    #   4. Top (North) bound coordinate
                    nswe_extents = string_util.delimited_string_to_list(pv_GeometryData)

                    # Corner vertices as "x y" pairs.
                    nw = "{} {}".format(nswe_extents[0], nswe_extents[3])
                    ne = "{} {}".format(nswe_extents[2], nswe_extents[3])
                    se = "{} {}".format(nswe_extents[2], nswe_extents[1])
                    sw = "{} {}".format(nswe_extents[0], nswe_extents[1])

                    # Build the WKT polygon. The first vertex is repeated at the end because the WKT
                    # specification requires polygon rings to be closed.
                    wkt_conversion = "POLYGON(({}, {}, {}, {}, {}))".format(nw, ne, se, sw, nw)

                    # Create the QgsVectorLayer. BoundingBox will always create a POLYGON layer.
                    layer = qgis_util.create_qgsvectorlayer("Polygon", pv_CRS, "layer")

                    # Create the QgsGeometry object for the bounding box geometry.
                    qgs_geometry = qgis_util.create_qgsgeometry("WKT", wkt_conversion)

                # If the geometry format is Well-Known Text, continue.
                elif pv_GeometryFormat == "WKT":

                    # Get the equivalent QGS geometry type to the input WKT geometry.
                    # Ex: MultiLineString is converted to LineString.
                    qgsvectorlayer_geom_type = qgis_util.get_geometrytype_qgis_from_wkt(pv_GeometryData)

                    # Create the QgsVectorLayer. The geometry type will be determined from the WKT specifications.
                    layer = qgis_util.create_qgsvectorlayer(qgsvectorlayer_geom_type, pv_CRS, "layer")

                    # Create the QgsGeometry object for the Well-Known Text geometry.
                    qgs_geometry = qgis_util.create_qgsgeometry("WKT", pv_GeometryData)

                # If the geometry format is Well-Known Binary, continue.
                elif pv_GeometryFormat == "WKB":

                    # Create the QgsGeometry object for the Well-Known Binary geometry.
                    qgs_geometry = qgis_util.create_qgsgeometry("WKB", pv_GeometryData)

                    # Get the equivalent Well-Known Text for the geometry.
                    # QGIS 3 renamed QgsGeometry.exportToWkt() to asWkt(); support both since the WKB
                    # format is documented as requiring QGIS 3.0 or later.
                    if hasattr(qgs_geometry, "asWkt"):
                        qgs_geometry_as_wkt = qgs_geometry.asWkt()
                    else:
                        qgs_geometry_as_wkt = qgs_geometry.exportToWkt()

                    # Get the equivalent QGS geometry type to the input WKT geometry.
                    # Ex: MultiLineString is converted to LineString.
                    qgsvectorlayer_geom_type = qgis_util.get_geometrytype_qgis_from_wkt(qgs_geometry_as_wkt)

                    # Create the QgsVectorLayer. The geometry type will be determined from the WKB specifications.
                    layer = qgis_util.create_qgsvectorlayer(qgsvectorlayer_geom_type, pv_CRS, "layer")

                # Add the feature (with the appropriate geometry) to the Qgs Vector Layer.
                qgis_util.add_feature_to_qgsvectorlayer(layer, qgs_geometry)

                # Create a new GeoLayer with the QgsVectorLayer and add it to the GeoProcesor's geolayers list.
                new_geolayer = GeoLayer(pv_NewGeoLayerID, layer, "MEMORY")
                self.command_processor.add_geolayer(new_geolayer)

            # Raise an exception if an unexpected error occurs during the process.
            except Exception:
                self.warning_count += 1
                message = "Unexpected error creating GeoLayer ({}).".format(pv_NewGeoLayerID)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise Runtime Error if any errors occurred.
        if self.warning_count > 0:
            message = "There were {} warnings proceeding this command.".format(self.warning_count)
            raise RuntimeError(message)
        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class CopyFile(AbstractCommand):
    """
    The CopyFile command copies a source file to a destination copy.

    The command is useful as a utility and is often used in automated testing to provide input data
    from a saved copy.

    Command Parameters

    * SourceFile (str, required): the name of the source file to copy.
    * DestinationFile (str, required): the name of the destination file.
    * IfSourceFileNotFound (str, optional): action if the source file is not found, one of
      `Ignore`, `Warn` (default) or `Fail`.
    """

    # Command parameter definitions (name and expected Python type).
    __command_parameter_metadata = [
        CommandParameterMetadata("SourceFile", type("")),
        CommandParameterMetadata("DestinationFile", type("")),
        CommandParameterMetadata("IfSourceFileNotFound", type(""))
    ]

    # Choices for IfSourceFileNotFound, used to validate parameter and display in editor
    __choices_IfSourceFileNotFound = ["Ignore", "Warn", "Fail"]

    def __init__(self):
        """
        Initialize a new instance of the command.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "CopyFile"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Copy a source file to a destination."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # SourceFile
        self.parameter_input_metadata['SourceFile.Description'] = "the name of the source file to copy"
        self.parameter_input_metadata['SourceFile.Label'] = "Source file"
        self.parameter_input_metadata['SourceFile.Required'] = True
        self.parameter_input_metadata['SourceFile.Tooltip'] = \
            "The name of the source file to copy. Can be specified using ${Property}."
        self.parameter_input_metadata['SourceFile.FileSelector.Type'] = "Read"
        self.parameter_input_metadata['SourceFile.FileSelector.Title'] = "Select the source file to copy"
        # DestinationFile
        self.parameter_input_metadata['DestinationFile.Description'] = "the name of the destination file"
        self.parameter_input_metadata['DestinationFile.Label'] = "Destination file"
        self.parameter_input_metadata['DestinationFile.Required'] = True
        self.parameter_input_metadata['DestinationFile.Tooltip'] = \
            "The name of the destination file. Can be specified using ${Property}."
        self.parameter_input_metadata['DestinationFile.FileSelector.Type'] = "Write"
        self.parameter_input_metadata['DestinationFile.FileSelector.Title'] = "select the destination file"
        # IfSourceFileNotFound
        self.parameter_input_metadata['IfSourceFileNotFound.Description'] = "action if file not found"
        self.parameter_input_metadata['IfSourceFileNotFound.Label'] = "If not found?"
        self.parameter_input_metadata['IfSourceFileNotFound.Tooltip'] = (
            "Indicate an action if the source file is not found:\n\n"
            "Ignore (ignore the missing file and do not warn)\n"
            "Warn (generate a warning message)\n"
            "Fail (generate a failure message). ")
        self.parameter_input_metadata['IfSourceFileNotFound.Values'] = [
            "", "Ignore", "Warn", "Fail"
        ]
        self.parameter_input_metadata['IfSourceFileNotFound.Value.Default'] = "Warn"

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            Nothing.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning_message = ""
        logger = logging.getLogger(__name__)

        # SourceFile is required
        pv_SourceFile = self.get_parameter_value(
            parameter_name='SourceFile',
            command_parameters=command_parameters)
        if not validators.validate_string(pv_SourceFile, False, False):
            message = "The SourceFile must be specified."
            recommendation = "Specify the source file."
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # DestinationFile is required
        pv_DestinationFile = self.get_parameter_value(
            parameter_name='DestinationFile',
            command_parameters=command_parameters)
        if not validators.validate_string(pv_DestinationFile, False, False):
            message = "The DestinationFile must be specified."
            recommendation = "Specify the destination file."
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # IfSourceFileNotFound is optional, defaults to Warn at runtime
        pv_IfNotFound = self.get_parameter_value(
            parameter_name='IfSourceFileNotFound',
            command_parameters=command_parameters)
        if not validators.validate_string_in_list(
                pv_IfNotFound, self.__choices_IfSourceFileNotFound, True, True):
            message = "IfSourceFileNotFound parameter is invalid."
            recommendation = "Specify the IfSourceFileNotFound parameter as blank or one of " + \
                str(self.__choices_IfSourceFileNotFound)
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty
        # triggers an exception below.
        warning_message = command_util.validate_command_parameter_names(
            self, warning_message)

        # If any warnings were generated, throw an exception
        if len(warning_message) > 0:
            logger.warning(warning_message)
            raise ValueError(warning_message)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(
            CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def run_command(self):
        """
        Run the command. Copy the source file to the destination.

        A missing source file is handled according to the IfSourceFileNotFound parameter
        (Ignore, Warn, or Fail; default Warn).

        Returns:
            Nothing.

        Raises:
            RuntimeError: if a runtime input error occurs.
        """
        warning_count = 0
        logger = logging.getLogger(__name__)

        # Get data for the command
        pv_SourceFile = self.get_parameter_value('SourceFile')
        pv_DestinationFile = self.get_parameter_value('DestinationFile')
        # FIX: IfSourceFileNotFound was previously validated but never honored at runtime -
        # the command always logged a FAILURE for a missing source file.
        pv_IfSourceFileNotFound = self.get_parameter_value('IfSourceFileNotFound')
        if pv_IfSourceFileNotFound is None or pv_IfSourceFileNotFound == "":
            # Default documented in the editor metadata.
            pv_IfSourceFileNotFound = 'Warn'

        # Expand ${Property} notation and convert to absolute paths for the operating system.
        pv_SourceFile_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(
                self.command_processor.get_property('WorkingDir'),
                self.command_processor.expand_parameter_value(
                    pv_SourceFile, self)))
        pv_DestinationFile_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(
                self.command_processor.get_property('WorkingDir'),
                self.command_processor.expand_parameter_value(
                    pv_DestinationFile, self)))

        # Do the processing
        try:
            # Number of valid inputs; the copy only occurs when both the source file and the
            # destination folder exist.
            input_count = 2
            if not os.path.exists(pv_SourceFile_absolute):
                message = 'The source file does not exist: "' + pv_SourceFile_absolute + '"'
                recommendation = "Verify that the source exists at the time the command is run."
                # FIX: use add_to_log (addToLog does not exist on the command status) and
                # react according to IfSourceFileNotFound.
                if pv_IfSourceFileNotFound.upper() == 'WARN':
                    warning_count += 1
                    self.command_status.add_to_log(
                        CommandPhaseType.RUN,
                        CommandLogRecord(CommandStatusType.WARNING, message,
                                         recommendation))
                    logger.warning(message)
                elif pv_IfSourceFileNotFound.upper() == 'FAIL':
                    warning_count += 1
                    self.command_status.add_to_log(
                        CommandPhaseType.RUN,
                        CommandLogRecord(CommandStatusType.FAILURE, message,
                                         recommendation))
                    logger.warning(message)
                # 'Ignore' is silent - skip the copy without logging.
                input_count -= 1

            destination_folder = os.path.dirname(pv_DestinationFile_absolute)
            if not os.path.exists(destination_folder):
                warning_count += 1
                message = 'The destination folder does not exist: "' + destination_folder + '"'
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(
                        CommandStatusType.FAILURE, message,
                        "Verify that the destination folder exists at the time the command is run."
                    ))
                logger.warning(message)
                input_count -= 1

            if input_count == 2:
                # Both inputs are valid - try to do the copy
                logger.info('Copying file "' + pv_SourceFile_absolute +
                            '" to "' + pv_DestinationFile_absolute + '"')
                copyfile(pv_SourceFile_absolute, pv_DestinationFile_absolute)
        except Exception:
            warning_count += 1
            message = 'Unexpected error copying file "' + pv_SourceFile_absolute + '" to "' + \
                pv_DestinationFile_absolute + '"'
            traceback.print_exc(file=sys.stdout)
            # FIX: logger.exception(message, e) passed the exception as a %-format argument;
            # logger.exception already records the active traceback.
            logger.exception(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 "See the log file for details."))

        if warning_count > 0:
            message = "There were " + str(
                warning_count) + " warnings processing the command."
            raise RuntimeError(message)

        self.command_status.refresh_phase_severity(CommandPhaseType.RUN,
                                                   CommandStatusType.SUCCESS)
class WriteGeoLayerToGeoJSON(AbstractCommand):
    """
    Writes a GeoLayer to a spatial data file in GeoJSON format.

    This command writes a GeoLayer registered within the geoprocessor to a GeoJSON spatial data file. The GeoJSON
    spatial data file can then be viewed within a GIS, moved within folders on the local computer, packaged for
    delivery, etc.

    Registered GeoLayers are stored as GeoLayer objects within the geoprocessor's GeoLayers list. Each GeoLayer has
    one feature type (point, line, polygon, etc.) and other data (an identifier, a coordinate reference system,
    etc). This function only writes one single GeoLayer to a single spatial data file in GeoJSON format.

    Command Parameters

    * GeoLayerID (str, required): the identifier of the GeoLayer to be written to a spatial data file in
      GeoJSON format.
    * OutputFile (str, required): the relative pathname of the output spatial data file.
    * OutputCRS (str, EPSG code, optional): the coordinate reference system that the output spatial data file will
      be projected. By default, the output spatial data file will be projected to the GeoLayer's current CRS.
    * OutputPrecision (int, 0-15, optional): the precision (number of integers behind the GeoJSON geometry's
      decimal point) of the output spatial data file in GeoJSON format. Must be at or between 0 and 15. By
      default, the precision parameter is set to 5.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("GeoLayerID", type("")),
        CommandParameterMetadata("OutputFile", type("")),
        CommandParameterMetadata("OutputCRS", type("")),
        CommandParameterMetadata("OutputPrecision", type(2))
    ]

    def __init__(self):
        """
        Initialize the command.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "WriteGeoLayerToGeoJSON"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Write a GeoLayer to a file in GeoJSON format."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # GeoLayerID
        self.parameter_input_metadata['GeoLayerID.Description'] = "identifier of the GeoLayer"
        self.parameter_input_metadata['GeoLayerID.Label'] = "GeoLayerID"
        self.parameter_input_metadata['GeoLayerID.Required'] = True
        self.parameter_input_metadata['GeoLayerID.Tooltip'] = "The GeoLayer identifier, can use ${Property}."
        # OutputFile
        # FIX: description previously said "property file to write" (copy/paste from another command).
        self.parameter_input_metadata['OutputFile.Description'] = "GeoJSON file to write"
        self.parameter_input_metadata['OutputFile.Label'] = "Output file"
        self.parameter_input_metadata['OutputFile.Required'] = True
        # FIX: the sentence "${Property} syntax is recognized." was duplicated in the tooltip.
        self.parameter_input_metadata['OutputFile.Tooltip'] = (
            "The output GeoJSON file (relative or absolute path). ${Property} syntax is recognized.")
        self.parameter_input_metadata['OutputFile.FileSelector.Type'] = "Write"
        self.parameter_input_metadata['OutputFile.FileSelector.Title'] = "Select GeoJSON file to write"
        # OutputCRS
        self.parameter_input_metadata['OutputCRS.Description'] = "coordinate reference system of output "
        self.parameter_input_metadata['OutputCRS.Label'] = "Output CRS"
        self.parameter_input_metadata['OutputCRS.Tooltip'] = (
            "The coordinate reference system of the output GeoJSON.\nEPSG or ESRI code format required "
            "(e.g. EPSG:4326, EPSG:26913, ESRI:102003).\n"
            "If the output CRS is different than the CRS of the GeoLayer, the output GeoJSON is reprojected "
            "to the new CRS.")
        self.parameter_input_metadata['OutputCRS.Value.Default.Description'] = "the GeoLayer's CRS"
        # OutputPrecision
        self.parameter_input_metadata['OutputPrecision.Description'] = "number of decimal points in output"
        self.parameter_input_metadata['OutputPrecision.Label'] = "Output precision"
        self.parameter_input_metadata['OutputPrecision.Tooltip'] = (
            "The number of decimal points to include in the output GeoJSON file's coordinates. "
            "Must be a positive integer at or between 0 and 15.\n"
            "The precision of coordinate values can greatly impact the size of the file and precision of drawing "
            "the features.\n"
            "For example, a higher OutputPrecision value increases the output GeoJSON file size and "
            "increases the geometry's precision.")
        self.parameter_input_metadata['OutputPrecision.Value.Default'] = "5"

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that parameter GeoLayerID is a non-empty, non-None string.
        # - existence of the GeoLayer will also be checked in run_command().
        pv_GeoLayerID = self.get_parameter_value(
            parameter_name='GeoLayerID',
            command_parameters=command_parameters)
        if not validators.validate_string(pv_GeoLayerID, False, False):
            message = "GeoLayerID parameter has no value."
            recommendation = "Specify the GeoLayerID parameter to indicate the GeoLayer to write."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # Check that parameter OutputFile is a non-empty, non-None string.
        # - existence of the folder will also be checked in run_command().
        pv_OutputFile = self.get_parameter_value(
            parameter_name='OutputFile',
            command_parameters=command_parameters)
        if not validators.validate_string(pv_OutputFile, False, False):
            message = "OutputFile parameter has no value."
            recommendation = "Specify the OutputFile parameter (relative or absolute pathname) to indicate the " \
                             "location and name of the output spatial data file in GeoJSON format."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(
            CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_write_geolayer(self, geolayer_id, output_file_abs,
                                output_precision):
        """
        Checks the following:
        * the ID of the GeoLayer is an existing GeoLayer ID
        * the output folder is a valid folder
        * the output precision is at or between 0 and 15

        Args:
            geolayer_id: the ID of the GeoLayer to be written
            output_file_abs: the full pathname to the output file
            output_precision (int): the precision of the output GeoJSON file

        Returns:
            Boolean. If TRUE, the GeoLayer should be written.
            If FALSE, at least one check failed and the GeoLayer should not be written.
        """

        # List of Boolean values. The Boolean values correspond to the results of the following tests.
        # If TRUE, the test confirms that the command should be run.
        should_run_command = []

        # If the GeoLayer ID is not an existing GeoLayer ID, raise a FAILURE.
        should_run_command.append(
            validators.run_check(self, "IsGeoLayerIdExisting", "GeoLayerID",
                                 geolayer_id, "FAIL"))

        # If the folder of the OutputFile file path is not a valid folder, raise a FAILURE.
        should_run_command.append(
            validators.run_check(self, "DoesFilePathHaveAValidFolder",
                                 "OutputFile", output_file_abs, "FAIL"))

        # If the output precision is not at or between 0 and 15, raise a FAILURE.
        should_run_command.append(
            validators.run_check(self, "IsIntBetweenRange", "OutputPrecision",
                                 output_precision, "FAIL",
                                 other_values=[0, 15]))

        # The command should only run if every check passed.
        if False in should_run_command:
            return False
        else:
            return True

    def run_command(self):
        """
        Run the command. Write the GeoLayer to a spatial data file in GeoJSON format.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """

        # Obtain the parameter values except for the OutputCRS
        pv_GeoLayerID = self.get_parameter_value("GeoLayerID")
        pv_OutputPrecision = int(
            self.get_parameter_value("OutputPrecision", default_value=5))
        pv_OutputFile = self.get_parameter_value("OutputFile")

        # Expand for ${Property} syntax.
        pv_GeoLayerID = self.command_processor.expand_parameter_value(
            pv_GeoLayerID, self)

        # Convert the OutputFile parameter value relative path to an absolute path and expand for ${Property} syntax
        output_file_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(
                self.command_processor.get_property('WorkingDir'),
                self.command_processor.expand_parameter_value(
                    pv_OutputFile, self)))

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.__should_write_geolayer(pv_GeoLayerID, output_file_absolute,
                                        pv_OutputPrecision):
            try:
                # Get the GeoLayer
                geolayer = self.command_processor.get_geolayer(pv_GeoLayerID)

                # Get the current coordinate reference system (in EPSG code) of the current GeoLayer
                geolayer_crs = geolayer.get_crs()

                # Obtain the parameter value of the OutputCRS, defaulting to the layer's own CRS.
                pv_OutputCRS = self.get_parameter_value(
                    "OutputCRS", default_value=geolayer_crs)

                # Write the GeoLayer to a spatial data file in GeoJSON format
                qgis_util.write_qgsvectorlayer_to_geojson(
                    geolayer.qgs_vector_layer, output_file_absolute,
                    pv_OutputCRS, pv_OutputPrecision)

                # Save the output file in the processor
                self.command_processor.add_output_file(output_file_absolute)

            # Raise an exception if an unexpected error occurs during the process
            except Exception:
                self.warning_count += 1
                message = "Unexpected error writing GeoLayer {} to GeoJSON format.".format(
                    pv_GeoLayerID)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message,
                                     recommendation))

        # Determine success of command processing. Raise Runtime Error if any errors occurred
        if self.warning_count > 0:
            message = "There were {} warnings proceeding this command.".format(
                self.warning_count)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(
                CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class MergeGeoLayers(AbstractCommand):
    """
    Merges GeoLayers with the same geometry into one output GeoLayer.

    * All features from the input GeoLayers are retained within the output GeoLayer.
    * The attributes of the input GeoLayers are retained within the output GeoLayer attribute tables.
    * Attributes that share the same name will be converged in the output GeoLayer attribute tables.
    * Attributes that are unique to an input GeoLayer are included in the output GeoLayer attribute tables -
      features from GeoLayers that don't have that unique attribute will have an attribute value of '0' for
      that unique field.
    * Attributes from different input GeoLayers (with different names) that are meant to be converged in the
      output GeoLayer can be managed with the AttributeMap.

    Command Parameters

    * GeoLayerIDs (list of strings, required): a list of the IDs of the GeoLayers to be merged. Can be '*' where
      all registered GeoLayers within the GeoProcessor are merged.
    * OutputGeoLayerID (string, required): the ID of the output GeoLayer, the merged GeoLayer.
    * AttributeMap (string, optional): a string that can convert to a list. Each item is separated by a comma.
      Each item is an entry pair separated by a ':'. The key of the pair is the old attribute name to be renamed.
      The value of the pair is the new name that the old attribute will be mapped to in the merged output GeoLayer.
      Default = ""
    * IfGeoLayerIDExists (str, optional): This parameter determines the action that occurs if the OutputGeoLayerID
      already exists within the GeoProcessor. Available options are: `Replace`, `ReplaceAndWarn`, `Warn` and `Fail`
      (Refer to user documentation for detailed description.) Default value is `Replace`.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("GeoLayerIDs", type([])),
        CommandParameterMetadata("OutputGeoLayerID", type("")),
        CommandParameterMetadata("AttributeMap", type({})),
        CommandParameterMetadata("IfGeoLayerIDExists", type(""))
    ]

    def __init__(self):
        """
        Initialize the command.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "MergeGeoLayers"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata[
            'Description'] = "Merge two or more GeoLayers into one GeoLayer."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # GeoLayerIDs
        self.parameter_input_metadata['GeoLayerIDs.Description'] = \
            "comma-separated list of identifiers for GeoLayers to merge"
        self.parameter_input_metadata['GeoLayerIDs.Label'] = "GeoLayersIDs"
        self.parameter_input_metadata['GeoLayerIDs.Required'] = True
        self.parameter_input_metadata['GeoLayerIDs.Tooltip'] = (
            "A comma-separated list of the identifiers of the GeoLayers to merge. \n\n"
            "GeoLayerID1, GeoLayerID2, ... \n\n"
            "Can also be * where all GeoLayers registered within the GeoProcessor are merged. "
            "${Property} syntax is recognized.")
        # OutputGeoLayerID
        self.parameter_input_metadata[
            'OutputGeoLayerID.Description'] = "output GeoLayerID"
        self.parameter_input_metadata[
            'OutputGeoLayerID.Label'] = "Output GeoLayerID"
        self.parameter_input_metadata['OutputGeoLayerID.Required'] = True
        self.parameter_input_metadata[
            'OutputGeoLayerID.Tooltip'] = "A GeoLayer identifier for the output GeoLayer."
        # AttributeMap
        self.parameter_input_metadata[
            'AttributeMap.Description'] = "new names for the output geolayer attributes"
        self.parameter_input_metadata['AttributeMap.Label'] = "Attribute map"
        self.parameter_input_metadata['AttributeMap.Tooltip'] = (
            "Specify new names for the output geolayer attributes.\n\n"
            "OriginalAttributeName1:MergedAttributeName1, \n"
            "OriginalAttributeName2:MergedAttributeName2 \n\n"
            "See the Attribute Map section for more information.")
        self.parameter_input_metadata[
            'AttributeMap.Value.Default.Description'] = "original attribute names"
        # IfGeoLayerIDExists
        self.parameter_input_metadata[
            'IfGeoLayerIDExists.Label'] = "If GeoLayerID exists"
        self.parameter_input_metadata['IfGeoLayerIDExists.Tooltip'] = (
            "The action that occurs if the OutputGeoLayerID already exists within the GeoProcessor. \n"
            "Replace : The existing GeoLayer within the GeoProcessor is overwritten with the new GeoLayer. "
            "No warning is logged. \n"
            "ReplaceAndWarn: The existing GeoLayer within the GeoProcessor is overwritten with the new GeoLayer. "
            "A warning is logged. \n"
            "Warn : The new GeoLayer is not created. A warning is logged. \n"
            "Fail : The new GeoLayer is not created. A fail message is logged."
        )
        self.parameter_input_metadata['IfGeoLayerIDExists.Values'] = [
            "", "Replace", "ReplaceAndWarn", "Warn", "Fail"
        ]
        self.parameter_input_metadata[
            'IfGeoLayerIDExists.Value.Default'] = "Replace"

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that parameter GeoLayerIDs is a non-empty, non-None string.
        pv_GeoLayerIDs = self.get_parameter_value(
            parameter_name='GeoLayerIDs',
            command_parameters=command_parameters)
        if not validators.validate_string(pv_GeoLayerIDs, False, False):
            message = "GeoLayerIDs parameter has no value."
            recommendation = "Specify the GeoLayerIDs parameter to indicate the GeoLayers to merge."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # Check that parameter OutputGeoLayerID is a non-empty, non-None string.
        pv_OutputGeoLayerID = self.get_parameter_value(
            parameter_name='OutputGeoLayerID',
            command_parameters=command_parameters)
        if not validators.validate_string(pv_OutputGeoLayerID, False, False):
            message = "OutputGeoLayerID parameter has no value."
            recommendation = "Specify the OutputGeoLayerID parameter to indicate the ID of the merged GeoLayer."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(
            CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_merge(self, geolayer_id_list, output_geolayer_id):
        """
        Checks the following:
        * the input GeoLayer IDs are existing GeoLayer IDs
        * the input GeoLayers are all the same CRS (warning)
        * the input GeoLayers are all the same geometry.
        * the ID of the output GeoLayer is unique (not an existing GeoLayer ID)

        Args:
            geolayer_id_list (list of strings): a list of the input GeoLayer IDs, the GeoLayers to merge
            output_geolayer_id (str): the ID of the output GeoLayer, the merged GeoLayer.

        Returns:
            run_merge: Boolean. If TRUE, the merge process should be run. If FALSE, it should not be run.
        """

        # Boolean to determine if the merge process should be run. Set to true until an error occurs.
        run_merge = True

        # Boolean to determine if all of the input GeoLayers exist within the GeoProcessor. TRUE until proven FALSE.
        input_geolayers_exist = True

        # Iterate over the input GeoLayer IDs
        for geolayer_id in geolayer_id_list:

            # If the GeoLayer ID is not an existing GeoLayer ID, raise a FAILURE.
            if not self.command_processor.get_geolayer(geolayer_id):
                run_merge = False
                input_geolayers_exist = False
                self.warning_count += 1
                message = 'The GeoLayerID ({}) is not a valid GeoLayer ID.'.format(
                    geolayer_id)
                recommendation = 'Specify a valid GeoLayerID.'
                self.logger.error(message)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message,
                                     recommendation))

        # If all of the input GeoLayers exist, check that the they have the same CRS and the same geometry.
        if input_geolayers_exist:

            # A list of the GeoLayer's geometries and a list of the GeoLayer's CRS.
            geom_list = []
            crs_list = []

            # Iterate over the input GeoLayers.
            for geolayer_id in geolayer_id_list:

                # Get the GeoLayer.
                geolayer = self.command_processor.get_geolayer(geolayer_id)

                # Get the GeoLayer's geometry.
                geom_list.append(geolayer.get_geometry())

                # Get the GeoLayer's CRS.
                crs_list.append(geolayer.get_crs())

            # If the input GeoLayers have different geometries, raise a FAILURE.
            if len(set(geom_list)) > 1:
                run_merge = False
                self.warning_count += 1
                message = 'The input GeoLayers ({}) have different geometries ({}).'.format(
                    geolayer_id_list, geom_list)
                recommendation = 'Specify input GeoLayers that have the same geometry.'
                self.logger.error(message)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message,
                                     recommendation))

            # If the input GeoLayers have different CRS, raise a WARNING.
            if len(set(crs_list)) > 1:
                self.warning_count += 1
                # NOTE(review): this message formats geom_list for the "{}" that describes the differing
                # coordinate reference systems — crs_list was probably intended; confirm and fix.
                message = 'The input GeoLayers ({}) have different coordinate' \
                          ' reference systems ({}).'.format(geolayer_id_list, geom_list)
                recommendation = 'Specify input GeoLayers that have the same CRS.'
                self.logger.warning(message)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.WARNING, message,
                                     recommendation))

        # If the output_geolayer_id is the same as an already-registered GeoLayerID, react according to the
        # pv_IfGeoLayerIDExists value.
        # NOTE(review): this is an `elif` chained to `if input_geolayers_exist:` above, so the duplicate
        # output-ID check only runs when at least one input GeoLayer is missing; an independent `if`
        # was probably intended — confirm before changing.
        elif self.command_processor.get_geolayer(output_geolayer_id):

            # Get the IfGeoLayerIDExists parameter value.
            pv_IfGeoLayerIDExists = self.get_parameter_value(
                "IfGeoLayerIDExists", default_value="Replace")

            # Warnings/recommendations if the geolayer_id is the same as a registered GeoLayerID
            message = 'The GeoLayer ID ({}) value is already in use as a GeoLayer ID.'.format(
                output_geolayer_id)
            recommendation = 'Specify a new GeoLayerID.'

            # The registered GeoLayer should be replaced with the new GeoLayer (with warnings).
            if pv_IfGeoLayerIDExists.upper() == "REPLACEANDWARN":
                self.warning_count += 1
                self.logger.warning(message)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.WARNING, message,
                                     recommendation))

            # The registered GeoLayer should not be replaced. A warning should be logged.
            if pv_IfGeoLayerIDExists.upper() == "WARN":
                run_merge = False
                self.warning_count += 1
                self.logger.warning(message)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.WARNING, message,
                                     recommendation))

            # The matching IDs should cause a FAILURE.
            elif pv_IfGeoLayerIDExists.upper() == "FAIL":
                run_merge = False
                self.warning_count += 1
                self.logger.error(message)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message,
                                     recommendation))

        # Return the Boolean to determine if the merge process should be run. If TRUE, all checks passed. If FALSE,
        # one or many checks failed.
        return run_merge

    @staticmethod
    def __create_attribute_dictionary(geolayer, attribute_map):
        """
        Create an attribute dictionary for the GeoLayer.

        * An attribute dictionary is a dictionary of entries that determines how/if the attribute names should be
          renamed. Each entry represents one of the GeoLayer's attributes.
            Key: the name of the existing GeoLayer attribute.
            Value: the name that the GeoLayer attribute should be renamed to.
        * The values of the attribute dictionary are determined by the logic within the attribute_map.
        * The attribute map is a user-defined dictionary that determines how the input GeoLayers' attributes
          should be manipulated in the merged output GeoLayer. Each entry represents one of the output GeoLayer's
          attributes.
            Key: the name of the attribute in output GeoLayer
            Value: a list of existing input GeoLayer attributes that should be renamed to the corresponding key

        Args:
            geolayer (object): the input GeoLayer
            attribute_map (dictionary): a dictionary of how/if the input GeoLayer's attributes should be renamed

        Returns:
            attribute_dictionary (dictionary). Read description above.
        """

        # Get a list of the GeoLayer's existing attribute names.
        existing_attribute_names = geolayer.get_attribute_field_names()

        # A dictionary of entries that determines how/if the existing attribute names should be renamed.
        attribute_dictionary = {}

        # Iterate over each of GeoLayer's existing attributes.
        for existing_attribute_name in existing_attribute_names:

            # Boolean to determine if the existing_attribute_name should be renamed. Set to FALSE until proven TRUE.
            should_be_renamed = False

            # Iterate over the attribute map.
            for new_attr_name, list_of_existing_attr_names_to_rename in attribute_map.items(
            ):

                # If the existing attribute name should be renamed then return the new name.
                if existing_attribute_name in list_of_existing_attr_names_to_rename:
                    should_be_renamed = True
                    output_attribute_name = new_attr_name
                    break

            # If the existing attribute name should not be renamed, return the existing attribute name.
            if not should_be_renamed:
                output_attribute_name = existing_attribute_name

            # Add the key, value pair to the attribute dictionary.
            attribute_dictionary[
                existing_attribute_name] = output_attribute_name

        # Return the attribute dictionary.
        return attribute_dictionary

    def run_command(self):
        """
        Run the command. Merge the input GeoLayers into one output GeoLayer.
        """

        # Get the command parameter values.
        pv_GeoLayerIDs = self.get_parameter_value("GeoLayerIDs")
        pv_OutputGeoLayerID = self.get_parameter_value("OutputGeoLayerID")
        pv_AttributeMap = self.get_parameter_value("AttributeMap",
                                                   default_value="")

        # Expand for ${Property} syntax.
        pv_GeoLayerIDs = self.command_processor.expand_parameter_value(
            pv_GeoLayerIDs, self)

        # Convert the AttributeMap parameter from string to a list of mapping entries.
        attribute_map_entry_list = string_util.delimited_string_to_list(
            pv_AttributeMap, delimiter=',')

        # The attribute map dictionary contains the attributes of the output merged GeoLayer and the corresponding
        # attributes of the input GeoLayers.
        #   key (str): an attribute of the output, merged GeoLayer
        #   value (list): a list of attributes from the input GeoLayers that should be mapped to the output attribute
        attribute_map_dic = {}

        # Iterate over each attribute mapping entry.
        for attribute_map_entry in attribute_map_entry_list:

            # Get a list of the keys (merged attributes) currently in the attribute map dictionary.
            curr_merged_attributes = list(attribute_map_dic.keys())

            # If the attribute map entry has the correct format, continue.
            if ':' in attribute_map_entry:

                # Get the output merged attribute name from the entry.
merged_attr = attribute_map_entry.split(':')[1].strip() # Get the input attribute name from the entry. input_attr = attribute_map_entry.split(':')[0].strip() # If the attribute map entry does has the correct format, the merged_attr and the input_attr variables # are set to empty strings. else: merged_attr = '' input_attr = '' # If the merged attribute name is already registered in the attribute mapping dictionary. if merged_attr in curr_merged_attributes: # Add the input attribute to the list of input attributes within the dictionary (associated with # the corresponding merged_attribute). curr_input_attrs = attribute_map_dic[merged_attr] curr_input_attrs.append(input_attr) attribute_map_dic[merged_attr] = curr_input_attrs # If the merged attribute is not already registered in the attribute mapping dictionary, add the input # attribute (as a one-item list) to the dictionary (associated with the corresponding merged_attribute). else: attribute_map_dic[merged_attr] = [input_attr] # Convert the GeoLayerIDs parameter from string to list format. # If configured, list all of the registered GeoLayer IDs. if pv_GeoLayerIDs == "*": list_of_geolayer_ids = [] # Iterate over each GeoLayer registered within the GeoProcessor. Add each GeoLayer's ID to the list. for geolayer_obj in self.command_processor.geolayers: list_of_geolayer_ids.append(geolayer_obj.id) # If specific GeoLayer IDs are listed, convert the string into list format. else: list_of_geolayer_ids = string_util.delimited_string_to_list( pv_GeoLayerIDs) # Run the checks on the parameter values. Only continue if the checks passed. if self.__should_merge(list_of_geolayer_ids, pv_OutputGeoLayerID): try: # A list to hold the GeoLayer IDs of the copied GeoLayers. Copied GeoLayers are only required for this # command. They will be removed from the GeoProcessor (to save processing space and speed) after the # processing has been completed. This list will be used to remove the copied GeoLayers. 
copied_geolayer_ids = [] # A list to hold the full pathname of the copied GeoLayers (written to disk). The # qgis:mergevectorlayers requires that the QGSVectorLayer objects are not in memory. This list will be # used as an input to the qgis:mergevectorlayers algorithm. copied_geolayer_sourcepath = [] first_geolayer = self.command_processor.get_geolayer( list_of_geolayer_ids[0]) first_crs = first_geolayer.get_crs() # Iterate over the GeoLayers to be merged. for geolayer_id in list_of_geolayer_ids: # Get the appropriate GeoLayer based on the GeoLayer ID. geolayer = self.command_processor.get_geolayer(geolayer_id) # Get an attribute dictionary mapping the GeoLayer attributes that are to be renamed. # Key: Existing attribute name. Value: New attribute name. attribute_dictionary = self.__create_attribute_dictionary( geolayer, attribute_map_dic) # Make a copy of the GeoLayer and add it to the GeoProcessor. Renaming of attributes will occur on # a copy of the GeoLayer so that the original GeoLayer's attribute values are not affected. copied_geolayer = geolayer.deepcopy( "{}_copyForMerge".format(geolayer_id)) self.command_processor.add_geolayer(copied_geolayer) # Add the copied GeoLayer ID to the master list. copied_geolayer_ids.append(copied_geolayer.id) # Iterate over the GeoLayer's attribute dictionary. for existing_attr_name, new_attr_name in attribute_dictionary.items( ): # If the attribute should be renamed, then rename the attribute in the copied GeoLayer. if not (existing_attr_name == new_attr_name): copied_geolayer.rename_attribute( existing_attr_name, new_attr_name) # Write copied GeoLayer (memory) to the temporary directory (written to disk). output_file_absolute = os.path.join( self.command_processor.get_property('TempDir'), copied_geolayer.id) on_disk_geolayer = copied_geolayer.write_to_disk( output_file_absolute) # Overwrite the copied (memory) GeoLayer in the geoprocessor with the on-disk GeoLayer. 
self.command_processor.add_geolayer(on_disk_geolayer) # Add the source path of the copied on-disk GeoLayer to the master list. copied_geolayer_sourcepath.append( on_disk_geolayer.source_path) # Merge all of the copied GeoLayers (the GeoLayers with the new attribute names). # Using QGIS algorithm but can also use saga:mergelayers algorithm. # saga:mergelayers documentation at http://www.saga-gis.org/saga_tool_doc/2.3.0/shapes_tools_2.html alg_parameters = { "LAYERS": copied_geolayer_sourcepath, "CRS": first_crs, "OUTPUT": "memory:" } merged_output = self.command_processor.qgis_processor.runAlgorithm( "qgis:mergevectorlayers", alg_parameters) # Create a new GeoLayer and add it to the GeoProcessor's geolayers list. # in QGIS3, merged_output["OUTPUT"] returns the returns the QGS vector layer object # see ClipGeoLayer.py for information about value in QGIS2 environment self.command_processor.add_geolayer( GeoLayer(pv_OutputGeoLayerID, merged_output["OUTPUT"], "MEMORY")) # Release the copied GeoLayers from the GeoProcessor. for copied_geolayer_id in copied_geolayer_ids: # Get the copied GeoLayer based on the GeoLayer ID. copied_geolayer = self.command_processor.get_geolayer( copied_geolayer_id) # Remove the copied GeoLayer from the GeoProcessor self.command_processor.free_geolayer(copied_geolayer) # Raise an exception if an unexpected error occurs during the process except Exception as e: self.warning_count += 1 message = "Unexpected error merging the following GeoLayers {}.".format( pv_GeoLayerIDs) recommendation = "Check the log file for details." self.logger.error(message, exc_info=True) self.command_status.add_to_log( CommandPhaseType.RUN, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Determine success of command processing. 
Raise Runtime Error if any errors occurred if self.warning_count > 0: message = "There were {} warnings proceeding this command.".format( self.warning_count) raise RuntimeError(message) # Set command status type as SUCCESS if there are no errors. else: self.command_status.refresh_phase_severity( CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class ReadGeoLayerFromGeoJSON(AbstractCommand):
    """
    Reads a GeoLayer from a GeoJSON spatial data file.

    This command reads a layer from a GeoJSON file and creates a GeoLayer object within the
    geoprocessor. The GeoLayer can then be accessed in the geoprocessor by its identifier and further processed.

    GeoLayers are stored on a computer or are available for download as a spatial data file (GeoJSON, shapefile,
    feature class in a file geodatabase, etc.). Each GeoLayer has one feature type (point, line, polygon, etc.) and
    other data (an identifier, a coordinate reference system, etc). This function reads a single GeoLayer from a
    single GeoJSON file in GeoJSON format (consistent with the fact that GeoJSON files store one layer).

    In order for the geoprocessor to use and manipulate spatial data files, GeoLayers are instantiated as
    `QgsVectorLayer <https://qgis.org/api/classQgsVectorLayer.html>`_ objects.

    Command Parameters
    * SpatialDataFile (str, required): the relative pathname to the spatial data file (GeoJSON format)
    * GeoLayerID (str, optional): the GeoLayer identifier. If None, the spatial data filename (without the .geojson
        extension) will be used as the GeoLayer identifier. For example: If GeoLayerID is None and the absolute
        pathname to the spatial data file is C:/Desktop/Example/example_file.geojson, then the GeoLayerID will be
        `example_file`.
    * IfGeoLayerIDExists (str, optional): This parameter determines the action that occurs if the CopiedGeoLayerID
        already exists within the GeoProcessor. Available options are: `Replace`, `ReplaceAndWarn`, `Warn` and
        `Fail` (Refer to user documentation for detailed description.) Default value is `Replace`.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("SpatialDataFile", type("")),
        CommandParameterMetadata("GeoLayerID", type("")),
        CommandParameterMetadata("IfGeoLayerIDExists", type(""))]

    # Choices for IfGeoLayerIDExists, used to validate parameter and display in editor
    __choices_IfGeoLayerIDExists = ["Replace", "ReplaceAndWarn", "Warn", "Fail"]

    def __init__(self):
        """
        Initialize the command.
        """

        # AbstractCommand data
        super().__init__()
        self.command_name = "ReadGeoLayerFromGeoJSON"
        self.command_parameter_metadata = self.__command_parameter_metadata
        self.choices_IfGeoLayerIDExists = self.__choices_IfGeoLayerIDExists

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Read a GeoLayer from a file in GeoJSON format."
        self.command_metadata['EditorType'] = "Simple"

        # Parameter Metadata
        self.parameter_input_metadata = dict()
        # SpatialDataFile
        self.parameter_input_metadata['SpatialDataFile.Description'] = "GeoJSON file to read"
        self.parameter_input_metadata['SpatialDataFile.Label'] = "GeoJSON file"
        self.parameter_input_metadata['SpatialDataFile.Tooltip'] = (
            "The GeoJSON file to read (relative or absolute path). ${Property} syntax is recognized.")
        self.parameter_input_metadata['SpatialDataFile.Required'] = True
        self.parameter_input_metadata['SpatialDataFile.FileSelector.Type'] = "Read"
        self.parameter_input_metadata['SpatialDataFile.FileSelector.Title'] = "Select a GeoJSON file to read"
        # GeoLayerID
        self.parameter_input_metadata['GeoLayerID.Description'] = "output GeoLayer identifier"
        self.parameter_input_metadata['GeoLayerID.Label'] = "GeoLayerID"
        self.parameter_input_metadata['GeoLayerID.Tooltip'] = \
            "A GeoLayer identifier. Formatting characters and ${Property} syntax are recognized."
        self.parameter_input_metadata['GeoLayerID.Value.Default'] = (
            "The GeoJSON filename without the leading path and without the file extension.")
        # IfGeoLayerIDExists
        self.parameter_input_metadata['IfGeoLayerIDExists.Description'] = "action if exists"
        self.parameter_input_metadata['IfGeoLayerIDExists.Label'] = "If GeoLayerID exists"
        self.parameter_input_metadata['IfGeoLayerIDExists.Tooltip'] = (
            "The action that occurs if the GeoLayerID already exists within the GeoProcessor.\n"
            "Replace : The existing GeoLayer within the GeoProcessor is overwritten with the new GeoLayer. "
            "No warning is logged.\n"
            "ReplaceAndWarn: The existing GeoLayer within the GeoProcessor is overwritten with the new "
            "GeoLayer. A warning is logged. \n"
            " Warn : The new GeoLayer is not created. A warning is logged. \n"
            " Fail : The new GeoLayer is not created. A fail message is logged.")
        self.parameter_input_metadata['IfGeoLayerIDExists.Values'] = ["", "Replace", "ReplaceAndWarn", "Warn", "Fail"]
        self.parameter_input_metadata['IfGeoLayerIDExists.Value.Default'] = "Replace"

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that parameter SpatialDataFile is a non-empty, non-None string.
        # - existence of the file will also be checked in run_command().
        pv_SpatialDataFile = self.get_parameter_value(parameter_name='SpatialDataFile',
                                                      command_parameters=command_parameters)

        if not validators.validate_string(pv_SpatialDataFile, False, False):
            message = "SpatialDataFile parameter has no value."
            recommendation = "Specify the SpatialDataFile parameter to indicate the spatial data layer file."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that optional parameter IfGeoLayerIDExists is one of the acceptable values or is None.
        pv_IfGeoLayerIDExists = self.get_parameter_value(parameter_name="IfGeoLayerIDExists",
                                                         command_parameters=command_parameters)
        if not validators.validate_string_in_list(pv_IfGeoLayerIDExists,
                                                  self.__choices_IfGeoLayerIDExists,
                                                  none_allowed=True,
                                                  empty_string_allowed=True,
                                                  ignore_case=True):
            message = "IfGeoLayerIDExists parameter value ({}) is not recognized.".format(pv_IfGeoLayerIDExists)
            recommendation = "Specify one of the acceptable values ({}) for the IfGeoLayerIDExists parameter.".format(
                self.__choices_IfGeoLayerIDExists)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)
        else:
            # Refresh the phase severity
            self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_read_geolayer(self, spatial_data_file_abs, geolayer_id):
        """
        Checks the following:
        * the SpatialDataFile (absolute) is a valid file
        * the SpatialDataFile (absolute) ends in .GEOJSON (warning, not error)
        * the ID of the output GeoLayer is unique (not an existing GeoLayer ID)

        Args:
            spatial_data_file_abs: the full pathname to the input spatial data file
            geolayer_id: the ID of the output GeoLayer

        Returns:
            run_read: Boolean.
                If TRUE, the read process should be run. If FALSE, the read process should not be run.
        """

        # Boolean to determine if the read process should be run. Set to true until an error occurs.
        run_read = True

        # If the input spatial data file is not a valid file path, raise a FAILURE.
        if not os.path.isfile(spatial_data_file_abs):
            run_read = False
            self.warning_count += 1
            message = "The SpatialDataFile ({}) is not a valid file.".format(spatial_data_file_abs)
            recommendation = "Specify a valid file."
            self.logger.error(message)
            self.command_status.add_to_log(CommandPhaseType.RUN,
                                           CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # If the input spatial data file does not end in .geojson, raise a WARNING.
        if not spatial_data_file_abs.upper().endswith(".GEOJSON"):
            self.warning_count += 1
            message = 'The SpatialDataFile ({}) does not end with the .geojson extension.'.format(
                spatial_data_file_abs)
            recommendation = "No recommendation logged."
            self.logger.warning(message)
            self.command_status.add_to_log(CommandPhaseType.RUN,
                                           CommandLogRecord(CommandStatusType.WARNING, message, recommendation))

        # If the GeoLayerID is the same as an already-registered GeoLayerID, react according to the
        # pv_IfGeoLayerIDExists value.
        # BUG FIX: this check was previously an "elif" chained to the extension warning above, so the
        # duplicate-ID policy (ReplaceAndWarn/Warn/Fail) was silently skipped whenever the file did not end in
        # .geojson. The two checks are independent, so a separate "if" is used.
        if self.command_processor.get_geolayer(geolayer_id):

            # Get the IfGeoLayerIDExists parameter value.
            pv_IfGeoLayerIDExists = self.get_parameter_value("IfGeoLayerIDExists", default_value="Replace")

            # Warnings/recommendations if the GeolayerID is the same as a registered GeoLayerID.
            message = 'The GeoLayerID ({}) value is already in use as a GeoLayer ID.'.format(geolayer_id)
            recommendation = 'Specify a new GeoLayerID.'

            # The registered GeoLayer should be replaced with the new GeoLayer (with warnings).
            if pv_IfGeoLayerIDExists.upper() == "REPLACEANDWARN":
                self.warning_count += 1
                self.logger.warning(message)
                self.command_status.add_to_log(CommandPhaseType.RUN,
                                               CommandLogRecord(CommandStatusType.WARNING, message, recommendation))

            # The registered GeoLayer should not be replaced. A warning should be logged.
            if pv_IfGeoLayerIDExists.upper() == "WARN":
                run_read = False
                self.warning_count += 1
                self.logger.warning(message)
                self.command_status.add_to_log(CommandPhaseType.RUN,
                                               CommandLogRecord(CommandStatusType.WARNING, message, recommendation))

            # The matching IDs should cause a FAILURE.
            elif pv_IfGeoLayerIDExists.upper() == "FAIL":
                run_read = False
                self.warning_count += 1
                self.logger.error(message)
                self.command_status.add_to_log(CommandPhaseType.RUN,
                                               CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Return the Boolean to determine if the read process should be run. If TRUE, all checks passed. If FALSE,
        # one or many checks failed.
        return run_read

    def run_command(self):
        """
        Run the command. Read the layer file from a GeoJSON file, create a GeoLayer object, and add to the
        GeoProcessor's geolayer list.

        Returns: None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """

        # Obtain the parameter values.
        pv_SpatialDataFile = self.get_parameter_value("SpatialDataFile")
        pv_GeoLayerID = self.get_parameter_value("GeoLayerID", default_value='%f')

        # Expand for ${Property} syntax.
        pv_GeoLayerID = self.command_processor.expand_parameter_value(pv_GeoLayerID, self)

        # Convert the SpatialDataFile parameter value relative path to an absolute path and expand for ${Property}
        # syntax.
        spatial_data_file_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(self.command_processor.get_property('WorkingDir'),
                                     self.command_processor.expand_parameter_value(pv_SpatialDataFile, self)))

        # If the pv_GeoLayerID is a valid %-formatter, assign the pv_GeoLayerID the corresponding value.
        if pv_GeoLayerID in ['%f', '%F', '%E', '%P', '%p']:
            pv_GeoLayerID = io_util.expand_formatter(spatial_data_file_absolute, pv_GeoLayerID)

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.__should_read_geolayer(spatial_data_file_absolute, pv_GeoLayerID):

            try:
                # Create a QGSVectorLayer object with the GeoJSON SpatialDataFile.
                qgs_vector_layer = qgis_util.read_qgsvectorlayer_from_file(spatial_data_file_absolute)

                # Create a GeoLayer and add it to the geoprocessor's GeoLayers list.
                geolayer_obj = GeoLayer(geolayer_id=pv_GeoLayerID,
                                        geolayer_qgs_vector_layer=qgs_vector_layer,
                                        geolayer_source_path=spatial_data_file_absolute)
                self.command_processor.add_geolayer(geolayer_obj)

            # Raise an exception if an unexpected error occurs during the process.
            except Exception:
                self.warning_count += 1
                message = "Unexpected error reading GeoLayer {} from GeoJSON file {}.".format(pv_GeoLayerID,
                                                                                              pv_SpatialDataFile)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(CommandPhaseType.RUN,
                                               CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise Runtime Error if any errors occurred.
        if self.warning_count > 0:
            # BUG FIX: message previously read "warnings proceeding this command".
            message = "There were {} warnings processing this command.".format(self.warning_count)
            self.logger.error(message)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class ReadGeoLayersFromFGDB(AbstractCommand):
    """
    Reads the GeoLayers (feature classes) within a file geodatabase (FGDB).

    This command reads the GeoLayers from a file geodatabase and creates GeoLayer objects within the
    geoprocessor. The GeoLayers can then be accessed in the geoprocessor by their identifier and further processed.

    GeoLayers are stored on a computer or are available for download as a spatial data file (GeoJSON, shapefile,
    feature class in a file geodatabase, etc.). Each GeoLayer has one feature type (point, line, polygon, etc.) and
    other data (an identifier, a coordinate reference system, etc). Note that this function only reads one or
    many GeoLayers (feature classes) from within a single file geodatabase.

    In order for the geoprocessor to use and manipulate spatial data files, GeoLayers are instantiated as
    `QgsVectorLayer <https://qgis.org/api/classQgsVectorLayer.html>`_ objects. This command will read the GeoLayers
    from the feature classes within a file geodatabase and instantiate them as geoprocessor GeoLayer objects.

    Command Parameters
    * SpatialDataFolder (str, required): the relative pathname to the file geodatabase containing spatial data
        files (feature classes)
    * ReadOnlyOneFeatureClass (str, required): a string that can be converted to a valid boolean value. If TRUE,
        only one specific feature class is read in as a GeoLayer. If FALSE, multiple feature classes are read in as
        different GeoLayers.
    * FeatureClass (str, required if ReadOnlyOneFeatureClass is TRUE): the name of the feature class within the
        geodatabase to read.
    * GeoLayerID (str, required if ReadOnlyOneFeatureClass is TRUE): the GeoLayer identifier.
    * GeoLayerID_prefix (str, optional, only used if ReadOnlyOneFeatureClass is FALSE): the GeoLayer identifier
        will, by default, use the name of the feature class that is being read. However, if a value is set for this
        parameter, the GeoLayerID will follow this format: [GeoLayerID_prefix]_[name_of_feature_class].
    * Subset_Pattern (str, optional, only used if ReadOnlyOneFeatureClass is FALSE): the glob-style pattern of the
        feature class name to determine which feature classes within the file geodatabase are to be processed. More
        information on creating a glob pattern can be found at https://docs.python.org/2/library/glob.html.
    * IfGeoLayerIDExists (str, optional): This parameter determines the action that occurs if the GeoLayerID
        already exists within the GeoProcessor. Available options are: `Replace`, `ReplaceAndWarn`, `Warn` and
        `Fail` (Refer to user documentation for detailed description.) Default value is `Replace`. Used if
        ReadOnlyOneFeatureClass is TRUE or FALSE.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("SpatialDataFolder", type("")),
        CommandParameterMetadata("ReadOnlyOneFeatureClass", type("")),
        CommandParameterMetadata("FeatureClass", type("")),
        CommandParameterMetadata("GeoLayerID", type("")),
        CommandParameterMetadata("GeoLayerID_prefix", type("")),
        CommandParameterMetadata("Subset_Pattern", type("")),
        CommandParameterMetadata("IfGeoLayerIDExists", type(""))
    ]

    def __init__(self):
        """
        Initialize the command
        """

        # AbstractCommand data
        super().__init__()
        self.command_name = "ReadGeoLayersFromFGDB"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata[
            'Description'] = "Read one or more GeoLayerss from an Esri File Geodatabase."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # SpatialDataFolder
        self.parameter_input_metadata[
            'SpatialDataFolder.Description'] = "file geodatabase to read"
        self.parameter_input_metadata[
            'SpatialDataFolder.Label'] = "Spatial data folder"
        self.parameter_input_metadata['SpatialDataFolder.Required'] = True
        self.parameter_input_metadata[
            'SpatialDataFolder.Tooltip'] = "The file geodatbase to read (must end in .gdb)."
        self.parameter_input_metadata[
            'SpatialDataFolder.FileSelector.Type'] = "Read"
        self.parameter_input_metadata[
            'SpatialDataFolder.FileSelector.Title'] = "Select the file geodatabase to read."
        # ReadOnlyOneFeatureClass
        self.parameter_input_metadata[
            'ReadOnlyOneFeatureClass.Description'] = "whether to read one feature class"
        self.parameter_input_metadata[
            'ReadOnlyOneFeatureClass.Label'] = "Read only one feature class?"
        self.parameter_input_metadata[
            'ReadOnlyOneFeatureClass.Required'] = True
        self.parameter_input_metadata['ReadOnlyOneFeatureClass.Tooltip'] = (
            "If TRUE, only one feature class will be read as a GeoLayer. Must specify a valid feature class name. \n"
            "If FALSE, one or more feature classes will be read as GeoLayers. "
            "Can specify the Subset_Pattern to select which feature classes to read."
        )
        self.parameter_input_metadata['ReadOnlyOneFeatureClass.Values'] = [
            "", "TRUE", "FALSE"
        ]
        # IfGeoLayerIDExists
        self.parameter_input_metadata[
            'IfGeoLayerIDExists.Description'] = "action if output exists"
        self.parameter_input_metadata[
            'IfGeoLayerIDExists.Label'] = "If GeoLayerID exists"
        self.parameter_input_metadata['IfGeoLayerIDExists.Tooltip'] = (
            "The action that occurs if the GeoLayerID already exists within the GeoProcessor.\n"
            "Replace : The existing GeoLayer within the GeoProcessor is overwritten with the new GeoLayer. "
            "No warning is logged.\n"
            "ReplaceAndWarn: The existing GeoLayer within the GeoProcessor is overwritten with the new "
            "GeoLayer. A warning is logged.\n"
            "Warn : The new GeoLayer is not created. A warning is logged.\n"
            "Fail : The new GeoLayer is not created. A fail message is logged."
        )
        self.parameter_input_metadata['IfGeoLayerIDExists.Values'] = [
            "", "Replace", "ReplaceAndWarn", "Warn", "Fail"
        ]
        self.parameter_input_metadata[
            'IfGeoLayerIDExists.Value.Default'] = "Replace"
        # FeatureClass
        self.parameter_input_metadata[
            'FeatureClass.Description'] = "name of feature class to read"
        self.parameter_input_metadata['FeatureClass.Label'] = "Feature class"
        self.parameter_input_metadata['FeatureClass.Required'] = True
        self.parameter_input_metadata['FeatureClass.Tooltip'] =\
            "The name of the feature class within the file geodatabase to read. ${Property} syntax is recognized."
        # GeoLayerID
        self.parameter_input_metadata[
            'GeoLayerID.Description'] = "output GeoLayer identifier"
        self.parameter_input_metadata['GeoLayerID.Label'] = "GeoLayerID"
        self.parameter_input_metadata['GeoLayerID.Required'] = True
        self.parameter_input_metadata['GeoLayerID.Tooltip'] = \
            "A GeoLayer identifier. Formatting characters and ${Property} syntax are recognized."
        # GeoLayerID_prefix
        self.parameter_input_metadata[
            'GeoLayerID_prefix.Description'] = "a GeoLayer identifier prefix"
        self.parameter_input_metadata[
            'GeoLayerID_prefix.Label'] = "GeoLayerID prefix"
        self.parameter_input_metadata['GeoLayerID_prefix.Tooltip'] =\
            "GeoLayers read from a file geodatabase have an identifier in the GeoLayerID_prefix_FeatureClass format."
        self.parameter_input_metadata['GeoLayerID_prefix.Value.Default'] =\
            "No prefix is used. The GeoLayerID is the name of the feature class."
        # Subset_Pattern
        self.parameter_input_metadata[
            'Subset_Pattern.Description'] = "globstyle pattern of feature classes to read"
        self.parameter_input_metadata[
            'Subset_Pattern.Label'] = "Subset pattern"
        self.parameter_input_metadata['Subset_Pattern.Tooltip'] =\
            "The glob-style pattern (e.g., CO_* or *_[MC]O) of feature classes to read from the file geodatabase."
        self.parameter_input_metadata['Subset_Pattern.Value.Default'] =\
            "No pattern is used. All feature classes within the file geodatabase are read."

        # Class data
        # warning_count: running count of warnings generated by this command instance
        self.warning_count = 0
        # logger: module-level logger used for command messages
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that parameter SpatialDataFolder is a non-empty, non-None string.
        # - existence of the folder will also be checked in run_command().
        pv_SpatialDataFolder = self.get_parameter_value(
            parameter_name='SpatialDataFolder',
            command_parameters=command_parameters)

        if not validators.validate_string(pv_SpatialDataFolder, False, False):
            message = "SpatialDataFolder parameter has no value."
            recommendation = "Specify text for the SpatialDataFolder parameter to indicate the file geodatabase."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # Check that optional parameter IfGeoLayerIDExists is either `Replace`, `ReplaceAndWarn`, `Warn`, `Fail`,
        # None.
        pv_IfGeoLayerIDExists = self.get_parameter_value(
            parameter_name="IfGeoLayerIDExists",
            command_parameters=command_parameters)
        acceptable_values = ["Replace", "ReplaceAndWarn", "Warn", "Fail"]
        if not validators.validate_string_in_list(pv_IfGeoLayerIDExists,
                                                  acceptable_values,
                                                  none_allowed=True,
                                                  empty_string_allowed=True,
                                                  ignore_case=True):
            message = "IfGeoLayerIDExists parameter value ({}) is not recognized.".format(
                pv_IfGeoLayerIDExists)
            recommendation = "Specify one of the acceptable values ({}) for the IfGeoLayerIDExists parameter.".format(
                acceptable_values)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # Check that the optional parameter ReadOnlyOneFeatureClass is a valid Boolean.
        pv_ReadOnlyOneFeatureClass = self.get_parameter_value(
            parameter_name="ReadOnlyOneFeatureClass",
            command_parameters=command_parameters)
        if not validators.validate_bool(pv_ReadOnlyOneFeatureClass, True,
                                        False):
            message = "ReadOnlyOneFeatureClass is not a valid boolean value."
            recommendation = "Specify a valid boolean value for the ReadOnlyOneFeatureClass parameter."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 recommendation))

        # Continue with checks if the ReadOnlyOneFeatureClass is a valid TRUE Boolean.
        elif string_util.str_to_bool(pv_ReadOnlyOneFeatureClass):

            # Check that parameter GeoLayerID is a non-empty, non-None string.
            pv_GeoLayerID = self.get_parameter_value(
                parameter_name='GeoLayerID',
                command_parameters=command_parameters)

            if not validators.validate_string(pv_GeoLayerID, False, False):
                message = "GeoLayerID parameter has no value."
                recommendation = "Specify the GeoLayerID parameter."
                warning += "\n" + message
                self.command_status.add_to_log(
                    CommandPhaseType.INITIALIZATION,
                    CommandLogRecord(CommandStatusType.FAILURE, message,
                                     recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)
        else:
            # Refresh the phase severity
            self.command_status.refresh_phase_severity(
                CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __return_a_list_of_fc(self, fgdb_full_path):
        """
        Return the names of the feature classes within a file geodatabase.

        Args:
            fgdb_full_path: the full pathname to the file geodatabase to read

        Returns:
            A list of the feature class names (str) within the file geodatabase.
        """
        # The file geodatabase will be read and each feature class will be added to the feature_class_list.
        feature_class_list = []

        # Append each feature class in the Esri File Geodatabase to the feature_class_list.
        # REF: https://pcjericks.github.io/py-gdalogr-cookbook/vector_layers.html|
        # "Get all layers in an Esri File GeoDataBase"
        ogr.UseExceptions()
        driver = ogr.GetDriverByName("OpenFileGDB")
        gdb = driver.Open(fgdb_full_path)
        for feature_class_idx in range(gdb.GetLayerCount()):
            feature_class = gdb.GetLayerByIndex(feature_class_idx)
            feature_class_list.append(feature_class.GetName())

        return feature_class_list

    def __should_read_gdb(self, spatial_data_folder_abs):
        """
        Checks the following:
        * the SpatialDataFolder (absolute) is a valid folder
        * the SpatialDataFolder (absolute) is a valid File GeoDatabase

        Args:
            spatial_data_folder_abs (str): the full pathname to the input spatial data folder

        Returns:
            Boolean. If TRUE, the GeoDatabase should be read. If FALSE, at least one check failed and the
                GeoDatabase should not be read.
        """

        # List of Boolean values. The Boolean values correspond to the results of the following tests. If TRUE, the
        # test confirms that the command should be run.
        should_run_command = []

        # If the input spatial data folder is not a valid file path, raise a FAILURE.
        should_run_command.append(
            validators.run_check(self, "IsFolderPathValid",
                                 "SpatialDataFolder",
                                 spatial_data_folder_abs, "FAIL"))

        # If the input spatial data folder is valid, continue with the checks.
        if False not in should_run_command:

            # If the input spatial data folder is not a file geodatabase, raise a FAILURE.
            should_run_command.append(
                validators.run_check(self, "IsFolderAfGDB",
                                     "SpatialDataFolder",
                                     spatial_data_folder_abs, "FAIL"))

        # Return the Boolean to determine if the process should be run.
        if False in should_run_command:
            return False
        else:
            return True

    def __should_read_geolayer(self,
                               geolayer_id,
                               one_geolayer_bool,
                               fc_name=None,
                               spatial_data_folder_abs=None):
        """
        Checks the following:
        * if only one geolayer is being read, the FeatureClass is an existing feature class within the File
            GeoDatabase
        * the ID of the output GeoLayer is unique (not an existing GeoLayer ID)

        Args:
            geolayer_id (str): the ID of the output GeoLayer
            one_geolayer_bool (bool): if True, the command is only reading one FC from the FGDB
            fc_name (str): the name of the FC being read. Only used if one_geolayer_bool is True. Default = None
            spatial_data_folder_abs (str): the full pathname to the input spatial data folder. Only used if
                one_geolayer_bool is True. Default = None

        Returns:
            Boolean. If TRUE, the GeoLayer should be read. If FALSE, at least one check failed and the GeoLayer
                should not be read.
        """

        # List of Boolean values. The Boolean values correspond to the results of the following tests. If TRUE, the
        # test confirms that the command should be run.
        should_run_command = []

        # If the GeoLayerID is the same as an already-existing GeoLayerID, raise a WARNING or FAILURE (depends
        # on the value of the IfGeoLayerIDExists parameter.)
        should_run_command.append(
            validators.run_check(self, "IsGeoLayerIdUnique", "GeoLayerID",
                                 geolayer_id, None))

        # If only one geolayer is being read from the file geodatabase, continue.
if one_geolayer_bool: # If the provided feature class is not in the FGDB raise a FAILURE. should_run_command.append( validators.run_check(self, "IsFeatureClassInFGDB", "FeatureClass", fc_name, "FAIL", other_values=[spatial_data_folder_abs])) # Return the Boolean to determine if the process should be run. if False in should_run_command: return False else: return True def run_command(self): """ Run the command. Read the feature classes within a file geodatabase. For each desired feature class (can be specified by the Subset_Pattern parameter), create a GeoLayer object, and add to the GeoProcessor's geolayer list. Returns: None. Raises: RuntimeError if any warnings occurred during run_command method. """ # Obtain the required and optional parameter values pv_SpatialDataFolder = self.get_parameter_value("SpatialDataFolder") pv_ReadOnlyOneFeatureClass = self.get_parameter_value( "ReadOnlyOneFeatureClass") pv_Subset_Pattern = self.get_parameter_value("Subset_Pattern") pv_GeoLayerID_prefix = self.get_parameter_value("GeoLayerID_prefix") pv_GeoLayerID = self.get_parameter_value("GeoLayerID") pv_FeatureClass = self.get_parameter_value("FeatureClass") # Convert the ReadOnlyOneFeatureClass from a string value to a Boolean value. pv_ReadOnlyOneFeatureClass = string_util.str_to_bool( pv_ReadOnlyOneFeatureClass) # Convert the SpatialDataFolder parameter value relative path to an absolute path sd_folder_abs = io_util.verify_path_for_os( io_util.to_absolute_path( self.command_processor.get_property('WorkingDir'), self.command_processor.expand_parameter_value( pv_SpatialDataFolder, self))) # Run the initial checks on the parameter values. Only continue if the checks passed. if self.__should_read_gdb(sd_folder_abs): # If configured to only read one Feature Class into one GeoLayer. if pv_ReadOnlyOneFeatureClass: # Run the check to see if the GeoLayer should be read. 
if self.__should_read_geolayer(pv_GeoLayerID, True, pv_FeatureClass, sd_folder_abs): try: # Get the full pathname to the feature class # TODO egiles 2018-01-04 Need to research how to properly document feature class source path spatial_data_file_absolute = os.path.join( sd_folder_abs, str(pv_FeatureClass)) # Create a QgsVectorLayer object from the feature class. qgs_vector_layer = qgis_util.read_qgsvectorlayer_from_feature_class( sd_folder_abs, pv_FeatureClass) # Create a GeoLayer and add it to the geoprocessor's GeoLayers list geolayer_obj = GeoLayer(pv_GeoLayerID, qgs_vector_layer, spatial_data_file_absolute) self.command_processor.add_geolayer(geolayer_obj) # Raise an exception if an unexpected error occurs during the process except Exception as e: self.warning_count += 1 message = "Unexpected error reading feature class ({}) from file geodatabase ({}).".format( pv_FeatureClass, sd_folder_abs) recommendation = "Check the log file for details." self.logger.error(message, exc_info=True) self.command_status.add_to_log( CommandPhaseType.RUN, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # If configured to read multiple Feature Classes into multiple GeoLayers. else: # Get a list of all of the feature classes in the file geodatabase. fc_list = self.__return_a_list_of_fc(sd_folder_abs) # Filter the fc_list to only include feature classes that meet the Subset Pattern configuration. If # the Subset Pattern configuration is None, all feature classes will remain in the fc_list. fc_list = string_util.filter_list_of_strings( fc_list, [pv_Subset_Pattern]) # Iterate over the feature classes in the fc_list. for feature_class in fc_list: # Determine the GeoLayerID. if pv_GeoLayerID_prefix: geolayer_id = "{}_{}".format(pv_GeoLayerID_prefix, feature_class) else: geolayer_id = feature_class # Run the secondary checks on the parameter values. Only continue if the checks passed. 
if self.__should_read_geolayer(geolayer_id, False): try: # Get the full pathname to the feature class # TODO egiles 2018-01-04 Need to research how to properly document feature class source path spatial_data_file_absolute = os.path.join( sd_folder_abs, str(feature_class)) # Create a QgsVectorLayer object from the feature class. qgs_vector_layer = qgis_util.read_qgsvectorlayer_from_feature_class( sd_folder_abs, feature_class) # Create a GeoLayer and add it to the geoprocessor's GeoLayers list geolayer_obj = GeoLayer( geolayer_id, qgs_vector_layer, spatial_data_file_absolute) self.command_processor.add_geolayer(geolayer_obj) # Raise an exception if an unexpected error occurs during the process except Exception as e: self.warning_count += 1 message = "Unexpected error reading feature class ({}) from file geodatabase ({}).".format( feature_class, sd_folder_abs) recommendation = "Check the log file for details." self.logger.error(message, exc_info=True) self.command_status.add_to_log( CommandPhaseType.RUN, CommandLogRecord(CommandStatusType.FAILURE, message, recommendation)) # Determine success of command processing. Raise Runtime Error if any errors occurred if self.warning_count > 0: message = "There were {} warnings proceeding this command.".format( self.warning_count) raise RuntimeError(message) # Set command status type as SUCCESS if there are no errors. else: self.command_status.refresh_phase_severity( CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class If(AbstractCommand):
    """
    The If command starts an If block.
    """

    __command_parameter_metadata = [
        CommandParameterMetadata("Name", type("")),
        CommandParameterMetadata("Condition", type("")),
        CommandParameterMetadata("CompareAsStrings", type(True))  # Not yet implemented
    ]

    def __init__(self):
        """
        Initialize the command instance.
        """
        super().__init__()
        # AbstractCommand data
        self.command_name = "If"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = (
            "The If command evaluates a conditional statement and if true will "
            "result in the commands between If and matching EndIf being executed. ")
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # Name
        self.parameter_input_metadata['Name.Description'] = \
            "the name of the If command to be matched with the EndIf command"
        self.parameter_input_metadata['Name.Label'] = "Name"
        self.parameter_input_metadata['Name.Required'] = True
        self.parameter_input_metadata['Name.Tooltip'] = (
            "The name of the If command, which will be matched with the name of an EndIf command to indicate\n"
            "the block of commands in the if condition.")
        # Condition
        self.parameter_input_metadata['Condition.Description'] = "the conditional statement to evaluate"
        self.parameter_input_metadata['Condition.Label'] = "Condition"
        self.parameter_input_metadata['Condition.Required'] = True
        self.parameter_input_metadata['Condition.Tooltip'] = "The conditional statement to evaluate."
        # CompareAsStrings
        self.parameter_input_metadata['CompareAsStrings.Description'] = "boolean"
        self.parameter_input_metadata['CompareAsStrings.Label'] = "Compare As Strings"
        self.parameter_input_metadata['CompareAsStrings.Tooltip'] = (
            "If True, the comparison will be done as strings even if the values could be treated as numbers or "
            "Booleans. ")
        self.parameter_input_metadata['CompareAsStrings.Value.Default'] = "FALSE"

        # Local private data
        self.__condition_eval = True  # The result of evaluating the condition

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""
        logger = logging.getLogger(__name__)

        # Unlike most commands, set internal data here because it is needed by initial call to next()
        # before calls to run_command

        # Name is required
        pv_Name = self.get_parameter_value(parameter_name='Name', command_parameters=command_parameters)
        if not validators.validate_string(pv_Name, False, False):
            message = "A name for the If block must be specified"
            recommendation = "Specify the Name."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Condition is required
        pv_Condition = self.get_parameter_value(parameter_name='Condition', command_parameters=command_parameters)
        if not validators.validate_string(pv_Condition, False, False):
            message = "A condition for the If command must be specified"
            recommendation = "Specify the condition."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty
        # triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception
        if len(warning) > 0:
            # Use warning() - warn() is a deprecated alias in the logging module.
            logger.warning(warning)
            raise ValueError(warning)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def get_condition_eval(self):
        """
        Return the result of evaluating the condition, which is set when run_command() is called.

        Returns:
            Return the result (bool) of evaluating the condition, which is set when run_command() is called.
        """
        return self.__condition_eval

    def get_name(self):
        """
        Return the name of the If (will match name of corresponding EndIf).

        Returns:
            The name of the If (will match name of corresponding EndIf).
        """
        return self.get_parameter_value("Name")

    # The logic of this command closely matches the TSTool If command
    # - could possibly improve but for now implement something basic that works
    def run_command(self):
        """
        Run the command. Evaluate the condition and store the result for retrieval via get_condition_eval().

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """
        logger = logging.getLogger(__name__)
        debug = True  # Use for troubleshooting
        warning_count = 0  # General count for issues
        pv_Name = self.get_parameter_value("Name")
        pv_Condition = self.get_parameter_value("Condition")
        condition_upper = None
        if pv_Condition is not None:
            condition_upper = pv_Condition.upper()
        pv_CompareAsStrings = self.get_parameter_value("CompareAsStrings")
        compare_as_strings = False
        # BUG FIX: the original compared the bound method object (pv_CompareAsStrings.upper) to "TRUE",
        # which is always False, so CompareAsStrings=True never took effect. The method must be called.
        if pv_CompareAsStrings is not None and pv_CompareAsStrings.upper() == "TRUE":
            compare_as_strings = True

        # TODO smalers 2018-02-18 need to add other special conditions such as empty properties, GeoLayer exists, etc.
        # - see TSTool code
        try:
            condition_eval = False
            self.__condition_eval = condition_eval
            if pv_Condition is not None and pv_Condition != "":
                # Condition is specified, rather than special check.
                # First determine the condition operator.
                # TODO SAM 2013-12-07 Figure out if there is a more elegant way to do this
                # Currently only Value1 Operator Value2 is allowed.
                # Brute force split by finding the operator.
                # Note: find() > 0 (not >= 0) is intentional - the operator must have a left-side operand.
                logger.info('Evaluating Condition="' + pv_Condition + '"')
                pos1 = -1
                pos2 = -1
                value1 = ""
                value2 = ""
                op = "??"
                if pv_Condition.find("<=") > 0:
                    pos = pv_Condition.find("<=")
                    op = "<="
                    pos1 = pos
                    pos2 = pos + 2
                elif pv_Condition.find("<") > 0:
                    pos = pv_Condition.find("<")
                    op = "<"
                    pos1 = pos
                    pos2 = pos + 1
                elif pv_Condition.find(">=") > 0:
                    pos = pv_Condition.find(">=")
                    op = ">="
                    pos1 = pos
                    pos2 = pos + 2
                elif pv_Condition.find(">") > 0:
                    pos = pv_Condition.find(">")
                    op = ">"
                    pos1 = pos
                    pos2 = pos + 1
                elif pv_Condition.find("==") > 0:
                    pos = pv_Condition.find("==")
                    op = "=="
                    pos1 = pos
                    pos2 = pos + 2
                elif pv_Condition.find("!=") > 0:
                    pos = pv_Condition.find("!=")
                    op = "!="
                    pos1 = pos
                    pos2 = pos + 2
                elif condition_upper.find("!CONTAINS") > 0:
                    # Put this before the next "CONTAINS" operator
                    pos = condition_upper.find("!CONTAINS")
                    op = "!CONTAINS"
                    pos1 = pos
                    pos2 = pos + 9
                    compare_as_strings = True  # "!contains" is only used on strings
                elif condition_upper.find("CONTAINS") > 0:
                    pos = condition_upper.find("CONTAINS")
                    op = "CONTAINS"
                    pos1 = pos
                    pos2 = pos + 8
                    compare_as_strings = True  # "contains" is only used on strings
                elif pv_Condition.find("=") > 0:
                    message = "Bad use of = in condition."
                    recommendation = "Use == to check for equality."
                    logger.warning(message)
                    self.command_status.add_to_log(
                        CommandPhaseType.RUN,
                        CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))
                else:
                    message = 'Unknown condition operator for "' + pv_Condition + '"'
                    recommendation = \
                        "Make sure condition operator is supported - refer to command editor and documentation."
                    logger.warning(message)
                    self.command_status.add_to_log(
                        CommandPhaseType.RUN,
                        CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

                logger.info('operator="' + op + '" pos1=' + str(pos1) + " pos2=" + str(pos2))

                # Now evaluate the left and right sides of the condition
                arg1 = pv_Condition[0:pos1].strip()
                if debug:
                    logger.info('Left side of condition before property expansion check: "' + str(arg1) + '"')
                if arg1.find("${") >= 0:
                    value1 = self.command_processor.expand_parameter_value(arg1, self)
                    if debug:
                        logger.info("Left side of condition after property expansion: " + value1)
                else:
                    value1 = arg1
                    if debug:
                        logger.info("Left side (no expansion needed): " + value1)
                arg2 = pv_Condition[pos2:].strip()
                if debug:
                    logger.info('Right side of condition before property expansion check: "' + str(arg2) + '"')
                if arg2.find("${") >= 0:
                    value2 = self.command_processor.expand_parameter_value(arg2, self)
                    if debug:
                        logger.info("Right side after property expansion: " + value2)
                else:
                    value2 = arg2
                    if debug:
                        logger.info("Right side (no expansion needed): " + value2)

                # If the arguments are quoted, then all of the following will be false
                is_value1_int = string_util.is_int(value1)
                is_value2_int = string_util.is_int(value2)
                is_value1_float = string_util.is_float(value1)
                is_value2_float = string_util.is_float(value2)
                is_value1_bool = string_util.is_bool(value1)
                is_value2_bool = string_util.is_bool(value2)

                # Strip surrounding double quotes for comparisons below - do after above checks for type
                value1 = value1.replace('"', "")
                value2 = value2.replace('"', "")

                if not compare_as_strings and is_value1_int and is_value2_int:
                    # Do an integer comparison
                    ivalue1 = int(value1)
                    ivalue2 = int(value2)
                    if op == "<=":
                        if ivalue1 <= ivalue2:
                            condition_eval = True
                    elif op == "<":
                        if ivalue1 < ivalue2:
                            condition_eval = True
                    elif op == ">=":
                        if ivalue1 >= ivalue2:
                            condition_eval = True
                    elif op == ">":
                        if ivalue1 > ivalue2:
                            condition_eval = True
                    elif op == "==":
                        if ivalue1 == ivalue2:
                            condition_eval = True
                    elif op == "!=":
                        if ivalue1 != ivalue2:
                            condition_eval = True
                elif not compare_as_strings and is_value1_float and is_value2_float:
                    # Compare floats
                    fvalue1 = float(value1)
                    fvalue2 = float(value2)
                    if op == "<=":
                        if fvalue1 <= fvalue2:
                            condition_eval = True
                    elif op == "<":
                        if fvalue1 < fvalue2:
                            condition_eval = True
                    elif op == ">=":
                        if fvalue1 >= fvalue2:
                            condition_eval = True
                    elif op == ">":
                        if fvalue1 > fvalue2:
                            condition_eval = True
                    elif op == "==":
                        if fvalue1 == fvalue2:
                            condition_eval = True
                    elif op == "!=":
                        if fvalue1 != fvalue2:
                            condition_eval = True
                elif not compare_as_strings and is_value1_bool and is_value2_bool:
                    # Do a boolean comparison
                    # - bool("") is False, every other string is True!
                    bvalue1 = string_util.str_to_bool(value1)
                    bvalue2 = string_util.str_to_bool(value2)
                    if debug:
                        # Closing quote added to the log message (was missing in the original).
                        logger.info('Evaluating boolean condition "' + str(bvalue1) + " " + op + " " +
                                    str(bvalue2) + '"')
                    if op == "<=":
                        if not bvalue1:
                            # false <= false or true (does not matter what right side is)
                            condition_eval = True
                    elif op == "<":
                        if not bvalue1 and bvalue2:
                            # false < true
                            condition_eval = True
                    elif op == ">=":
                        if bvalue1:
                            # true >= false or true (does not matter what right side is)
                            condition_eval = True
                    elif op == ">":
                        if bvalue1 and not bvalue2:
                            # true > false
                            condition_eval = True
                    elif op == "==":
                        if bvalue1 == bvalue2:
                            condition_eval = True
                    elif op == "!=":
                        if bvalue1 != bvalue2:
                            condition_eval = True
                elif compare_as_strings or \
                        (not is_value1_int and not is_value2_int and
                         not is_value1_float and not is_value2_float and
                         not is_value1_bool and not is_value2_bool):
                    # Always compare the string values or the input is not other types so assume strings
                    if op == "CONTAINS":
                        if value1.find(value2) >= 0:
                            condition_eval = True
                    elif op == "!CONTAINS":
                        if value1.find(value2) < 0:
                            condition_eval = True
                    else:
                        # Do a comparison of the strings to figure out lexicographically order
                        if value1 < value2:
                            comp = -1
                        elif value1 == value2:
                            comp = 0
                        else:
                            comp = 1
                        if op == "<=":
                            if comp <= 0:
                                condition_eval = True
                        elif op == "<":
                            if comp < 0:
                                condition_eval = True
                        elif op == ">=":
                            if comp >= 0:
                                condition_eval = True
                        elif op == ">":
                            if comp > 0:
                                condition_eval = True
                        elif op == "==":
                            if comp == 0:
                                condition_eval = True
                        elif op == "!=":
                            if comp != 0:
                                condition_eval = True
                else:
                    message = 'Left and right have different type - cannot evaluate condition "' + pv_Condition + '"'
                    recommendation = "Make sure data types on each side of operator are the same - " + \
                                     "refer to command editor and documentation."
                    logger.warning(message)
                    self.command_status.add_to_log(
                        CommandPhaseType.RUN,
                        CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))
                if debug:
                    logger.info("Condition evaluated to: " + str(condition_eval))

                # For now leave the following two messages in to reinforce to user the evaluation
                # - may only output when in debug mode in the future
                if pv_Condition.find("${") >= 0:
                    # Show the original
                    message = pv_Condition + " (showing ${Property} notation) evaluates to " + str(condition_eval)
                    recommendation = "Informational message."
                    self.command_status.add_to_log(
                        CommandPhaseType.RUN,
                        CommandLogRecord(CommandStatusType.SUCCESS, message, recommendation))

                # Always also show the expanded
                message = str(value1) + " " + op + " " + str(value2) + " evaluates to " + str(condition_eval)
                recommendation = "Informational message."
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.SUCCESS, message, recommendation))

                self.__condition_eval = condition_eval

        except Exception:
            warning_count += 1
            traceback.print_exc(file=sys.stdout)  # Formatting of error seems to have issue
            # str() guards against pv_Name being None in the message concatenation.
            message = 'Unexpected error in If, Name="' + str(pv_Name) + '"'
            # BUG FIX: do not pass the exception as a %-format argument (the message has no placeholder);
            # exc_info=True already attaches the traceback.
            logger.error(message, exc_info=True)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message, "Check the log file for details."))

        if warning_count > 0:
            message = "There were " + str(warning_count) + " warnings processing the command."
            raise RuntimeError(message)

        self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class WriteGeoLayerToKML(AbstractCommand):
    """
    Writes a GeoLayer to a spatial data file in KML format.

    This command writes a GeoLayer registered within the geoprocessor to a KML spatial data file. The KML spatial
    data file can then be viewed within Google Earth, moved within folders on the local computer, packaged for
    delivery, etc.

    Registered GeoLayers are stored as GeoLayer objects within the geoprocessor's GeoLayers list. Each GeoLayer has
    one feature type (point, line, polygon, etc.) and other data (an identifier, a coordinate reference system,
    etc). This function only writes one single GeoLayer to a single spatial data file in KML format.

    Command Parameters
    * GeoLayerID (str, required): the identifier of the GeoLayer to be written to a spatial data file in KML format.
    * OutputFile (str, required): the relative pathname of the output spatial data file.
    * PlacemarkNameAttribute (str, optional): Allows you to specify the field to use for the KML <name> element.
        Default: Name
    * PlacemarkDescriptionAttribute (str, optional): Allows you to specify the field to use for the KML
        <description> element. Default: Description
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("GeoLayerID", type("")),
        CommandParameterMetadata("OutputFile", type("")),
        CommandParameterMetadata("PlacemarkNameAttribute", type("")),
        CommandParameterMetadata("PlacemarkDescriptionAttribute", type(""))]

    def __init__(self):
        """
        Initialize the command.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "WriteGeoLayerToKML"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Write a GeoLayer to a file in KML format."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # GeoLayerID
        self.parameter_input_metadata['GeoLayerID.Description'] = "identifier of the GeoLayer to write"
        self.parameter_input_metadata['GeoLayerID.Label'] = "GeoLayerID"
        self.parameter_input_metadata['GeoLayerID.Required'] = True
        self.parameter_input_metadata['GeoLayerID.Tooltip'] = "The GeoLayer identifier, can use ${Property}."
        # OutputFile
        # Description fixed: previously said "property file to write" (copy-paste from another command).
        self.parameter_input_metadata['OutputFile.Description'] = "KML file to write"
        self.parameter_input_metadata['OutputFile.Label'] = "Output file"
        self.parameter_input_metadata['OutputFile.Required'] = True
        self.parameter_input_metadata['OutputFile.Tooltip'] = \
            "The output KML file (relative or absolute path). ${Property} syntax is recognized."
        self.parameter_input_metadata['OutputFile.FileSelector.Type'] = "Write"
        self.parameter_input_metadata['OutputFile.FileSelector.Title'] = "Select file to write output"
        # PlacemarkNameAttribute
        self.parameter_input_metadata['PlacemarkNameAttribute.Description'] = \
            "geolayer attribute for the KML's placemark name"
        self.parameter_input_metadata['PlacemarkNameAttribute.Label'] = "Placemark name attribute"
        self.parameter_input_metadata['PlacemarkNameAttribute.Tooltip'] = (
            "The GeoLayer attribute to populate the output KML's placemark <name> elements.\n"
            "Each GeoLayer feature is converted into a KML placemark. Each placemark can have a <name> element.\n"
            "The attribute values within the PlacemarkNameAttribute will populate each placemark's name.\n"
            "For further explanation, look at the example KML document under the Structure section of the Keyhole "
            "Markup Language Wikipedia page.")
        self.parameter_input_metadata['PlacemarkNameAttribute.Value.Default.Description'] = \
            "The output KML placemarks will not have a <name> element."
        # PlacemarkDescriptionAttribute
        self.parameter_input_metadata['PlacemarkDescriptionAttribute.Description'] = \
            "GeoLayer attribute for KML's placemark description"
        self.parameter_input_metadata['PlacemarkDescriptionAttribute.Label'] = "Placemark description attribute"
        self.parameter_input_metadata['PlacemarkDescriptionAttribute.Tooltip'] = (
            "The GeoLayer attribute to populate the output KML's placemark <description> elements.\n"
            "Each GeoLayer feature is converted into a KML placemark. Each placemark can have a <description> "
            "element.\n"
            "The attribute values within the PlacemarkDescriptionAttribute will populate each"
            "placemark's description.\n"
            "For further explanation, look at the example KML document under the Structure section of the "
            "Keyhole Markup Language Wikipedia page.")
        # Key fixed for consistency with PlacemarkNameAttribute: this is a default *description*, not a value,
        # so it belongs under 'Value.Default.Description'.
        self.parameter_input_metadata['PlacemarkDescriptionAttribute.Value.Default.Description'] = \
            "The output KML placemarks will not have a <description> element."

        # Class data
        self.warning_count = 0  # Running count of RUN-phase warnings; used to decide success/failure.
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that parameter GeoLayerID is a non-empty, non-None string.
        # - existence of the GeoLayer will also be checked in run_command().
        pv_GeoLayerID = self.get_parameter_value(parameter_name='GeoLayerID',
                                                 command_parameters=command_parameters)

        if not validators.validate_string(pv_GeoLayerID, False, False):
            message = "GeoLayerID parameter has no value."
            recommendation = "Specify the GeoLayerID parameter to indicate the GeoLayer to write."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that parameter OutputFile is a non-empty, non-None string.
        # - existence of the folder will also be checked in run_command().
        pv_OutputFile = self.get_parameter_value(parameter_name='OutputFile',
                                                 command_parameters=command_parameters)

        if not validators.validate_string(pv_OutputFile, False, False):
            message = "OutputFile parameter has no value."
            # Message fixed: this command writes KML, not GeoJSON.
            recommendation = "Specify the OutputFile parameter (relative or absolute pathname) to indicate the " \
                             "location and name of the output spatial data file in KML format."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    def __should_write_geolayer(self, geolayer_id, output_file_abs):
        """
        Checks the following:
        * the ID of the GeoLayer is an existing GeoLayer ID
        * the output folder is a valid folder

        Args:
            geolayer_id: the ID of the GeoLayer to be written
            output_file_abs: the full pathname to the output file

        Returns:
            Boolean. If TRUE, the GeoLayer should be written. If FALSE, at least one check failed and the GeoLayer
            should not be written.
        """
        # List of Boolean values. The Boolean values correspond to the results of the following tests. If TRUE, the
        # test confirms that the command should be run.
        should_run_command = []

        # If the GeoLayer ID is not an existing GeoLayer ID, raise a FAILURE.
        should_run_command.append(validators.run_check(self, "IsGeoLayerIdExisting", "GeoLayerID",
                                                       geolayer_id, "FAIL"))

        # If the folder of the OutputFile file path is not a valid folder, raise a FAILURE.
        should_run_command.append(validators.run_check(self, "DoesFilePathHaveAValidFolder", "OutputFile",
                                                       output_file_abs, "FAIL"))

        # Return the Boolean to determine if the process should be run.
        if False in should_run_command:
            return False
        else:
            return True

    def run_command(self):
        """
        Run the command. Write the GeoLayer to a spatial data file in KML format.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """
        # Obtain the parameter values.
        pv_GeoLayerID = self.get_parameter_value("GeoLayerID")
        pv_OutputFile = self.get_parameter_value("OutputFile")
        pv_PlacemarkNameAttribute = self.get_parameter_value("PlacemarkNameAttribute")
        pv_PlacemarkDescriptionAttribute = self.get_parameter_value("PlacemarkDescriptionAttribute")

        # Convert the OutputFile parameter value relative path to an absolute path and expand for ${Property} syntax
        output_file_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(self.command_processor.get_property('WorkingDir'),
                                     self.command_processor.expand_parameter_value(pv_OutputFile, self)))

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.__should_write_geolayer(pv_GeoLayerID, output_file_absolute):

            try:
                # Get the GeoLayer
                geolayer = self.command_processor.get_geolayer(pv_GeoLayerID)

                # Write the GeoLayer to a spatial data file in KML format
                # "Note that KML by specification uses only a single projection, EPSG:4326. All OGR KML output will be
                # presented in EPSG:4326. As such OGR will create layers in the correct coordinate system and transform
                # any geometries." - www.gdal.org/drv_kml.html
                qgis_util.write_qgsvectorlayer_to_kml(geolayer.qgs_vector_layer,
                                                      output_file_absolute,
                                                      "EPSG:4326",
                                                      pv_PlacemarkNameAttribute,
                                                      pv_PlacemarkDescriptionAttribute,
                                                      "clampToGround")

            # Log a FAILURE if an unexpected error occurs during the process
            except Exception:
                self.warning_count += 1
                # Message fixed: this command writes KML, not GeoJSON.
                message = "Unexpected error writing GeoLayer {} to KML format.".format(pv_GeoLayerID)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise Runtime Error if any errors occurred
        if self.warning_count > 0:
            # Message wording fixed: "proceeding" -> "processing".
            message = "There were {} warnings processing this command.".format(self.warning_count)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class WriteTableToDataStore(AbstractCommand):
    """
    Writes a Table to a DataStore object.

    Command Parameters
    * TableID (str, required): Identifier for Table to write. Can be specified with ${Property}.
    * IncludeColumns (str, optional): A comma-separated list of the glob-style patterns filtering which
        table columns to write. Default: * (all columns)
    * ExcludeColumns (str, optional): A comma-separated list of the glob-style patterns filtering which
        table columns to NOT write. This will override IncludeColumns. Default: '' (an empty string)
    * DataStoreID (str, required): The ID of a DataStore to receive data. ${Property} syntax is recognized.
    * DataStoreTable (str, required): The name of the DataStore table to receive data. ${Property} syntax
        is recognized.
    * ColumnMap (str, optional): A dictionary indicating which columns in TableID have different names in
        the DataStore table, using the syntax: ColumnName:DatastoreTableName,ColumnName:DatastoreTableName,...
        Default: DataStore table columns are assumed to match the column names in TableID.
    * DataStoreRelatedColumnsMap (str, optional): Not currently enabled.
    * WriteMode (str, required): The method used to write data. Must choose one of the following:
        1. NewTableInsert: a new table is added to the database and all rows of TableID are added to the
           database table.
        2. ExistingTableOverwrite: the existing database table is dropped and another database table is
           added (with the same name). All rows of TableID are added to the database table.
        3. ExistingTableInsert: rows of the TableID that do NOT conflict with any of the rows in the
           existing database table are appended to the database table.
        4. ExistingTableUpdate: rows of the TableID that do conflict with any of the rows in the existing
           database table are used to update the existing database rows. The rows that do NOT conflict
           with any of the rows in the existing database table are NOT appended to the database table.
        5. ExistingTableInsertUpdate: rows of the TableID that do NOT conflict with any of the rows in the
           existing database table are appended to the database table. Rows of the TableID that do conflict
           with any of the rows in the existing database table are used to update the existing database rows.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("TableID", type("")),
        CommandParameterMetadata("IncludeColumns", type("")),
        CommandParameterMetadata("ExcludeColumns", type("")),
        CommandParameterMetadata("DataStoreID", type("")),
        CommandParameterMetadata("DataStoreTable", type("")),
        CommandParameterMetadata("ColumnMap", type("")),
        CommandParameterMetadata("DataStoreRelatedColumnsMap", type("")),
        CommandParameterMetadata("WriteMode", type(""))
    ]

    # Choices for WriteMode, used to validate parameter and display in editor.
    __choices_WriteMode = ["NewTableInsert", "ExistingTableOverwrite", "ExistingTableInsert",
                           "ExistingTableUpdate", "ExistingTableInsertUpdate"]

    def __init__(self):
        """
        Initialize the command.
        """

        # AbstractCommand data
        super().__init__()
        self.command_name = "WriteTableToDataStore"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display.
        self.command_metadata = dict()
        # Typo fix: "executes and SQL statement" -> "executes an SQL statement".
        self.command_metadata['Description'] = (
            "This command processes each row in a Table and executes an "
            "SQL statement to insert the row into a database DataStore.")
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata.
        self.parameter_input_metadata = dict()
        # TableID
        self.parameter_input_metadata['TableID.Description'] = "table identifier"
        self.parameter_input_metadata['TableID.Label'] = "TableID"
        self.parameter_input_metadata['TableID.Required'] = True
        self.parameter_input_metadata['TableID.Tooltip'] = "A Table identifier"
        # IncludeColumns
        self.parameter_input_metadata['IncludeColumns.Description'] = "table columns to include"
        self.parameter_input_metadata['IncludeColumns.Label'] = "Include columns"
        self.parameter_input_metadata['IncludeColumns.Tooltip'] = \
            "A comma-separated list of the glob-style patterns filtering which columns to write."
        # Typo fix: removed stray leading apostrophe from the displayed default.
        self.parameter_input_metadata['IncludeColumns.Value.Default'] = "* - all columns are processed"
        # ExcludeColumns
        self.parameter_input_metadata['ExcludeColumns.Description'] = "table columns to exclude"
        self.parameter_input_metadata['ExcludeColumns.Label'] = "Exclude columns"
        # Fix: tooltip previously said "columns to write", contradicting the parameter's purpose.
        self.parameter_input_metadata['ExcludeColumns.Tooltip'] = \
            "A comma-separated list of the glob-style patterns filtering which columns to NOT write."
        self.parameter_input_metadata['ExcludeColumns.Value.Default.Description'] = "all columns"
        # DataStoreID
        self.parameter_input_metadata['DataStoreID.Description'] = "database DataStore"
        self.parameter_input_metadata['DataStoreID.Label'] = "DataStoreID"
        self.parameter_input_metadata['DataStoreID.Required'] = True
        # Typo fix: "recieve" -> "receive".
        self.parameter_input_metadata['DataStoreID.Tooltip'] = \
            "The ID of a database DataStore to receive the data. ${Property} syntax is recognized."
        # DataStoreTable
        self.parameter_input_metadata['DataStoreTable.Description'] = "database table to receive data"
        self.parameter_input_metadata['DataStoreTable.Label'] = "DataStore table"
        # Required flag added for consistency with check_command_parameters(), which requires this parameter.
        self.parameter_input_metadata['DataStoreTable.Required'] = True
        self.parameter_input_metadata['DataStoreTable.Tooltip'] = (
            "The name of the database table to receive data. ${Property} syntax is recognized.\n"
            "If specified, do not specify Sql or SqlFile.")
        # TODO @jurentie 01/22/19 do these need to be read file selector type?
        # ColumnMap
        self.parameter_input_metadata['ColumnMap.Description'] = "map table to datastore columns"
        self.parameter_input_metadata['ColumnMap.Label'] = "Column map"
        self.parameter_input_metadata['ColumnMap.Tooltip'] = (
            "Specify which columns in the Table have different names in the DataStore table.\n"
            "Use the syntax: ColumnName:DatastoreTableName, ColumnName:DatastoreTableName,...")
        # Typo fix: "columns names" -> "column names".
        self.parameter_input_metadata['ColumnMap.Value.Default.Description'] = \
            "DataStore table column names are assumed to match the Table column names."
        # DataStoreRelatedColumnsMap
        # Typo fix: "relate" -> "related".
        self.parameter_input_metadata['DataStoreRelatedColumnsMap.Description'] = \
            "datastore related table columns"
        self.parameter_input_metadata['DataStoreRelatedColumnsMap.Label'] = "DataStore related column map"
        # Typo fix: removed stray apostrophe embedded in the tooltip text.
        self.parameter_input_metadata['DataStoreRelatedColumnsMap.Tooltip'] = (
            "Indicate datastore columns that need to match values in a related table in the datastore. "
            "This parameter is currently disabled.")
        self.parameter_input_metadata['DataStoreRelatedColumnsMap.Value.Default'] = (
            "DataStore table columns are assumed to match the column names in TableID, "
            "with no need to perform reference table value matching.")
        # WriteMode
        self.parameter_input_metadata['WriteMode.Description'] = "method used to write data"
        self.parameter_input_metadata['WriteMode.Label'] = "Write mode"
        # Required flag added for consistency with check_command_parameters(), which requires this parameter.
        self.parameter_input_metadata['WriteMode.Required'] = True
        self.parameter_input_metadata['WriteMode.Tooltip'] = (
            "The method used to write data.\n"
            "NewTableInsert: a new table is added to the database and all rows of TableID are added to "
            "the database table \n"
            "ExistingTableOverwrite: the existing database table is dropped and another database table is "
            "added (with the same name).\n"
            "All rows of TableID are added to the database table\n"
            "ExistingTableInsert: rows of the TableID that do NOT conflict with any of the rows in the existing \n"
            "database table are appended to the database table.\n"
            "ExistingTableUpdate: rows of the TableID that do conflict with any of the rows in the existing "
            "database table are used to update the existing database rows.\n"
            "The rows that do NOT conflict with any "
            "of the rows in the existing database table are NOT appended to the database table.\n"
            "ExistingTableInsertUpdate: rows of the TableID that do NOT conflict with any of the rows in the "
            "existing database table are appended to the database table.\n"
            "Rows of the TableID that do conflict with any of the rows in the existing database table are "
            "used to update the existing database rows.")

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns: None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that the required parameters are non-empty, non-None strings.
        required_parameters = ["TableID", "DataStoreID", "DataStoreTable"]
        for parameter in required_parameters:
            parameter_value = self.get_parameter_value(parameter_name=parameter,
                                                       command_parameters=command_parameters)
            if not validators.validate_string(parameter_value, False, False):
                message = "{} parameter has no value.".format(parameter)
                recommendation = "Specify a valid value for the {} parameter.".format(parameter)
                warning += "\n" + message
                self.command_status.add_to_log(
                    CommandPhaseType.INITIALIZATION,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that required parameter WriteMode is one of the acceptable values.
        # (none_allowed=False: WriteMode is required, so None is rejected here.)
        pv_WriteMode = self.get_parameter_value(parameter_name="WriteMode",
                                                command_parameters=command_parameters)
        if not validators.validate_string_in_list(pv_WriteMode, self.__choices_WriteMode, none_allowed=False,
                                                  empty_string_allowed=False, ignore_case=True):
            message = "WriteMode parameter value ({}) is not recognized.".format(pv_WriteMode)
            recommendation = "Specify one of the acceptable values ({}) for the WriteMode parameter.".format(
                self.__choices_WriteMode)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an
        # exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)

        # Refresh the phase severity.
        self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)

    @staticmethod
    def __get_table_cols_to_write(include_col_patterns, exclude_col_patterns, table):
        """
        Return the list of Table columns configured (by the user inputs) to be written to the DataStore.

        Args:
            include_col_patterns (str): A comma-separated list of the glob-style patterns filtering which
                table columns to write.
            exclude_col_patterns (str): A comma-separated list of the glob-style patterns filtering which
                table columns to NOT write. This will override IncludeColumns.
            table (obj): the Table that is being written to the DataStore

        Return:
            A list of Table column names configured to write data.
        """

        # Convert the IncludeColumns and the ExcludeColumns parameters from strings to lists.
        table_cols_to_include_patterns = string_util.delimited_string_to_list(include_col_patterns)
        table_cols_to_exclude_patterns = string_util.delimited_string_to_list(exclude_col_patterns)

        # Get a list of all of the columns in the Table.
        all_table_cols = table.get_column_names()

        # Filter to the columns configured to be pushed to the DataStore.
        table_cols_to_include = string_util.filter_list_of_strings(all_table_cols,
                                                                   table_cols_to_include_patterns,
                                                                   table_cols_to_exclude_patterns,
                                                                   return_inclusions=True)

        # Return a list of Table column names configured to write data.
        return table_cols_to_include

    @staticmethod
    def __get_table_cols_to_exclude(include_col_patterns, exclude_col_patterns, table):
        """
        Return the list of Table columns NOT configured (by the user inputs) to be written to the DataStore.

        Args:
            include_col_patterns (str): A comma-separated list of the glob-style patterns filtering which
                table columns to write.
            exclude_col_patterns (str): A comma-separated list of the glob-style patterns filtering which
                table columns to NOT write. This will override IncludeColumns.
            table (obj): the Table that is being written to the DataStore

        Return:
            A list of Table column names NOT configured to write data.
        """

        # Convert the IncludeColumns and the ExcludeColumns parameters from strings to lists.
        table_cols_to_include_patterns = string_util.delimited_string_to_list(include_col_patterns)
        table_cols_to_exclude_patterns = string_util.delimited_string_to_list(exclude_col_patterns)

        # Get a list of all of the columns in the Table.
        all_table_cols = table.get_column_names()

        # Filter to the columns NOT configured to be pushed to the DataStore.
        table_cols_to_exclude = string_util.filter_list_of_strings(all_table_cols,
                                                                   table_cols_to_include_patterns,
                                                                   table_cols_to_exclude_patterns,
                                                                   return_inclusions=False)

        # Return a list of Table column names NOT configured to write data.
        return table_cols_to_exclude

    @staticmethod
    def __get_mapped_datastore_col_from_table_col(table_col_name, col_map_dic):
        """
        Get the corresponding DataStore table column name given the Table column name. This is achieved by
        looking up the corresponding values in the user-configured ColumnMap.

        Args:
            table_col_name (str): the name of the Table column
            col_map_dic (dic): a dictionary mapping the Table columns to the DataStore table columns
                Key: Table column ---> Value: DataStore table column

        Return:
            The corresponding DataStore table column name.
        """

        # If the Table column name is registered in the ColumnMap, return the corresponding DataStore
        # table column name.
        if table_col_name in col_map_dic.keys():
            return col_map_dic[table_col_name]

        # If the Table column name is not registered in the ColumnMap, assume the Table column name
        # directly maps to a DataStore table column name. Return the Table column name.
        else:
            return table_col_name

    def __get_datastore_cols_to_receive(self, table_cols_to_write, col_map_dic):
        """
        Get a list of the columns in the DataStore that are configured to receive data.

        Args:
            table_cols_to_write: a list of Table column names that are configured to write data
            col_map_dic (dic): a dictionary mapping the Table columns to the DataStore table columns
                Key: Table column ---> Value: DataStore table column

        Return:
            A list of DataBase table columns that are expected to receive data.
        """

        # Get a list of the columns in the DataStore that are configured to receive data.
        datastore_table_cols_to_receive = []

        # Iterate over the Table Columns to Write.
        for table_col_to_include in table_cols_to_write:

            # Get the corresponding DataStore table column name, as configured with user input in the ColumnMap.
            corresponding_datastore_table_col_name = self.__get_mapped_datastore_col_from_table_col(
                table_col_to_include, col_map_dic)

            # Add corresponding DataStore table column name to the master list.
            datastore_table_cols_to_receive.append(corresponding_datastore_table_col_name)

        # Return the list of the columns in the DataStore that are configured to receive data.
        return datastore_table_cols_to_receive

    def __should_write_table(self, table_id, datastore_id, datastore_table_name, writemode):
        """
        Checks the following:
        * the Table ID exists
        * the DataStore ID exists
        * the DataStore table exists, if the writemode is ExistingTableInsert, ExistingTableUpdate, or
          ExistingTableInsertUpdate
        * the DataStore table does not exist, if the writemode starts with NewTable

        Args:
            table_id (str): the ID of the Table to write
            datastore_id (str): the ID of the DataStore to receive data
            datastore_table_name (str): the name of the DataStore table to receive data
            writemode (str): the method used to write data

        Returns:
            Boolean. If TRUE, the process should be run. If FALSE, it should not be run.
        """

        # List of Boolean values. The Boolean values correspond to the results of the following tests.
        # If TRUE, the test confirms that the command should be run.
        should_run_command = []

        # If the DataStore ID is not an existing DataStore ID, raise a FAILURE.
        should_run_command.append(validators.run_check(self, "IsDataStoreIdExisting", "DataStoreID",
                                                       datastore_id, "FAIL"))

        # Only run the following check if the previous check passed.
        if False not in should_run_command:

            if writemode.upper().startswith("EXISTING") and not writemode.upper().endswith("OVERWRITE"):
                # If the DataStoreTable is not a table within the DataStore, raise a FAILURE.
                should_run_command.append(validators.run_check(self, "IsTableInDataStore", "DataStoreTable",
                                                               datastore_table_name, "FAIL",
                                                               other_values=[datastore_id]))

            if writemode.upper().startswith("NEW"):
                # If the DataStoreTable is a table within the DataStore, raise a FAILURE.
                should_run_command.append(validators.run_check(self, "IsDataStoreTableUnique",
                                                               "DataStoreTable", datastore_table_name,
                                                               "FAIL", other_values=[datastore_id]))

        # If the Table ID is not an existing Table ID, raise a FAILURE.
        should_run_command.append(validators.run_check(self, "IsTableIdExisting", "TableID", table_id,
                                                       "FAIL"))

        # Return the Boolean to determine if the process should be run.
        if False in should_run_command:
            return False
        else:
            return True

    def __should_write_table2(self, datastore, datastore_table_name, datastore_table_cols_to_receive,
                              writemode):
        """
        Checks the following:
        * the datastore columns configured to receive data are existing columns within the DataStore table

        Args:
            datastore (obj): the DataStore that is receiving the data
            datastore_table_name (str): the name of the DataStore table that is receiving the data
            datastore_table_cols_to_receive (list of strings): A list of DataBase table columns that are
                expected to receive data.
            writemode (str): the method used to write data (the check is skipped for NewTableInsert and
                ExistingTableOverwrite)

        Returns:
            Boolean. If TRUE, the process should be run. If FALSE, it should not be run.
        """

        # List of Boolean values. The Boolean values correspond to the results of the following tests.
        # If TRUE, the test confirms that the command should be run.
        should_run_command = []

        # A list of Table columns that do not map to the DataStore Table columns.
        invalid_columns = []

        # Do not run this check if the WriteMode is NewTableInsert or ExistingTableOverwrite.
        exception_modes = ["NEWTABLEINSERT", "EXISTINGTABLEOVERWRITE"]
        if writemode.upper() not in exception_modes:

            # Get the DataStore Table columns.
            datastore_table_cols = datastore.return_col_names(datastore_table_name)

            # Iterate over the DataStore columns that are configured to read data.
            for datastore_table_col_to_receive in datastore_table_cols_to_receive:

                # If the configured DataStore column does not exist in the DataStore table, add the
                # configured DataStore column to the list of invalid columns.
                if datastore_table_col_to_receive not in datastore_table_cols:
                    invalid_columns.append(datastore_table_col_to_receive)

        # If there are any invalid configured DataStore columns, raise a FAILURE.
        if invalid_columns:
            message = "One or more of the DataStore columns configured to be edited do(es) not exist in the" \
                      " DataStore table ({}). The invalid columns are: \n({}).".format(datastore_table_name,
                                                                                       invalid_columns)
            recommendation = "Specify valid DataStore columns to edit."
            self.logger.error(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))
            should_run_command.append(False)

        # Return the Boolean to determine if the process should be run.
        if False in should_run_command:
            self.warning_count += 1
            return False
        else:
            return True

    def run_command(self):
        """
        Run the command. Write the Table to the DataStore.
        (Docstring fixed: previously said "Read the Table from the DataStore".)

        Returns: None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """

        # Obtain the parameter values.
        pv_TableID = self.get_parameter_value("TableID")
        pv_IncludeColumns = self.get_parameter_value("IncludeColumns", default_value="*")
        pv_ExcludeColumns = self.get_parameter_value("ExcludeColumns", default_value="''")
        pv_DataStoreID = self.get_parameter_value("DataStoreID")
        pv_DataStoreTable = self.get_parameter_value("DataStoreTable")
        pv_ColumnMap = self.get_parameter_value("ColumnMap", default_value="")
        # DataStoreRelatedColumnsMap is read but not currently used (the feature is disabled).
        pv_DataStoreRelatedColumnsMap = self.get_parameter_value("DataStoreRelatedColumnsMap")
        # Upper-case once here; all comparisons below rely on this.
        pv_WriteMode = self.get_parameter_value("WriteMode").upper()

        # Expand for ${Property} syntax.
        pv_TableID = self.command_processor.expand_parameter_value(pv_TableID, self)
        pv_DataStoreID = self.command_processor.expand_parameter_value(pv_DataStoreID, self)
        pv_DataStoreTable = self.command_processor.expand_parameter_value(pv_DataStoreTable, self)

        # Run the checks on the parameter values. Only continue if the checks pass.
        if self.__should_write_table(pv_TableID, pv_DataStoreID, pv_DataStoreTable, pv_WriteMode):

            # Get the Table object.
            table_obj = self.command_processor.get_table(pv_TableID)

            # Get the DataStore object.
            datastore_obj = self.command_processor.get_datastore(pv_DataStoreID)

            # Convert the ColumnMap from string to a dictionary.
            # Key: Table Column Name; Value: DataStore Column Name.
            col_map_dic = string_util.delimited_string_to_dictionary_one_value(pv_ColumnMap, ",", ":", True)

            # Get the list of the columns in the Table that are configured to write.
            table_cols_to_write = self.__get_table_cols_to_write(pv_IncludeColumns, pv_ExcludeColumns,
                                                                 table_obj)

            # Get the list of the columns in the DataStore that are configured to receive data.
            datastore_table_cols_to_receive = self.__get_datastore_cols_to_receive(table_cols_to_write,
                                                                                   col_map_dic)

            # Run a second level of checks. Only continue if the check passes.
            if self.__should_write_table2(datastore_obj, pv_DataStoreTable,
                                          datastore_table_cols_to_receive, pv_WriteMode):

                try:
                    # Get the list of the columns in the Table that are NOT configured to write.
                    table_cols_to_exclude = self.__get_table_cols_to_exclude(pv_IncludeColumns,
                                                                             pv_ExcludeColumns, table_obj)

                    # Make a deep copy of the Table object.
                    table_obj_copy = table_obj.deep_copy()

                    # Remove the copied pandas Data Frame columns that are not to be written to the DataStore.
                    table_obj_copy = table_obj_copy.drop(columns=table_cols_to_exclude)

                    # Rename the copied pandas Data Frame columns to match the columns in the database table.
                    table_obj_copy = table_obj_copy.rename(columns=col_map_dic)

                    # Write the copied pandas Data Frame to the DataStore's database table.
                    # pv_WriteMode was upper-cased above, so the redundant .upper() calls were removed.
                    if pv_WriteMode == "NEWTABLEINSERT":
                        table_obj_copy.to_sql(name=pv_DataStoreTable, con=datastore_obj.engine, index=False)

                    elif pv_WriteMode == "EXISTINGTABLEOVERWRITE":
                        table_obj_copy.to_sql(name=pv_DataStoreTable, con=datastore_obj.engine,
                                              if_exists="replace", index=False)

                    elif pv_WriteMode == "EXISTINGTABLEINSERT":
                        table_obj_copy.to_sql(name=pv_DataStoreTable, con=datastore_obj.engine,
                                              if_exists="append", index=False)

                    elif pv_WriteMode == "EXISTINGTABLEUPDATE":
                        print("The ExistingTableUpdate WriteMode is currently disabled.")

                    elif pv_WriteMode == "EXISTINGTABLEINSERTUPDATE":
                        print("The ExistingTableInsertUpdate WriteMode is currently disabled.")

                # Raise an exception if an unexpected error occurs during the process.
                except Exception:
                    self.warning_count += 1
                    message = "Unexpected error writing Table {} to DataStore ({}).".format(pv_TableID,
                                                                                            pv_DataStoreID)
                    recommendation = "Check the log file for details."
                    self.logger.error(message, exc_info=True)
                    self.command_status.add_to_log(
                        CommandPhaseType.RUN,
                        CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise Runtime Error if any errors occurred.
        if self.warning_count > 0:
            message = "There were {} warnings proceeding this command.".format(self.warning_count)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class UnzipFile(AbstractCommand):
    """
    Unzips a file.

    Command Parameters
    * File (str, required): the path to the file to extract (relative or absolute)
    * FileType (str, optional): the type of compressed file. Must be one of the following:
        `zip`: ZIP file (.zip)
        `tar`: Tape archive (.tar)
        Default: determined by the file's extension.
    * OutputFolder (str, optional): the folder that will hold the extracted contents of the compressed file.
        Default: The parent folder of the compressed file.
    * DeleteFile (boolean, optional): If TRUE, the compressed file will be deleted after the extraction
        takes place. If FALSE, the compressed file will remain. Default: FALSE
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("File", type("")),
        CommandParameterMetadata("FileType", type("")),
        CommandParameterMetadata("OutputFolder", type("")),
        CommandParameterMetadata("DeleteFile", type(""))
    ]

    def __init__(self):
        """
        Initialize the command.
        """

        # AbstractCommand data
        super().__init__()
        self.command_name = "UnzipFile"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display.
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Unzip a compressed file."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata.
        self.parameter_input_metadata = dict()
        # File
        self.parameter_input_metadata['File.Description'] = "file to be unzipped"
        self.parameter_input_metadata['File.Label'] = "File"
        self.parameter_input_metadata['File.Required'] = True
        self.parameter_input_metadata['File.Tooltip'] = \
            "The file to be unzipped (relative or absolute path). ${Property} syntax is recognized."
        self.parameter_input_metadata['File.FileSelector.Type'] = "Read"
        self.parameter_input_metadata['File.FileSelector.Title'] = "Select the file to be unzipped"
        # FileType
        self.parameter_input_metadata['FileType.Description'] = "input file format"
        self.parameter_input_metadata['FileType.Label'] = "File type"
        self.parameter_input_metadata['FileType.Tooltip'] = (
            "The file format of the input File. The following file formats are currently accepted.\n\n"
            "TAR: a .tar file.\n"
            "ZIP: A .zip file.")
        self.parameter_input_metadata['FileType.Value.Default.Description'] = "from the File extension."
        # OutputFolder
        self.parameter_input_metadata['OutputFolder.Description'] = "name of the destination folder"
        self.parameter_input_metadata['OutputFolder.Label'] = "Output folder"
        self.parameter_input_metadata['OutputFolder.Tooltip'] = (
            "The name of the destination folder. The extracted files are saved here.\n"
            "${Property} syntax is recognized.")
        self.parameter_input_metadata['OutputFolder.FileSelector.Type'] = "Write"
        self.parameter_input_metadata['OutputFolder.FileSelector.Title'] = "Select the destination folder"
        self.parameter_input_metadata['OutputFolder.Value.Default.Description'] = \
            "parent folder of the File"
        # DeleteFile
        self.parameter_input_metadata['DeleteFile.Description'] = 'whether to delete the file'
        self.parameter_input_metadata['DeleteFile.Label'] = "Delete file?"
        self.parameter_input_metadata['DeleteFile.Tooltip'] = (
            "Boolean.\n\n"
            "If True, the compressed file is deleted after the extraction.\n"
            "If False, the compressed file remains after the extraction.")
        self.parameter_input_metadata['DeleteFile.Value.Default'] = "False"
        self.parameter_input_metadata['DeleteFile.Values'] = ["", "True", "False"]

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns: None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that the parameter File is a non-empty, non-None string.
        pv_File = self.get_parameter_value(parameter_name='File', command_parameters=command_parameters)

        if not validators.validate_string(pv_File, False, False):
            message = "File parameter has no value."
            recommendation = "Specify the File parameter to indicate the compressed file to extract."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that optional parameter FileType is an acceptable value or is None.
        pv_FileType = self.get_parameter_value(parameter_name="FileType",
                                               command_parameters=command_parameters)

        acceptable_values = ["Zip", "Tar"]
        if not validators.validate_string_in_list(pv_FileType, acceptable_values, none_allowed=True,
                                                  empty_string_allowed=False, ignore_case=True):
            message = "FileType parameter value ({}) is not recognized.".format(pv_FileType)
            recommendation = "Specify one of the acceptable values ({}) for the" \
                             " FileType parameter.".format(acceptable_values)
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that optional DeleteFile parameter value is a valid Boolean value or is None.
        pv_DeleteFile = self.get_parameter_value(parameter_name="DeleteFile",
                                                 command_parameters=command_parameters)

        if not validators.validate_bool(pv_DeleteFile, none_allowed=True, empty_string_allowed=False):
            message = "DeleteFile parameter value ({}) is not a recognized boolean value.".format(
                pv_DeleteFile)
            # Typo fix: closing quote added around 'False'.
            recommendation = "Specify either 'True' or 'False' for the DeleteFile parameter."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an
        # exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)

        else:
            # Refresh the phase severity.
            self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION,
                                                       CommandStatusType.SUCCESS)

    def __should_extract_file(self, file_abs, output_folder_abs, file_type):
        """
        Checks the following:
        * the File is a valid file
        * the OutputFolder is a valid folder
        * the FileType correctly identifies the File's type

        Args:
            file_abs (str): the full path to the input compressed File
            output_folder_abs (str): the full path to the OutputFolder
            file_type (str or None): the FileType value depicting the file type of the input File

        Returns:
            Boolean. If TRUE, the file should be extracted. If FALSE, at least one check failed and the
            file should not be extracted.
        """

        # List of Boolean values. The Boolean values correspond to the results of the following tests.
        # If TRUE, the test confirms that the command should be run.
        should_run_command = []

        # If the File parameter value is not a valid file, raise a FAILURE.
        should_run_command.append(validators.run_check(self, "IsFilePathValid", "File", file_abs, "FAIL"))

        # If the OutputFolder parameter value is not a valid folder, raise a FAILURE.
        should_run_command.append(validators.run_check(self, "IsFolderPathValid", "OutputFolder",
                                                       output_folder_abs, "FAIL"))

        # If the File Type is not recognized, raise a FAILURE.
        if file_type is None:
            message = "A valid FileType cannot be determined from the file ({}).".format(file_abs)
            recommendation = "Use the FileType parameter to assign the appropriate file type."
            should_run_command.append(False)
            self.logger.error(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # If the File Type is not actually recognized by the input File, raise a FAILURE.
        # Bug fix: these checks are now guarded with elif so that a None file_type no longer raises
        # AttributeError on file_type.upper().
        elif file_type.upper() == "ZIP":
            should_run_command.append(validators.run_check(self, "IsZipFile", "File", file_abs, "FAIL"))

        elif file_type.upper() == "TAR":
            should_run_command.append(validators.run_check(self, "IsTarFile", "File", file_abs, "FAIL"))

        # Return the Boolean to determine if the process should be run.
        if False in should_run_command:
            return False
        else:
            return True

    @staticmethod
    def __get_default_file_type(file_path):
        """
        Helper function to get the default FileType parameter value.

        Arg:
            file_path: the absolute path to the input File parameter

        Returns:
            The default FileType parameter value. Returns None if the file extension does not correlate
            with a compatible FileType.
        """

        # A dictionary of compatible file extensions and their corresponding FileType.
        # key: Uppercase file extension.
        # value: Uppercase file type.
        dic = {".TAR": "TAR", ".ZIP": "ZIP"}

        # Iterate over the dictionary and return the FileType that corresponds to the input file's extension.
        for ext, file_type in dic.items():
            if io_util.get_extension(file_path).upper() == ext:
                return file_type

        # If the file extension is not recognized, return None.
        return None

    def run_command(self):
        """
        Run the command. Extract the compressed file.

        Returns: None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """

        # Obtain the File and the DeleteFile parameter values.
        pv_File = self.get_parameter_value("File")
        pv_DeleteFile = self.get_parameter_value("DeleteFile", default_value="False")

        # Convert the File parameter value relative path to an absolute path. Expand for ${Property} syntax.
        file_abs = io_util.verify_path_for_os(
            io_util.to_absolute_path(self.command_processor.get_property('WorkingDir'),
                                     self.command_processor.expand_parameter_value(pv_File, self)))

        # Get the FileType parameter value. The default is derived from the file extension and may be None;
        # __should_extract_file() rejects a None FileType before it is dereferenced below.
        default_file_ext = self.__get_default_file_type(file_abs)
        pv_FileType = self.get_parameter_value("FileType", default_value=default_file_ext)

        # Get the OutputFolder parameter value. Default: the parent folder of the input file.
        parent_folder = io_util.get_path(file_abs)
        pv_OutputFolder = self.get_parameter_value("OutputFolder", default_value=parent_folder)

        # Convert the OutputFolder parameter value relative path to an absolute path.
        # Expand for ${Property} syntax.
        output_folder_abs = io_util.verify_path_for_os(
            io_util.to_absolute_path(self.command_processor.get_property('WorkingDir'),
                                     self.command_processor.expand_parameter_value(pv_OutputFolder, self)))

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.__should_extract_file(file_abs, output_folder_abs, pv_FileType):

            try:
                # If the file is a .zip file, extract the zip file.
                if pv_FileType.upper() == "ZIP":
                    zip_util.unzip_all_files(file_abs, output_folder_abs)

                # If the file is a .tar file, extract the tar file.
                elif pv_FileType.upper() == "TAR":
                    zip_util.untar_all_files(file_abs, output_folder_abs)

                # If configured, remove the input compressed file.
                if string_util.str_to_bool(pv_DeleteFile):
                    os.remove(file_abs)

            # Raise an exception if an unexpected error occurs during the process.
            except Exception:
                self.warning_count += 1
                message = "Unexpected error extracting the {} file ({}).".format(pv_FileType, pv_File)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise Runtime Error if any errors occurred.
        if self.warning_count > 0:
            message = "There were {} warnings proceeding this command.".format(self.warning_count)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class WebGet(AbstractCommand):
    """
    Downloads a file from a web url.

    This command downloads a file on the web and saves it on the local computer.

    Command Parameters:
    * URL (str, required): the URL of the file to be downloaded.
    * OutputFile (str, optional): the relative pathname of the output file. Default: Filename is the same as
        the url filename. File is saved to the parent folder of the gp workflow file (the working directory).
    * Username (str, optional): an appropriate username if the URL to download is private
    * Password (str, optional): an appropriate password if the URL to download is private
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("URL", type("")),
        CommandParameterMetadata("OutputFile", type("")),
        CommandParameterMetadata("Username", type("")),
        CommandParameterMetadata("Password", type(""))
    ]

    def __init__(self):
        """
        Initialize the command.
        """

        # AbstractCommand data
        super().__init__()

        # Name of command for menu and window title
        self.command_name = "WebGet"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Download a file from a URL."
        self.command_metadata['EditorType'] = "Simple"

        self.parameter_input_metadata = dict()
        # URL
        self.parameter_input_metadata['URL.Description'] = "URL to read content"
        self.parameter_input_metadata['URL.Label'] = "URL"
        self.parameter_input_metadata['URL.Tooltip'] = \
            "Specify the URL from which to read content, can use ${Property}."
        self.parameter_input_metadata['URL.Required'] = True
        self.parameter_input_metadata['URL.FileSelector.Type'] = "Read"
        # Output File
        self.parameter_input_metadata['OutputFile.Description'] = ""
        self.parameter_input_metadata['OutputFile.Label'] = "Output File"
        self.parameter_input_metadata['OutputFile.Tooltip'] = (
            "The output file path (relative or absolute). ${Property} syntax is recognized.\n"
            "Formatting specifiers such as %f are recognized.\n"
            "The output file will be overwritten if it already exists.\n"
            "The password is not encrypted here so read-only or public accounts are recommended.")
        self.parameter_input_metadata['OutputFile.Required'] = False
        self.parameter_input_metadata['OutputFile.FileSelector.Type'] = "Write"
        # Username
        self.parameter_input_metadata['Username.Description'] = "case-sensitive"
        self.parameter_input_metadata['Username.Label'] = "User name"
        self.parameter_input_metadata['Username.Tooltip'] = \
            "Specify a valid username to access a private URL file."
        self.parameter_input_metadata['Username.Required'] = False
        # Password
        self.parameter_input_metadata['Password.Description'] = "case-sensitive"
        self.parameter_input_metadata['Password.Label'] = "Password"
        self.parameter_input_metadata['Password.Tooltip'] = \
            "Specify a valid password to access a private URL file."
        self.parameter_input_metadata['Password.Required'] = False

        # Class data
        self.warning_count = 0
        self.logger = logging.getLogger(__name__)

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            None.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""

        # Check that parameter URL is a non-empty, non-None string.
        # - existence of the url will also be checked in run_command().
        pv_URL = self.get_parameter_value(parameter_name='URL', command_parameters=command_parameters)

        if not validators.validate_string(pv_URL, False, False):
            message = "URL parameter has no value."
            recommendation = "Specify the URL parameter to indicate the URL of the file to download."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check that parameter OutputFile is a non-empty string (can be None).
        # - existence of the folder will also be checked in run_command().
        pv_OutputFile = self.get_parameter_value(parameter_name='OutputFile',
                                                 command_parameters=command_parameters)

        if not validators.validate_string(pv_OutputFile, True, False):
            message = "OutputFile parameter has no value."
            recommendation = "Specify the OutputFile parameter to indicate the output file."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception.
        if len(warning) > 0:
            self.logger.warning(warning)
            raise ValueError(warning)
        else:
            # Refresh the phase severity
            self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION,
                                                       CommandStatusType.SUCCESS)

    def __should_run_webget(self, output_file_abs):
        """
        Checks the following:
        * the output folder is a valid folder

        Args:
            output_file_abs: the full pathname to the output file

        Returns:
            run_webget: Boolean. If TRUE, the webget process should be run. If FALSE, it should not be run.
        """

        # Boolean to determine if the webget process should be run. Set to true until an error occurs.
        run_webget = True

        # If the OutputFolder is not a valid folder, raise a FAILURE.
        output_folder = os.path.dirname(output_file_abs)
        if not os.path.isdir(output_folder):
            run_webget = False
            self.warning_count += 1
            message = 'The output folder ({}) of the OutputFile is not a valid folder.'.format(output_folder)
            recommendation = 'Specify a valid relative pathname for the output file.'
            self.logger.error(message)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Return the Boolean to determine if the webget process should be run. If TRUE, all checks passed.
        # If FALSE, one or many checks failed.
        return run_webget

    @staticmethod
    def __rename_files_in_a_folder(list_of_files, folder_path, new_filename):
        """
        Renames files within a folder to a new name. The files retain their pathname (they stay in the same
        folder) and retain their file extension.

        Args:
            list_of_files (list of strings): a list of filenames (without the path but WITH the extension)
            folder_path (string): the full pathname to the folder that is storing the files in the
                list_of_files list
            new_filename (string): the new filename for the files in the list_of_files list. All files will be
                renamed to this same value.

        Returns:
            None.

        Raises:
            None.
        """

        # Iterate over the files to be renamed.
        for existing_file in list_of_files:

            # Get the full path of the existing file
            existing_path = os.path.join(folder_path, existing_file)

            # Get the file extension of the existing file
            existing_extension = io_util.get_extension(existing_path)

            # Create the full path of the renamed file. If an extension was included in the original filename,
            # then that same extension is included in the new filename.
            new_path = os.path.join(folder_path, "{}{}".format(new_filename, existing_extension))
            os.rename(existing_path, new_path)

    def run_command(self):
        """
        Run the command. Download the file from the web and save it on the local computer.

        Returns:
            None.

        Raises:
            RuntimeError if any warnings occurred during run_command method.
        """

        # Obtain the parameter values
        pv_URL = self.get_parameter_value("URL")
        pv_OutputFile = self.get_parameter_value("OutputFile", default_value=None)
        pv_Username = self.get_parameter_value("Username", default_value=None)
        pv_Password = self.get_parameter_value("Password", default_value=None)

        # Convert the pv_URL parameter to expand for ${Property} syntax.
        url_abs = self.command_processor.expand_parameter_value(pv_URL, self)

        # Convert the OutputFile parameter value relative path to an absolute path. Expand for ${Property} syntax.
        # If the OutputFile parameter is specified, continue.
        if pv_OutputFile:
            output_file_absolute = io_util.verify_path_for_os(
                io_util.to_absolute_path(
                    self.command_processor.get_property('WorkingDir'),
                    self.command_processor.expand_parameter_value(pv_OutputFile, self)))
        # If the OutputFile parameter is NOT specified, default to the URL's own filename.
        # NOTE(review): this builds the default name from the raw pv_URL (before ${Property}
        # expansion); the expansion happens afterwards on the filename string — confirm this
        # is intended for URLs that contain ${Property} in the filename portion.
        else:
            original_filename = io_util.get_filename(pv_URL) + io_util.get_extension(pv_URL)
            output_file_absolute = io_util.verify_path_for_os(
                io_util.to_absolute_path(
                    self.command_processor.get_property('WorkingDir'),
                    self.command_processor.expand_parameter_value(original_filename, self)))

        # Run the checks on the parameter values. Only continue if the checks passed.
        if self.__should_run_webget(output_file_absolute):

            try:
                # Get the output folder.
                output_folder = os.path.dirname(output_file_absolute)

                # Get the URL file and convert it into a request Response object
                # Authentication Reference: http://docs.python-requests.org/en/master/user/authentication/
                # NOTE(review/security): verify=False disables TLS certificate validation, which
                # allows man-in-the-middle attacks; behavior is preserved here, but consider
                # making this configurable and defaulting to verify=True.
                r = requests.get(url_abs, auth=HTTPBasicAuth(pv_Username, pv_Password),
                                 verify=False, stream=True)

                # Get the filename of the URL and the output file
                url_filename = io_util.get_filename(url_abs)
                output_filename = io_util.get_filename(output_file_absolute)

                # Remove the output file if it already exists.
                if os.path.exists(output_file_absolute):
                    os.remove(output_file_absolute)

                # If the URL file is a zip file, process as a zip file.
                if zip_util.is_zip_file_request(r):

                    # Create an empty list to hold the files that were downloaded/extracted to the output folder.
                    downloaded_files = []

                    with open(os.path.join(output_folder, "{}.zip".format(url_filename)),
                              "wb") as downloaded_zip_file:
                        downloaded_zip_file.write(r.content)
                    downloaded_files.append("{}.zip".format(url_filename))

                    # Determine if the downloaded zip file(s) should be renamed. If the filename is %f then the
                    # filenames of the downloaded products should be the same as the url filenames
                    if not output_filename == '%f':
                        self.__rename_files_in_a_folder(list_of_files=downloaded_files,
                                                        folder_path=output_folder,
                                                        new_filename=output_filename)

                else:
                    # Download the file to the output folder.
                    with open(os.path.join(output_folder, os.path.basename(url_abs)),
                              "wb") as downloaded_file:
                        downloaded_file.write(r.content)

                    # Determine if the downloaded file should be renamed. If the filename is %f then the filename
                    # of the downloaded product should be the same as the url filename
                    if not output_filename == '%f':
                        self.__rename_files_in_a_folder(list_of_files=[os.path.basename(url_abs)],
                                                        folder_path=output_folder,
                                                        new_filename=output_filename)

            # Log if an unexpected error occurs during the process. Details are captured via
            # exc_info=True, so binding the exception to a (previously unused) variable is unnecessary.
            except Exception:
                self.warning_count += 1
                message = "Unexpected error downloading file from URL {}.".format(url_abs)
                recommendation = "Check the log file for details."
                self.logger.error(message, exc_info=True)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Determine success of command processing. Raise RuntimeError if any errors occurred.
        if self.warning_count > 0:
            # "processing" (was "proceeding") to match the wording used by the other commands.
            message = "There were {} warnings processing this command.".format(self.warning_count)
            raise RuntimeError(message)

        # Set command status type as SUCCESS if there are no errors.
        else:
            self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)
class Message(AbstractCommand):
    """
    The Message command prints a message to the log file and optionally sets the command status
    to alert about an issue.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("Message", type("")),
        CommandParameterMetadata("CommandStatus", type(""))
    ]

    def __init__(self):
        """
        Initialize the command instance.
        """
        super().__init__()
        self.command_name = "Message"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = (
            "Print a message to the log file and optionally set the command status for notification.\n"
            "For example, use with an If command to notify when a condition is met.")
        self.command_metadata['EditorType'] = "Simple"

        # Parameter Metadata
        self.parameter_input_metadata = dict()
        # Message
        self.parameter_input_metadata['Message.Description'] = "message to print"
        self.parameter_input_metadata['Message.Label'] = "Message"
        self.parameter_input_metadata['Message.Tooltip'] = \
            "The message to print. Can be specified using ${Property}."
        self.parameter_input_metadata['Message.Required'] = True
        # CommandStatus
        self.parameter_input_metadata['CommandStatus.Description'] = \
            "status that should result from command"
        self.parameter_input_metadata['CommandStatus.Label'] = "Command status"
        self.parameter_input_metadata['CommandStatus.Tooltip'] = \
            "Indicate the status that should result from running the command: SUCCESS, WARNING, FAILURE."
        self.parameter_input_metadata['CommandStatus.Values'] = [
            "", "SUCCESS", "WARNING", "FAILURE"
        ]
        self.parameter_input_metadata['CommandStatus.Value.Default'] = "SUCCESS"

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            Nothing.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning = ""
        logger = logging.getLogger("gp")

        # Message is required
        pv_Message = self.get_parameter_value(parameter_name='Message',
                                              command_parameters=command_parameters)
        if not validators.validate_string(pv_Message, False, False):
            message = "Message parameter has no value."
            recommendation = "Specify text for the Message parameter."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # CommandStatus is optional but, if given, must be a recognized status type.
        pv_CommandStatus = self.get_parameter_value(parameter_name='CommandStatus',
                                                    command_parameters=command_parameters)
        if not validators.validate_string_in_list(
                pv_CommandStatus,
                CommandStatusType.get_command_status_types_as_str(), True, True):
            # BUG FIX: message previously ended with a stray doubled quote ('"" is invalid.').
            message = 'The requested command status "' + pv_CommandStatus + '" is invalid.'
            recommendation = "Specify a valid command status."
            warning += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty
        # triggers an exception below.
        warning = command_util.validate_command_parameter_names(self, warning)

        # If any warnings were generated, throw an exception
        if len(warning) > 0:
            logger.warning(warning)
            raise ValueError(warning)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION,
                                                   CommandStatusType.SUCCESS)

    def run_command(self):
        """
        Run the command. Print the message to the log file.

        Returns:
            Nothing.

        Raises:
            RuntimeError if any exception occurs.
        """
        logger = logging.getLogger(__name__)
        warning_count = 0

        # Message parameter won't be null.
        pv_Message = self.get_parameter_value('Message')
        pv_CommandStatus = self.get_parameter_value('CommandStatus')
        if pv_CommandStatus is None or pv_CommandStatus == "":
            # Default status as a string.
            # BUG FIX: the default was previously assigned to a misspelled variable
            # ('pv_commandStatus', lowercase 'c'), so the SUCCESS default never took
            # effect and value_of() was called with None or "".
            pv_CommandStatus = str(CommandStatusType.SUCCESS)

        # Convert the string to the enum
        command_status_type = CommandStatusType.value_of(pv_CommandStatus, ignore_case=True)

        message_expanded = self.command_processor.expand_parameter_value(pv_Message)

        logger.info(message_expanded)

        # Add a log message for the requested status type
        # - don't add to the warning count
        self.command_status.add_to_log(
            CommandPhaseType.RUN,
            CommandLogRecord(command_status_type, message_expanded, ""))

        if warning_count > 0:
            message = "There were " + str(warning_count) + " warnings processing the command."
            raise RuntimeError(message)

        self.command_status.refresh_phase_severity(CommandPhaseType.RUN,
                                                   CommandStatusType.SUCCESS)
class WritePropertiesToFile(AbstractCommand):
    """
    The WritePropertiesToFile command writes processor properties to a file.
    """

    # Define the command parameters.
    __command_parameter_metadata = [
        CommandParameterMetadata("OutputFile", type("")),
        CommandParameterMetadata("IncludeProperties", type("")),
        CommandParameterMetadata("WriteMode", type("")),
        CommandParameterMetadata("FileFormat", type("")),
        CommandParameterMetadata("SortOrder", type(""))
    ]

    # Choices for WriteMode, used to validate parameter and display in editor
    __choices_WriteMode = ["Append", "Overwrite"]

    # Choices for FileFormat, used to validate parameter and display in editor
    __choices_FileFormat = ["NameTypeValue", "NameTypeValuePython", "NameValue"]

    # Choices for SortOrder, used to validate parameter and display in editor
    __choices_SortOrder = ["Ascending", "Descending"]

    def __init__(self):
        """
        Initialize a new instance of the command.
        """
        # AbstractCommand data
        super().__init__()
        self.command_name = "WritePropertiesToFile"
        self.command_parameter_metadata = self.__command_parameter_metadata

        # Command metadata for command editor display
        self.command_metadata = dict()
        self.command_metadata['Description'] = "Write command processor properties to a file."
        self.command_metadata['EditorType'] = "Simple"

        # Command Parameter Metadata
        self.parameter_input_metadata = dict()
        # OutputFile
        self.parameter_input_metadata['OutputFile.Description'] = "output file to write"
        self.parameter_input_metadata['OutputFile.Label'] = "Output file"
        self.parameter_input_metadata['OutputFile.Required'] = True
        self.parameter_input_metadata['OutputFile.Tooltip'] = (
            "The output file to write, as an absolute path or relative to the command file.\n"
            "Can use ${Property}.")
        self.parameter_input_metadata['OutputFile.FileSelector.Type'] = "Write"
        self.parameter_input_metadata['OutputFile.FileSelector.Title'] = "Select the output file"
        # IncludeProperties
        self.parameter_input_metadata['IncludeProperties.Description'] = "names of properties to write"
        self.parameter_input_metadata['IncludeProperties.Label'] = "Include properties"
        self.parameter_input_metadata['IncludeProperties.Tooltip'] = (
            "The names of properties to write, separated by commas.\n"
            "The '*' wildcard can be used to indicate multiple properties.")
        self.parameter_input_metadata['IncludeProperties.Value.Default'] = "write all properties."
        # WriteMode
        self.parameter_input_metadata['WriteMode.Description'] = "file write mode"
        self.parameter_input_metadata['WriteMode.Label'] = "Write mode"
        self.parameter_input_metadata['WriteMode.Tooltip'] = (
            "Indicates how the file should be written:\n"
            "Append – append the properties to the file without checking for matches (create the file if "
            "it does not exist).\n"
            "Overwrite – overwrite the properties file.")
        self.parameter_input_metadata['WriteMode.Values'] = ["", "Append", "Overwrite"]
        self.parameter_input_metadata['WriteMode.Value.Default'] = "Overwrite"
        # FileFormat
        self.parameter_input_metadata['FileFormat.Description'] = "file format"
        self.parameter_input_metadata['FileFormat.Label'] = "File format"
        self.parameter_input_metadata['FileFormat.Tooltip'] = "The file format."
        self.parameter_input_metadata['FileFormat.Values'] = [
            "", "NameTypeValue", "NameTypeValuePython", "NameValue"
        ]
        # BUG FIX: was 'NameValueType', which is not one of the valid FileFormat choices;
        # the runtime default applied in run_command() is 'NameTypeValue'.
        self.parameter_input_metadata['FileFormat.Value.Default'] = 'NameTypeValue'
        # SortOrder
        self.parameter_input_metadata['SortOrder.Description'] = "sort order"
        self.parameter_input_metadata['SortOrder.Label'] = "Sort order"
        self.parameter_input_metadata['SortOrder.Tooltip'] = "The sort order."
        self.parameter_input_metadata['SortOrder.Values'] = ["", "Ascending", "Descending"]
        self.parameter_input_metadata['SortOrder.Value.Default'] = 'Ascending'

    def check_command_parameters(self, command_parameters):
        """
        Check the command parameters for validity.

        Args:
            command_parameters: the dictionary of command parameters to check (key:string_value)

        Returns:
            Nothing.

        Raises:
            ValueError if any parameters are invalid or do not have a valid value.
            The command status messages for initialization are populated with validation messages.
        """
        warning_message = ""
        logger = logging.getLogger(__name__)

        # OutputFile is required
        pv_OutputFile = self.get_parameter_value(parameter_name='OutputFile',
                                                 command_parameters=command_parameters)
        if not validators.validate_string(pv_OutputFile, False, False):
            message = "The OutputFile must be specified."
            recommendation = "Specify the output file."
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # IncludeProperties is optional, default to * at runtime

        # WriteMode is optional, will default to Overwrite at runtime
        pv_WriteMode = self.get_parameter_value(parameter_name='WriteMode',
                                                command_parameters=command_parameters)
        if not validators.validate_string_in_list(pv_WriteMode, self.__choices_WriteMode, True, True):
            message = "WriteMode parameter is invalid."
            recommendation = "Specify the WriteMode parameter as blank or one of " + \
                             str(self.__choices_WriteMode)
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # FileFormat is optional, will default to NameTypeValue at runtime
        pv_FileFormat = self.get_parameter_value(parameter_name='FileFormat',
                                                 command_parameters=command_parameters)
        if not validators.validate_string_in_list(pv_FileFormat, self.__choices_FileFormat, True, True):
            message = "FileFormat parameter is invalid."
            recommendation = "Specify the FileFormat parameter as blank or one of " + \
                             str(self.__choices_FileFormat)
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # SortOrder is optional, will default to None (no sort) at runtime
        pv_SortOrder = self.get_parameter_value(parameter_name='SortOrder',
                                                command_parameters=command_parameters)
        if not validators.validate_string_in_list(pv_SortOrder, self.__choices_SortOrder, True, True):
            message = "SortOrder parameter is invalid."
            recommendation = "Specify the SortOrder parameter as blank or one of " + \
                             str(self.__choices_SortOrder)
            warning_message += "\n" + message
            self.command_status.add_to_log(
                CommandPhaseType.INITIALIZATION,
                CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))

        # Check for unrecognized parameters.
        # This returns a message that can be appended to the warning, which if non-empty
        # triggers an exception below.
        warning_message = command_util.validate_command_parameter_names(self, warning_message)

        # If any warnings were generated, throw an exception
        if len(warning_message) > 0:
            logger.warning(warning_message)
            raise ValueError(warning_message)

        # Refresh the phase severity
        self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION,
                                                   CommandStatusType.SUCCESS)

    def run_command(self):
        """
        Run the command. Write the processor properties to a file.

        Returns:
            Nothing.

        Raises:
            RuntimeError: if a runtime input error occurs.
        """
        warning_count = 0
        logger = logging.getLogger(__name__)

        # Get data for the command
        pv_OutputFile = self.get_parameter_value('OutputFile')
        pv_IncludeProperties = self.get_parameter_value('IncludeProperties')
        include_properties = []  # Default
        if pv_IncludeProperties is not None and len(pv_IncludeProperties) > 0:
            include_properties = string_util.delimited_string_to_list(pv_IncludeProperties)
        pv_WriteMode = self.get_parameter_value('WriteMode')
        write_mode = pv_WriteMode
        if pv_WriteMode is None or pv_WriteMode == "":
            write_mode = 'Overwrite'  # Default
        pv_FileFormat = self.get_parameter_value('FileFormat')
        file_format = pv_FileFormat
        if pv_FileFormat is None or pv_FileFormat == "":
            file_format = 'NameTypeValue'  # Default
        pv_SortOrder = self.get_parameter_value('SortOrder')
        sort_order = 0  # no sort
        if pv_SortOrder is not None:
            if pv_SortOrder == 'Ascending':
                sort_order = 1
            elif pv_SortOrder == 'Descending':
                sort_order = -1

        # Runtime checks on input
        pv_OutputFile_absolute = io_util.verify_path_for_os(
            io_util.to_absolute_path(
                self.command_processor.get_property('WorkingDir'),
                self.command_processor.expand_parameter_value(pv_OutputFile, self)))

        if warning_count > 0:
            message = "There were " + str(warning_count) + " warnings about command parameters."
            logger.warning(message)
            raise ValueError(message)

        # Write the output file
        try:
            problems = []  # Empty list of output problems, filled by the writer
            io_util.write_property_file(pv_OutputFile_absolute,
                                        self.command_processor.properties,
                                        include_properties, write_mode, file_format,
                                        sort_order, problems)
            # Record any problems that were found
            for problem in problems:
                warning_count += 1
                logger.error(problem)
                self.command_status.add_to_log(
                    CommandPhaseType.RUN,
                    CommandLogRecord(CommandStatusType.FAILURE, problem,
                                     "See the log file for details."))
        # BUG FIX: previously there were two handlers with duplicated bodies — an
        # 'except Exception as e:' that misused logging (passed 'e' as a %-format
        # argument for a message with no placeholder) followed by a redundant bare
        # 'except:'. Consolidated into one handler; exc_info=True captures the
        # traceback, and KeyboardInterrupt/SystemExit are no longer swallowed.
        except Exception:
            warning_count += 1
            traceback.print_exc(file=sys.stdout)  # Formatting of error seems to have issue
            message = 'Unexpected error writing file "' + pv_OutputFile_absolute + '"'
            logger.error(message, exc_info=True)
            self.command_status.add_to_log(
                CommandPhaseType.RUN,
                CommandLogRecord(CommandStatusType.FAILURE, message,
                                 "See the log file for details."))

        if warning_count > 0:
            message = "There were " + str(warning_count) + " warnings processing the command."
            logger.warning(message)
            raise RuntimeError(message)

        self.command_status.refresh_phase_severity(CommandPhaseType.RUN,
                                                   CommandStatusType.SUCCESS)