def execute(self, state: "State"): """ Process each of the references, simply storing them as Reference objects """ glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects( state) name = self._content["command_name"] issues = [] # Receive a list of validated references # Store them as objects, which can be referred to later for ref in self._content["items"]: r = ref["_row"] if "ref_id" not in ref: issues.append( Issue(itype=3, description="'ref_id' field not found: " + str(ref), location=IssueLocation(sheet_name=name, row=r, column=None))) continue else: ref_id = ref["ref_id"] existing = glb_idx.get(self.ref_type.partial_key(ref_id)) if len(existing) == 1: issues.append( Issue(itype=3, description="Reference '" + ref_id + "' of type '" + str(self.ref_type) + "' is already defined. Not allowed", location=IssueLocation(sheet_name=name, row=r, column=None))) continue elif len(existing) > 1: # This condition should not occur... issues.append( Issue(itype=3, description="The reference '" + ref_id + "' of type '" + str(self.ref_type) + "' is defined more than one time (" + str(len(existing)) + ")", location=IssueLocation(sheet_name=name, row=r, column=None))) continue # Create and store the Reference reference = self.ref_type(ref_id, ref) glb_idx.put(reference.key(), reference) # BibliographicReference and ProvenanceReference ar also Observer if isinstance(reference, Observer): glb_idx.put(Observer.key(reference), reference) return issues, None
def execute(self, state: "State"): any_error = False issues = [] sheet_name = self._content["command_name"] # Obtain global variables in state glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects( state) scenarios = create_dictionary() solver_parameters = create_dictionary() for r, param in enumerate(self._content["items"]): parameter = param["parameter"] scenario = param["scenario_name"] p = glb_idx.get(Parameter.partial_key(parameter)) if scenario: if len(p) == 0: issues.append( Issue(itype=3, description="The parameter '" + parameter + "' has not been declared previously.", location=IssueLocation(sheet_name=sheet_name, row=r, column=None))) any_error = True continue p = p[0] name = p.name else: name = parameter value = param["parameter_value"] description = param.get( "description", None) # For readability of the workbook. Not used for solving if scenario: if scenario in scenarios: sp = scenarios[scenario] else: sp = create_dictionary() scenarios[scenario] = sp sp[name] = value else: solver_parameters[name] = value if not any_error: ps = ProblemStatement(solver_parameters, scenarios) glb_idx.put(ps.key(), ps) return issues, None
def transform_issues(issues: List[Union[dict, backend.Issue, tuple, Issue]], cmd, sheet_number: int) -> Tuple[List[Issue], bool]:
    errors_exist = False
    new_issues: List[Issue] = []
    for i in issues:
        if isinstance(i, dict):
            issue = Issue(itype=i["type"], description=i["message"], ctype=i["c_type"],
                          location=IssueLocation(sheet_name=i["sheet_name"], sheet_number=i["sheet_number"]))
        elif isinstance(i, backend.Issue):  # namedtuple
            issue = Issue(itype=i.type, description=i.message, ctype=i.c_type,
                          location=IssueLocation(sheet_name=i.sheet_name, sheet_number=i.sheet_number))
        elif isinstance(i, tuple):
            issue = Issue(itype=i[0], description=i[1],
                          location=IssueLocation(sheet_name=""))
        else:  # isinstance(i, Issue)
            issue = i

        if issue.itype == IType.error():
            errors_exist = True

        if not issue.ctype and cmd:  # "cmd" may be None, in case the Issue is produced by the commands container loop
            issue.ctype = cmd._serialization_type

        if not issue.location.sheet_name or issue.location.sheet_name == "":
            issue.location.sheet_name = cmd._source_block_name if hasattr(cmd, "_source_block_name") else ""

        if not issue.location.sheet_number:
            issue.location.sheet_number = sheet_number

        new_issues.append(issue)

    return new_issues, errors_exist
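# Minimal sketch of transform_issues() on the two simplest input forms,
# assuming Issue/IssueLocation are importable here and that IType.error()
# corresponds to itype == 3, as the mixed usage in this module suggests:
def _example_transform_issues():
    raw = [
        (3, "Could not parse the expression"),  # tuple form: (type, message)
        Issue(itype=2, description="Same hierarchy",
              location=IssueLocation(sheet_name="ScaleChangeMap")),  # already an Issue
    ]
    normalized, errors_exist = transform_issues(raw, cmd=None, sheet_number=2)
    # errors_exist is True (the tuple carries an error); both entries are now
    # uniform Issue objects and the missing sheet_number was back-filled with 2.
    return normalized, errors_exist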
def execute(self, state: "State"): def process_line(item): # Read variables dsd_dataset_name = item.get("dataset_name", None) dsd_dataset_data_location = item.get("dataset_data_location", None) dsd_concept_type = item.get("concept_type", None) dsd_concept_name = item.get("concept_name", None) dsd_concept_data_type = item.get("concept_data_type", None) dsd_concept_domain = item.get("concept_domain", None) dsd_concept_description = item.get("concept_description", None) dsd_attributes = item.get("concept_attributes", None) if dsd_attributes: try: attributes = dictionary_from_key_value_list( dsd_attributes, glb_idx) except Exception as e: issues.append( Issue(itype=3, description=str(e), location=IssueLocation(sheet_name=name, row=r, column=None))) return else: attributes = {} if dsd_dataset_name in ds_names: issues.append( Issue(itype=3, description="The dataset '" + dsd_dataset_name + "' has been already defined", location=IssueLocation(sheet_name=name, row=r, column=None))) return # Internal dataset definitions cache ds = current_ds.get(dsd_dataset_name, None) if True: # Statistical dataset format if not ds: ds = Dataset() ds.code = dsd_dataset_name # Name if not dsd_concept_type: attributes[ "_location"] = dsd_dataset_data_location # Location ds.description = dsd_concept_description ds.attributes = attributes # Set attributes ds.database = None current_ds[dsd_dataset_name] = ds # If concept_type is defined => add a concept if dsd_concept_type: d = Dimension() d.dataset = ds d.description = dsd_concept_description d.code = dsd_concept_name d.is_measure = False if dsd_concept_type.lower( ) == "dimension" else True if not d.is_measure and dsd_concept_data_type.lower( ) == "time": d.is_time = True else: d.is_time = False if dsd_concept_type.lower() == "attribute": attributes["_attribute"] = True else: attributes["_attribute"] = False if dsd_concept_data_type.lower() == "category": # TODO "hierarchies" variable really does not register hierarchies (see "hierarchy_command.py" or "hierarchy_categories_command.py", no insertion is made) # h = hierarchies.get(dsd_concept_domain, None) h = glb_idx.get( Hierarchy.partial_key(name=dsd_concept_domain)) if len(h) == 0: issues.append( Issue( itype=3, description= "Could not find hierarchy of Categories '" + dsd_concept_domain + "'", location=IssueLocation(sheet_name=name, row=r, column=None))) return elif len(h) > 1: issues.append( Issue( itype=3, description= "Found more than one instance of Categories '" + dsd_concept_domain + "'", location=IssueLocation(sheet_name=name, row=r, column=None))) return else: # len(h) == 1 h = h[0] d.hierarchy = h # Reencode the Hierarchy as a CodeList cl = convert_hierarchy_to_code_list(h) d.code_list = cl attributes["_datatype"] = dsd_concept_data_type attributes["_domain"] = dsd_concept_domain d.attributes = attributes # ------------------------------------------------------------------------------------------------------------- issues = [] glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects( state) name = self._content["command_name"] # List of available dataset names. 
The newly defined datasets must not be in this list ds_names = [ds.name for ds in datasets] # List of available Category hierarchies hierarchies = create_dictionary() for h in hh: hierarchies[h.name] = hh # Datasets being defined in this Worksheet current_ds = create_dictionary() # Process parsed information for line in self._content["items"]: r = line["_row"] # If the line contains a reference to a dataset or hierarchy, expand it # If not, process it directly is_expansion = False if is_expansion: pass else: process_line(line) # Any error? for issue in issues: if issue.itype == 3: error = True break else: error = False # Load the data for those datasets that are not local (data defined later in the same spreadsheet) for ds in current_ds.values(): if "_location" not in ds.attributes: error = True issues.append( Issue(itype=3, description= "Location of data not specified for dataset '" + ds.code + "'", location=IssueLocation(sheet_name=name, row=r, column=None))) else: loc = ds.attributes["_location"] ast = parser_field_parsers.string_to_ast(url_parser, loc) if ast["scheme"] != "data": df = load_dataset(loc) if df is None: error = True issues.append( Issue(itype=3, description= "Could not obtain data for dataset '" + ds.code + "'", location=IssueLocation(sheet_name=name, row=r, column=None))) else: iss = prepare_dataframe_after_external_read(ds, df) for issue in iss: issues.append( Issue(itype=3, description=issue, location=IssueLocation(sheet_name=name, row=-1, column=-1))) # Everything ok? Store the dataframe! if len(iss) == 0: ds.data = df if not error: # If no error happened, add the new Datasets to the Datasets in the "global" state for ds in current_ds: datasets[ds] = current_ds[ds] return issues, None
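# Sketch (hypothetical rows) of the two kinds of lines process_line() accepts:
# a dataset header line (no concept_type, carrying the data location) and a
# concept line adding a Dimension to it. The URL is illustrative only.
def _example_dataset_def_items():
    # The first line creates the Dataset and stores "_location" in its
    # attributes; the second adds a Dimension whose code list is re-encoded
    # from the "Regions" category Hierarchy via convert_hierarchy_to_code_list().
    return [
        {"_row": 2, "dataset_name": "ds1",
         "dataset_data_location": "https://example.org/ds1.csv"},
        {"_row": 3, "dataset_name": "ds1", "concept_type": "dimension",
         "concept_name": "Region", "concept_data_type": "category",
         "concept_domain": "Regions"},
    ]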
def process_line(item):
    # Read variables
    ft_h_name = item.get("interface_type_hierarchy", "_default")  # "_default" InterfaceType Hierarchy NAME <<<<<<
    ft_name = item.get("interface_type", None)
    ft_sphere = item.get("sphere", None)
    ft_roegen_type = item.get("roegen_type", None)
    ft_parent = item.get("parent_interface_type", None)
    ft_formula = item.get("formula", None)
    ft_description = item.get("description", None)
    ft_unit = item.get("unit", None)
    # ft_orientation = item.get("orientation", None)
    ft_attributes = item.get("attributes", {})
    if ft_attributes:
        try:
            attributes = dictionary_from_key_value_list(ft_attributes, glb_idx)
        except Exception as e:
            issues.append(Issue(itype=3, description=str(e),
                                location=IssueLocation(sheet_name=name, row=r, column=None)))
            return
    else:
        attributes = {}

    # Process
    # Mandatory fields
    if not ft_h_name:
        issues.append(Issue(itype=3,
                            description="Empty interface type hierarchy name. Skipped.",
                            location=IssueLocation(sheet_name=name, row=r, column=None)))
        return

    if not ft_name:
        issues.append(Issue(itype=3,
                            description="Empty interface type name. Skipped.",
                            location=IssueLocation(sheet_name=name, row=r, column=None)))
        return

    # Check if a hierarchy of interface types by the name <ft_h_name> exists; if not, create it and register it
    hie = glb_idx.get(Hierarchy.partial_key(name=ft_h_name))
    if not hie:
        hie = Hierarchy(name=ft_h_name, type_name="interfacetype")
        glb_idx.put(hie.key(), hie)
    else:
        hie = hie[0]

    # If a parent is defined, check that it exists
    # (it must be registered both in the global registry AND in the hierarchy)
    if ft_parent:
        parent = glb_idx.get(FactorType.partial_key(ft_parent))
        if len(parent) > 0:
            for p in parent:
                if p.hierarchy == hie:
                    parent = p
                    break
            if not isinstance(parent, FactorType):
                issues.append(Issue(itype=3,
                                    description="Parent interface type name '" + ft_parent + "' not found in hierarchy '" + ft_h_name + "'",
                                    location=IssueLocation(sheet_name=name, row=r, column=None)))
                return
        else:
            issues.append(Issue(itype=3,
                                description="Parent interface type name '" + ft_parent + "' not found",
                                location=IssueLocation(sheet_name=name, row=r, column=None)))
            return
        # Double check: it must be defined in "hie"
        if ft_parent not in hie.codes:
            issues.append(Issue(itype=3,
                                description="Parent interface type name '" + ft_parent + "' not registered in the hierarchy '" + ft_h_name + "'",
                                location=IssueLocation(sheet_name=name, row=r, column=None)))
            return
    else:
        parent = None

    # Check if the FactorType exists
    ft = glb_idx.get(FactorType.partial_key(ft_name))
    if len(ft) == 0:
        # TODO Compile and CONSIDER attributes (on the FactorType side)
        roegen_type = None
        if ft_roegen_type:
            roegen_type = FlowFundRoegenType.flow if strcmp(ft_roegen_type, "flow") else FlowFundRoegenType.fund

        ft = FactorType(ft_name,
                        parent=parent, hierarchy=hie,
                        roegen_type=roegen_type,
                        tags=None,  # No tags
                        attributes=dict(unit=ft_unit, description=ft_description, **attributes),
                        expression=ft_formula,
                        # orientation=ft_orientation,
                        sphere=ft_sphere)
        # Simple name
        glb_idx.put(FactorType.partial_key(ft_name, ft.ident), ft)
        if not strcmp(ft_name, ft.full_hierarchy_name()):
            glb_idx.put(FactorType.partial_key(ft.full_hierarchy_name(), ft.ident), ft)
    else:
        issues.append(Issue(itype=3,
                            description="Interface type name '" + ft_name + "' already registered",
                            location=IssueLocation(sheet_name=name, row=r + 1, column=None)))
        return
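# Sketch (hypothetical row) of a minimal InterfaceTypes line: no parent and
# the implicit "_default" hierarchy; field values are illustrative only.
def _example_interface_type_item():
    # process_line() on this item creates the "_default" Hierarchy on first
    # use and registers the new FactorType under both its simple name and,
    # if different, its full hierarchy name.
    return {"interface_type": "WaterUse", "sphere": "Biosphere",
            "roegen_type": "Flow", "unit": "m3"}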
def parse_dataset_data_command(sh: Worksheet, area: AreaTupleType, name: str, state) -> IssuesLabelContentTripleType:
    """
    Check that the syntax of the input spreadsheet is correct
    Return the analysis in JSON compatible format, for execution

    :param sh: Input worksheet
    :param area: Area of the input worksheet to be analysed
    :return: The command in a dict-list object (JSON ready)
    """
    issues: List[Issue] = []

    # Analyze column names
    col_map = create_dictionary()
    for c in range(area[2], area[3]):
        val = sh.cell(row=area[0], column=c).value
        if not val:
            continue
        col_name = val.strip()
        # Avoid repetitions
        if col_name in col_map:
            issues.append(Issue(itype=3,
                                description="The column name '" + col_name + "' is repeated",
                                location=IssueLocation(sheet_name=name, row=1, column=c)))

        if strcmp(col_name, "DatasetName") or strcmp(col_name, "Dataset"):
            col_map["dataset"] = c
        elif col_name:  # Concept name
            col_map[col_name] = c

    if "dataset" not in col_map:
        issues.append(Issue(itype=3,
                            description="The column name 'DatasetName' is not defined for command 'DatasetData'",
                            location=IssueLocation(sheet_name=name, row=1, column=c)))

    if any([i.itype == 3 for i in issues]):
        return issues, None, None

    # Read all the content into a list of lists
    lines = []
    for r in range(area[0] + 1, area[1]):
        line = []
        for col_name, c in col_map.items():
            v = sh.cell(row=r, column=c).value
            if isinstance(v, str):
                v = v.strip()
            line.append(v)
        lines.append(line)

    # pd.DataFrame
    df = pd.DataFrame(columns=[col_name for col_name in col_map], data=lines)

    # Find the different datasets
    datasets = df["dataset"].unique()
    datasets = set([d.lower() for d in datasets])

    content = []  # The output JSON
    for dataset in datasets:
        # Obtain the filtered subset
        df2 = df.loc[df['dataset'].str.lower() == dataset]
        # Convert to JSON (without the "dataset" column) and store in content
        df2 = df2.drop(columns=["dataset"])
        s = StringIO()
        df2.to_json(s, orient="split")
        content.append(dict(name=dataset, values=s.getvalue()))

    return issues, None, dict(items=content, command_name=name)
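# Sketch of the per-dataset serialization used above: each filtered dataframe
# is stored as a JSON string with orient="split". Runnable with just pandas:
def _example_dataset_data_values():
    import pandas as pd
    from io import StringIO
    df2 = pd.DataFrame({"Region": ["ES", "PT"], "Value": [10, 5]})
    s = StringIO()
    df2.to_json(s, orient="split")
    # -> '{"columns":["Region","Value"],"index":[0,1],"data":[["ES",10],["PT",5]]}'
    return s.getvalue()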
def process_line(item):
    sc_src_hierarchy = item.get("source_hierarchy")
    sc_src_interface_type = item.get("source_interface_type")
    sc_tgt_hierarchy = item.get("target_hierarchy")
    sc_tgt_interface_type = item.get("target_interface_type")
    sc_scale = item.get("scale")
    sc_src_context = item.get("source_context")
    sc_tgt_context = item.get("target_context")
    sc_src_unit = item.get("source_unit")
    sc_tgt_unit = item.get("target_unit")

    # Check the existence of the interface types
    force_create = True
    if force_create:
        pass

    # Check if the FactorTypes exist
    fts = []
    for i, (hierarchy, interface_type) in enumerate([(sc_src_hierarchy, sc_src_interface_type),
                                                     (sc_tgt_hierarchy, sc_tgt_interface_type)]):
        m = "origin" if i == 0 else "destination"
        if not interface_type:
            issues.append(Issue(itype=3,
                                description="The " + m + " interface type name has not been specified",
                                location=IssueLocation(sheet_name=name, row=r, column=None)))
            return

        # Check if the FactorType exists
        ft = glb_idx.get(FactorType.partial_key(interface_type))
        if len(ft) > 0:
            if len(ft) == 1:
                fts.append(ft[0])
            else:
                if not hierarchy:
                    issues.append(Issue(itype=3,
                                        description="The hierarchy of the " + m + " interface type name has not been specified and the interface type name is not unique",
                                        location=IssueLocation(sheet_name=name, row=r, column=None)))
                    return

                for ft2 in ft:
                    if strcmp(ft2.hierarchy.name, hierarchy):
                        fts.append(ft2)

    if len(fts) != 2:
        issues.append(Issue(itype=3,
                            description="Found " + str(len(fts)) + " interface types in the specification of a scale change",
                            location=IssueLocation(sheet_name=name, row=r, column=None)))
        return

    # Check that the interface types are from different hierarchies (warn if not; it is not an error)
    if fts[0].hierarchy == fts[1].hierarchy:
        issues.append(Issue(itype=2,
                            description="The interface types '" + fts[0].name + "' and '" + fts[1].name + "' are in the same hierarchy",
                            location=IssueLocation(sheet_name=name, row=r, column=None)))

    # Create the directed Scale (linear "Transformation") Relationship
    origin = fts[0]
    destination = fts[1]
    FactorTypesRelationUnidirectionalLinearTransformObservation.create_and_append(
        origin, destination, sc_scale,
        sc_src_context, sc_tgt_context,
        Observer.no_observer_specified)
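# Sketch (hypothetical row) of a scale change between interface types of two
# different hierarchies; a same-hierarchy pair would only add a warning
# (itype=2), not an error. All values below are illustrative.
def _example_scale_change_item():
    return {"source_hierarchy": "Water", "source_interface_type": "BlueWater",
            "target_hierarchy": "Money", "target_interface_type": "WaterCost",
            "scale": "0.05",
            "source_context": None, "target_context": None,
            "source_unit": "m3", "target_unit": "EUR"}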
def parse_command(sh: Worksheet, area: AreaTupleType, name: Optional[str], cmd_name: str) -> IssuesLabelContentTripleType:
    """
    Parse command in general
    Generate a JSON
    Generate a list of issues

    :param sh: Worksheet to read
    :param area: Area of the worksheet
    :param name: Name of the worksheet
    :param cmd_name: Name of the command. Key to access "command_fields" variable. Also shown in issue descriptions
    :return: issues List, None, content (JSON)
    """
    issues: List[Issue] = []

    from backend.command_field_definitions import command_fields

    cols = command_fields[cmd_name]  # List of CommandField that will guide the parsing
    ##sh_dict = read_worksheet(sh)
    ##col_map, local_issues = check_columns(sh_dict, name, area, cols, cmd_name)
    col_map, local_issues = check_columns(sh, name, area, cols, cmd_name)
    if any([i.itype == 3 for i in local_issues]):
        return local_issues, None, None

    issues.extend(local_issues)

    # "mandatory" can be defined as an expression depending on other base fields (like in RefBibliographic command fields)
    # Elaborate a list of fields having this "complex" mandatory property
    complex_mandatory_cols = [c for c in cols if isinstance(c.mandatory, str)]

    content = []  # The output JSON
    # Parse each Row
    for r in range(area[0] + 1, area[1]):
        line = {}
        expandable = False  # The line contains at least one field implying expansion into multiple lines
        complex = False  # The line contains at least one field with a complex rule (which cannot be evaluated with a simple cast)

        # Constant mandatory values
        mandatory_not_found = set([c.name for c in cols if c.mandatory and isinstance(c.mandatory, bool)])

        # Each "field"
        for col in col_map.keys():
            cname = col.name
            # Appearances of the field (normally just once; some attributes allow more than one appearance)
            for col_name, col_idx in col_map[col]:
                # Read and prepare "value"
                ##value = sh_dict.get((r, col_idx), None)
                value = sh.cell(row=r, column=col_idx).value
                if value:
                    if not isinstance(value, str):
                        value = str(value)
                    value = value.strip()
                else:
                    continue

                if col.allowed_values:  # If the CommandField checks for a list of allowed values
                    if value.lower() not in [v.lower() for v in col.allowed_values]:  # TODO Case insensitive CI
                        issues.append(Issue(itype=3,
                                            description=f"Field '{col_name}' of command '{cmd_name}' has invalid value '{value}'."
                                                        f" Allowed values are: {', '.join(col.allowed_values)}.",
                                            location=IssueLocation(sheet_name=name, row=r, column=col_idx)))
                    else:
                        line[cname] = value
                else:  # Instead of a list of values, check if a syntactic rule is met by the value
                    if col.parser:  # Parse, just check syntax (do not store the AST)
                        try:
                            ast = parser_field_parsers.string_to_ast(col.parser, value)
                            # Rules are in charge of informing if the result is expandable and if it is complex
                            if "expandable" in ast and ast["expandable"]:
                                expandable = True
                            if "complex" in ast and ast["complex"]:
                                complex = True

                            # With many appearances, just a "Key-Value list" syntax is permitted
                            if col.many_appearances:
                                if cname in line:
                                    line[cname] += ", " + col_name + "='" + value + "'"
                                else:
                                    line[cname] = col_name + "='" + value + "'"
                            else:
                                if cname in line:
                                    line[cname] += ", " + value
                                else:
                                    line[cname] = value  # Store the value
                        except:
                            ##col_header = sh_dict.get((1, col_idx), None)
                            col_header = sh.cell(row=1, column=col_idx).value
                            issues.append(Issue(itype=3,
                                                description="The value in field '" + col_header + "' of command '" + cmd_name + "' is not syntactically correct. Entered: " + value,
                                                location=IssueLocation(sheet_name=name, row=r, column=col_idx)))
                    else:
                        line[cname] = value  # No parser, just store the value blindly

                if col.name in mandatory_not_found:
                    mandatory_not_found.discard(col.name)

        if len(line) == 0:
            continue  # Empty line (allowed)

        # Flags to accelerate the second evaluation, during execution
        line["_row"] = r
        line["_expandable"] = expandable
        line["_complex"] = complex

        # Append if all mandatory fields have been filled
        may_append = True
        if len(mandatory_not_found) > 0:
            issues.append(Issue(itype=3,
                                description="Mandatory columns: " + ", ".join(mandatory_not_found) + " have not been specified",
                                location=IssueLocation(sheet_name=name, row=r, column=None)))
            may_append = False

        # Check varying mandatory fields (fields depending on the value of other fields)
        for c in complex_mandatory_cols:
            col = c.name  # next(c2 for c2 in col_map if strcmp(c.name, c2.name))
            if isinstance(c.mandatory, str):
                # Evaluate
                mandatory = eval(c.mandatory, None, line)
                may_append = (mandatory and col in line) or (not mandatory)
                if mandatory and col not in line:
                    issues.append(Issue(itype=3,
                                        description="Mandatory column: " + col + " has not been specified",
                                        location=IssueLocation(sheet_name=name, row=r, column=None)))

        if may_append:
            content.append(line)

    return issues, None, {"items": content, "command_name": name}
def check_columns(sh, name: str, area: Tuple, cols: List[CommandField], command_name: str, ignore_not_found=False):
    """
    When parsing of a command starts, check columns
    Try to match each column with declared column fields. If a column is not declared, raise an error (or ignore it)
    If mandatory columns are not found, raise an error

    :param sh: The worksheet being analyzed
    :param name: The name of the worksheet
    :param area: Area inside the worksheet that will be scanned
    :param cols: List of CommandField
    :param command_name: A string with the name of the command
    :param ignore_not_found: True if a column not matching declared ones has to be ignored, False if an error has to be raised in this case
    :return: The map from column name to column index (or indices for multiply declared columns); the issues found
    """
    issues: List[Issue] = []
    # Set of mandatory columns
    mandatory_not_found = set([c.name for c in cols if c.mandatory])

    # Check columns
    col_map = {}  # From CommandField to a list of column index
    for c in range(area[2], area[3]):  # For each column of row 0 (Header Row)
        ##val = sh.get((area[0], c), None)
        val = sh.cell(row=area[0], column=c).value
        if not val:
            continue
        col_name = val.strip()

        for col in cols:  # Find matching CommandField from the attribute "regex_allowed_names"
            if col.regex_allowed_names.match(col_name):
                # Found matching CommandField "col". Process
                if "@" in col_name:  # In case of use of "@", remove the prefix
                    col_name = col_name[col_name.index("@") + 1:]

                # Column Name to Column Index
                if not col.many_appearances:  # Column appears once
                    if col in col_map:
                        issues.append(Issue(itype=3,
                                            description="The column '" + col.name + "' should not appear more than one time",
                                            location=IssueLocation(sheet_name=name, row=1, column=c)))
                    col_map[col] = [(col_name, c)]
                else:  # Column appears one or more times
                    if col not in col_map:
                        col_map[col] = []
                    col_map[col].append((col_name, c))

                # Mandatory found (good)
                if col.name in mandatory_not_found:
                    mandatory_not_found.discard(col.name)

                break
        else:  # No match for the column "col_name"
            if not ignore_not_found:
                issues.append(Issue(itype=3,
                                    description="The column name '" + col_name + "' does not match any of the allowed column names for the command '" + command_name + "'",
                                    location=IssueLocation(sheet_name=name, row=1, column=c)))

    if len(mandatory_not_found) > 0:
        issues.append(Issue(itype=3,
                            description="Mandatory columns: " + ", ".join(mandatory_not_found) + " have not been specified",
                            location=IssueLocation(sheet_name=name, row=1, column=None)))

    return col_map, issues
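# Shape sketch of the returned col_map, inferred from the code above: each
# matched CommandField maps to a list of (header text, 1-based column index)
# pairs, with any "@" prefix already stripped from the stored header text.
# Field names below are hypothetical.
def _example_col_map_shape(sh, cols):
    col_map, issues = check_columns(sh, sh.title, (1, 2, 1, sh.max_column + 1),
                                    cols, "interface_types")
    # e.g. {<CommandField interface_type>: [("InterfaceType", 3)],
    #       <CommandField attributes>:     [("unit", 7), ("color", 8)]}
    return col_map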
def create_issue(itype: int, description: str) -> Issue:
    # Note: "command_name" and "row" are captured from the enclosing scope
    return Issue(itype=itype,
                 description=description,
                 location=IssueLocation(sheet_name=command_name, row=row, column=None))
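# create_issue() relies on "command_name" and "row" being in an enclosing
# scope. A self-contained equivalent (hypothetical wrapper, same Issue call):
def make_issue_factory(command_name: str, row: int):
    def create_issue(itype: int, description: str) -> Issue:
        return Issue(itype=itype,
                     description=description,
                     location=IssueLocation(sheet_name=command_name, row=row, column=None))
    return create_issue

# issue = make_issue_factory("InterfaceTypes", 5)(3, "Empty interface type name")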