def loadRules(fileTypeName, filename): """ Populate rule and multi_field_rule tables from rule rile Args: filename: File with rule specifications fileTypeName: Which type of file to load rules for """ validationDb = ValidatorValidationInterface() fileId = validationDb.getFileId(fileTypeName) with open(filename, 'rU') as ruleFile: reader = csv.DictReader(ruleFile) for record in reader: if(FieldCleaner.cleanString(record["is_single_field"]) == "true"): # Find column ID based on field name try: columnId = validationDb.getColumnId(FieldCleaner.cleanName(record["field_name"]),fileTypeName) except Exception as e: print("Failed on field " + FieldCleaner.cleanName(record["field_name"]) + " and file " + fileTypeName) raise e # Write to rule table if "rule_timing" in record and "rule_label" in record: validationDb.addRule(columnId,record["rule_type"],record["rule_text_one"],record["description"],record["rule_timing"],record["rule_label"]) else: validationDb.addRule(columnId,record["rule_type"],record["rule_text_one"],record["description"]) else: # Write to multi_field_rule table validationDb.addMultiFieldRule(fileId,record["rule_type"],record["rule_text_one"],record["rule_text_two"],record["description"])
def loadCrossRules(filename): """ Populate rule table with cross file validation rules """ validationDb = ValidatorValidationInterface() with open(filename, 'rU') as ruleFile: reader = csv.DictReader(ruleFile) for record in reader: fileId = validationDb.getFileId(record["file"]) if record["target_file"]: targetFileId = validationDb.getFileId(record["target_file"]) else: targetFileId = None # Look up rule timing id try: ruleTimingId = validationDb.getRuleTimingIdByName( FieldCleaner.cleanName(record["rule_timing"])) except Exception as e: raise Exception("".join( [str(e), "Cross-file rule load failed on timing value ", FieldCleaner.cleanName(record["rule_timing"]), " and file ", fileTypeName])) try: validationDb.addRule( None, record["rule_type"], record["rule_text_one"], record["rule_text_two"], record["description"], ruleTimingId, record["rule_label"], targetFileId, fileId = fileId) except Exception as e: raise Exception('{}: cross-file rule insert failed (rule={}'.format( e, record["description"]))
def loadFields(fileTypeName,schemaFileName): """ Load schema file to create validation rules and removes existing schemas Arguments: schemaFileName -- filename of csv file that holds schema definition fileTypeName -- the type of file that the schema represents """ #Step 1 Clean out the database database = ValidatorValidationInterface() database.removeRulesByFileType(fileTypeName) database.removeColumnsByFileType(fileTypeName) #Step 2 add the new fields with open(schemaFileName, 'rU') as csvfile: reader = csv.DictReader(csvfile) for record in reader: record = FieldCleaner.cleanRecord(record) if(LoaderUtils.checkRecord(record, ["fieldname","required","data_type"])) : columnId = database.addColumnByFileType(fileTypeName,FieldCleaner.cleanString(record["fieldname"]),record["required"],record["data_type"]) if "field_length" in record: # When a field length is specified, create a rule for it length = record["field_length"].strip() if(len(length) > 0): # If there are non-whitespace characters here, create a length rule database.addRule(columnId,"LENGTH",length,"Field must be no longer than specified limit") else : raise ValueError('CSV File does not follow schema')
def loadRules(fileTypeName, filename): """ Populate rule table from rule rile Args: filename: File with rule specifications fileTypeName: Which type of file to load rules for """ validationDb = ValidatorValidationInterface() fileId = validationDb.getFileId(fileTypeName) with open(filename, 'rU') as ruleFile: reader = csv.DictReader(ruleFile) for record in reader: if (FieldCleaner.cleanString( record["is_single_field"]) == "true"): # Find column ID based on field name try: columnId = validationDb.getColumnId( FieldCleaner.cleanName(record["field_name"]), fileTypeName) except Exception as e: raise Exception("".join([ str(e), "Failed on field ", FieldCleaner.cleanName(record["field_name"]), " and file ", fileTypeName ])) else: # Multi field rules don't get a file_column columnId = None # Look up rule timing id try: ruleTimingId = validationDb.getRuleTimingIdByName( FieldCleaner.cleanName(record["rule_timing"])) except Exception as e: raise Exception("".join([ str(e), " Rule load failed on timing value ", FieldCleaner.cleanName(record["rule_timing"]), " and file ", fileTypeName ])) # Target file info is applicable to cross-file rules only targetFileId = None # Write to rule table try: validationDb.addRule(columnId, str(record["rule_type"]), str(record["rule_text_one"]), str(record["rule_text_two"]), str(record["description"]), ruleTimingId, str(record["rule_label"]), targetFileId=targetFileId, fileId=fileId) except Exception as e: raise Exception( '{}: rule insert failed (file={}, rule={}'.format( e, fileTypeName, record["description"]))
def loadRules(fileTypeName, filename): """ Populate rule table from rule rile Args: filename: File with rule specifications fileTypeName: Which type of file to load rules for """ validationDb = ValidatorValidationInterface() fileId = validationDb.getFileId(fileTypeName) with open(filename, 'rU') as ruleFile: reader = csv.DictReader(ruleFile) for record in reader: if(FieldCleaner.cleanString(record["is_single_field"]) == "true"): # Find column ID based on field name try: columnId = validationDb.getColumnId(FieldCleaner.cleanName(record["field_name"]),fileTypeName) except Exception as e: raise Exception("".join([str(e),"Failed on field ",FieldCleaner.cleanName(record["field_name"])," and file ",fileTypeName])) else: # Multi field rules don't get a file_column columnId = None # Look up rule timing id try: ruleTimingId = validationDb.getRuleTimingIdByName( FieldCleaner.cleanName(record["rule_timing"])) except Exception as e: raise Exception("".join( [str(e), " Rule load failed on timing value ", FieldCleaner.cleanName(record["rule_timing"]), " and file ", fileTypeName])) # Target file info is applicable to cross-file rules only targetFileId = None # Write to rule table try: validationDb.addRule(columnId, str(record["rule_type"]), str(record["rule_text_one"]), str(record["rule_text_two"]), str(record["description"]), ruleTimingId, str(record["rule_label"]), targetFileId=targetFileId, fileId=fileId) except Exception as e: raise Exception('{}: rule insert failed (file={}, rule={}'.format( e, fileTypeName, record["description"]))
def loadRules(fileTypeName, filename): """ Populate rule and multi_field_rule tables from rule rile Args: filename: File with rule specifications fileTypeName: Which type of file to load rules for """ validationDb = ValidatorValidationInterface() fileId = validationDb.getFileId(fileTypeName) with open(filename, 'rU') as ruleFile: reader = csv.DictReader(ruleFile) for record in reader: if (FieldCleaner.cleanString( record["is_single_field"]) == "true"): # Find column ID based on field name try: columnId = validationDb.getColumnId( FieldCleaner.cleanName(record["field_name"]), fileTypeName) except Exception as e: print("Failed on field " + FieldCleaner.cleanName(record["field_name"]) + " and file " + fileTypeName) raise e # Write to rule table if "rule_timing" in record and "rule_label" in record: validationDb.addRule(columnId, record["rule_type"], record["rule_text_one"], record["description"], record["rule_timing"], record["rule_label"]) else: validationDb.addRule(columnId, record["rule_type"], record["rule_text_one"], record["description"]) else: # Write to multi_field_rule table validationDb.addMultiFieldRule(fileId, record["rule_type"], record["rule_text_one"], record["rule_text_two"], record["description"])
def loadFields(fileTypeName, schemaFileName): """ Load schema file to create validation rules and removes existing schemas Arguments: schemaFileName -- filename of csv file that holds schema definition fileTypeName -- the type of file that the schema represents """ #Step 1 Clean out the database database = ValidatorValidationInterface() database.removeRulesByFileType(fileTypeName) database.removeColumnsByFileType(fileTypeName) #Step 2 add the new fields with open(schemaFileName, 'rU') as csvfile: reader = csv.DictReader(csvfile) for record in reader: record = FieldCleaner.cleanRecord(record) if (LoaderUtils.checkRecord( record, ["fieldname", "required", "data_type"])): columnId = database.addColumnByFileType( fileTypeName, FieldCleaner.cleanString(record["fieldname"]), record["required"], record["data_type"]) if "field_length" in record: # When a field length is specified, create a rule for it length = record["field_length"].strip() if (len(length) > 0): # If there are non-whitespace characters here, create a length rule database.addRule( columnId, "LENGTH", length, "", "Field must be no longer than specified limit") else: raise ValueError('CSV File does not follow schema')
def loadCrossRules(filename): """ Populate rule table with cross file validation rules """ validationDb = ValidatorValidationInterface() with open(filename, 'rU') as ruleFile: reader = csv.DictReader(ruleFile) for record in reader: fileId = validationDb.getFileId(record["file"]) if record["target_file"]: targetFileId = validationDb.getFileId( record["target_file"]) else: targetFileId = None # Look up rule timing id try: ruleTimingId = validationDb.getRuleTimingIdByName( FieldCleaner.cleanName(record["rule_timing"])) except Exception as e: raise Exception("".join([ str(e), "Cross-file rule load failed on timing value ", FieldCleaner.cleanName(record["rule_timing"]), " and file ", fileTypeName ])) try: validationDb.addRule(None, record["rule_type"], record["rule_text_one"], record["rule_text_two"], record["description"], ruleTimingId, record["rule_label"], targetFileId, fileId=fileId) except Exception as e: raise Exception( '{}: cross-file rule insert failed (rule={}'.format( e, record["description"]))