def process(self) -> str:
    """Create the browse/thumbnail/GeoTiff views for this object in a child
    process, remove intermediate files, and return the merged result.

    :return: a CResult string; on success it carries the browse, thumbnail
        and GeoTiff file names, otherwise the failing result of the view
        creation step is returned unchanged.
    """
    browse_full_path = CFile.join_file(self.view_path, '{0}_browse.png'.format(self.object_id))
    thumb_full_path = CFile.join_file(self.view_path, '{0}_thumb.jpg'.format(self.object_id))
    geotiff_full_path = CFile.join_file(self.view_path, '{0}_browse.tiff'.format(self.object_id))

    # Child-process invocation mode: pack all arguments into a json message.
    json_out_view = CJson()
    json_out_view.set_value_of_name('image_path', self.transform_file)
    json_out_view.set_value_of_name('browse_full_path', browse_full_path)
    json_out_view.set_value_of_name('thumb_full_path', thumb_full_path)
    json_out_view.set_value_of_name('geotiff_full_path', geotiff_full_path)
    result_view = CProcessUtils.processing_method(self.create_view_json, json_out_view)

    # Files removed in both branches: gdal side-products and the GeoTiff.
    # NOTE(review): the GeoTiff is deleted even on success although its file
    # name is still reported in the result — confirm this is intentional.
    cleanup_list = [
        '{0}.aux.xml'.format(browse_full_path),
        '{0}.aux.xml'.format(thumb_full_path),
        geotiff_full_path
    ]
    if CResult.result_success(result_view):
        result = CResult.merge_result(self.Success, '处理完毕!')
        result = CResult.merge_result_info(result, self.Name_Browse, CFile.file_name(browse_full_path))
        result = CResult.merge_result_info(result, self.Name_Thumb, CFile.file_name(thumb_full_path))
        result = CResult.merge_result_info(result, self.Name_Browse_GeoTiff, CFile.file_name(geotiff_full_path))
    else:
        # On failure the (possibly partial) images are removed as well.
        cleanup_list = [browse_full_path, thumb_full_path] + cleanup_list
        result = result_view
    for cleanup_file in cleanup_list:
        if CFile.file_or_path_exist(cleanup_file):
            CFile.remove_file(cleanup_file)
    return result
def __init__(self, file_type, file_name_with_full_path):
    """Record the path components of *file_name_with_full_path* and, when it
    exists on disk as a regular file, its size and timestamps.
    """
    self.__file_type = file_type
    self.__file_name_with_full_path = file_name_with_full_path
    # Path components, all derived from the full path.
    self.__file_name_without_path = CFile.file_name(self.file_name_with_full_path)
    self.__file_main_name = CFile.file_main_name(self.file_name_with_full_path)
    self.__file_ext = CFile.file_ext(self.file_name_with_full_path)
    self.__file_path = CFile.file_path(self.file_name_with_full_path)
    self.__file_main_name_with_full_path = CFile.join_file(self.file_path, self.file_main_name)
    self.__file_existed = CFile.file_or_path_exist(self.file_name_with_full_path)
    # NOTE(review): when the path is missing or is a directory, the size and
    # time attributes are never set — reading them raises AttributeError.
    if self.__file_existed and CFile.is_file(self.file_name_with_full_path):
        self.__file_size = CFile.file_size(self.file_name_with_full_path)
        self.__file_create_time = CFile.file_create_time(self.file_name_with_full_path)
        self.__file_access_time = CFile.file_access_time(self.file_name_with_full_path)
        self.__file_modify_time = CFile.file_modify_time(self.file_name_with_full_path)
def set_custom_affiliated_file(self):
    """Collect affiliated files for each configured matching rule into
    ``self._object_detail_file_full_name_list``.

    Each rule supplies a search path, an include regex, and optionally an
    exclude regex applied to the bare file name.
    """
    for rule in self.get_custom_affiliated_file_character():
        search_path = CUtils.dict_value_by_name(rule, self.Name_FilePath, None)
        include_pattern = CUtils.dict_value_by_name(rule, self.Name_RegularExpression, None)
        exclude_pattern = CUtils.dict_value_by_name(rule, self.Name_No_Match_RegularExpression, None)
        # A rule without both a path and an include pattern is ignored.
        if search_path is None or include_pattern is None:
            continue
        # Fuzzy-match the candidate file list under the configured path.
        candidate_list = CFile.file_or_dir_fullname_of_path(
            search_path, False, include_pattern, CFile.MatchType_Regex)
        for candidate in candidate_list:
            # Skip candidates whose bare name matches the exclude pattern.
            if exclude_pattern is not None and CUtils.text_match_re(
                    CFile.file_name(candidate), exclude_pattern):
                continue
            self._object_detail_file_full_name_list.append(candidate)
def package(output_relation_dir):
    """Compile every eligible .py file under the application directory
    (including subdirectories) into .pyc files mirrored under
    *output_relation_dir*; files matched for deployment but excluded from
    compilation are copied verbatim.

    :param output_relation_dir: output directory, relative to this file
    :return: None
    """
    output_relation_dir = CFile.unify(CUtils.any_2_str(output_relation_dir))
    application_dir = CFile.file_path(CFile.file_abs_path(__file__))
    output_dir = CFile.file_abs_path(CFile.join_file(application_dir, output_relation_dir))
    for current_dir, _sub_dir_list, file_name_list in os.walk(application_dir):
        dir_name = CFile.file_name(current_dir)
        relative_dir = CFile.file_relation_path(current_dir, application_dir)
        target_dir = CFile.join_file(output_dir, relative_dir)
        # Path-level white list, then black list (short-circuit, as before).
        if not (deploy_match_pattern_list(relative_dir, 'path.white_list', True, True)
                and deploy_match_pattern_list(relative_dir, 'path.black_list', False, True)):
            continue
        # Directory-name-level white list, then black list.
        if not (deploy_match_pattern_list(dir_name, 'directory.white_list', True, True)
                and deploy_match_pattern_list(dir_name, 'directory.black_list', False, True)):
            continue
        for file_name in file_name_list:
            # File-level deployment filter.
            if not (deploy_match_pattern_list(file_name, 'file.white_list', True, True)
                    and deploy_match_pattern_list(file_name, 'file.black_list', False, True)):
                continue
            source_file = CFile.join_file(current_dir, file_name)
            # Compilation filter: white list first, black list only if it passed.
            compile_enable = deploy_match_pattern_list(file_name, 'compile.file.white_list', True, False)
            if compile_enable:
                compile_enable = deploy_match_pattern_list(file_name, 'compile.file.black_list', False, False)
            CFile.check_and_create_directory_itself(target_dir)
            if compile_enable:
                target_file = CFile.join_file(target_dir, CFile.change_file_ext(file_name, 'pyc'))
                py_compile.compile(source_file, cfile=target_file)
                print('{0}-compile-success'.format(source_file))
            else:
                CFile.copy_file_to(source_file, target_dir)
                print('{0}-no_compile'.format(source_file))
def get_classified_object_name_of_sat(self, sat_file_status) -> str:
    """Return the object name of a satellite product according to how it is
    stored (zip package, directory, or loose file).
    """
    if sat_file_status == self.Sat_Object_Status_Zip:
        # Zip package: named after the archive's main file name.
        return self.file_info.file_main_name
    if sat_file_status == self.Sat_Object_Status_Dir:
        # Directory product: named after the directory itself.
        return self.file_info.file_name_without_path
    if sat_file_status == self.Sat_Object_Status_File:
        # Loose file: named after its parent directory.
        return CFile.file_name(self.file_info.file_path)
    # Unknown status: fall back to the main file name.
    return self.file_info.file_main_name
def get_classified_character_of_affiliated_keyword(self):
    """Build the recognition rules for affiliated files of this object.

    Scans the object's directory for sibling files whose name differs only
    in the final letter, then derives from the letters found which variants
    ('a'..'d') remain to be matched and which one is the main file.
    """
    main_name = self.file_info.file_main_name
    search_path = self.file_info.file_path
    sibling_list = CFile.file_or_subpath_of_path(
        search_path, main_name[:-1] + r'.\..*$', CFile.MatchType_Regex)
    # Lower-cased final character of each sibling's file name.
    tail_letters = [CFile.file_name(name)[-1:].lower() for name in sibling_list]
    # Defaults (no siblings, or none ending in a-d): main is 'a', match b-d.
    main_letter = 'a'
    remaining_letters = 'bcd'
    # First letter present in a-d order wins; the letters after it remain.
    for candidate, remainder in (('a', 'bcd'), ('b', 'cd'), ('c', 'd'), ('d', '')):
        if candidate in tail_letters:
            main_letter = candidate
            remaining_letters = remainder
            break
    return [
        {
            self.Name_ID: self.Name_FileName,
            # Matching rule for the affiliated file name.
            self.Name_RegularExpression:
                r'(?i)^.{10}\d{2}[pm]\d{4}[' + remaining_letters + 'mp]$'
        },
        {
            self.Name_ID: self.Name_FilePath,
            # Matching rule for the affiliated file path.
            self.Name_RegularExpression:
                r'(?i)\d{4}.{2}[\\\\/]FenFu[\\\\/]' + self.get_coordinate_system_title()
        },
        {
            self.Name_ID: self.Name_FileExt,
            # Matching rule for the affiliated file extension.
            self.Name_RegularExpression: '(?i)^(tif|tiff|tfw|xml)$'
        },
        {
            self.Name_ID: self.Name_FileMain,
            # Directory in which the main file's existence is verified.
            self.Name_FilePath: self.file_info.file_path,
            # Full-name matching rule for the main file.
            self.Name_RegularExpression:
                '(?i)^' + self.file_info.file_main_name[:-1] + '[o' + main_letter + r']\.tif[f]?'
        }
    ]
def a_file(cls, audit_id, audit_title, audit_group, audit_result, file_name_with_path, qa_items: dict) -> list:
    """Audit a single file: run the per-file checks when it exists, otherwise
    report a single 'file missing' result.

    :return: a list of audit result dicts
    """
    result_dict = cls.__init_audit_dict__(audit_id, audit_title, audit_group, audit_result)
    if not CFile.file_or_path_exist(file_name_with_path):
        # Missing file: report the prepared template with a failure message.
        result_dict[cls.Name_Message] = '文件[{0}]不存在, 请检查'.format(
            CFile.file_name(file_name_with_path))
        return [result_dict]
    return cls.__a_check_file__(result_dict, file_name_with_path, qa_items)
def qa_file_custom(self, parser: CMetaDataParser):
    """Custom file-existence QA, run before metadata parsing.

    Checks for the business-metadata companion file ``<mainname[:-1]>M.xml``
    next to the object, records its location when found, and appends one
    quality entry either way.

    :param parser: metadata parser whose quality list receives the result
    :return: None
    """
    super().qa_file_custom(parser)
    file_path = self.file_info.file_path
    file_main_name = self.file_info.file_main_name
    metadata_name_with_path = CFile.join_file(
        file_path, '{0}M.xml'.format(file_main_name[:-1]))
    if CFile.file_or_path_exist(metadata_name_with_path):
        # Remember where the business metadata lives and how to transform it.
        self.metadata_bus_transformer_type = self.Transformer_XML
        self.metadata_bus_src_filename_with_path = metadata_name_with_path
        quality_file_name = self.metadata_bus_src_filename_with_path
        quality_result = self.QA_Result_Pass
        quality_message = '业务元数据[{0}]存在'.format(
            CFile.file_name(self.metadata_bus_src_filename_with_path))
    else:
        quality_file_name = ''
        quality_result = self.QA_Result_Error
        quality_message = '本文件缺少业务元数据'
    parser.metadata.quality.append_total_quality({
        self.Name_FileName: quality_file_name,
        self.Name_ID: 'metadata_file',
        self.Name_Title: '元数据文件',
        self.Name_Result: quality_result,
        self.Name_Group: self.QA_Group_Data_Integrity,
        self.Name_Message: quality_message
    })
def process_ndi_table(self):
    """Populate one row of the NDI product table from the business metadata
    of this object and persist it.

    :return: the result of ``ndi_table.save_data()``
    """
    object_table_id = self._obj_id  # object id used as both 'id' and 'fid'
    metadata_bus_dict = self.get_metadata_bus_dict()
    table_name = CUtils.dict_value_by_name(self.information(), 'ndi_table_name', 'ap_product_ndi')
    ndi_table = CTable()
    ndi_table.load_info(self._db_id, table_name)
    columns = ndi_table.column_list

    columns.column_by_name('id').set_value(object_table_id)

    # Product name from metadata, falling back to the dataset object name.
    productname = CUtils.dict_value_by_name(metadata_bus_dict, 'productname', None)
    if CUtils.equal_ignore_case(productname, ''):
        productname = self._dataset.value_by_name(0, 'dsoobjectname', None)
    columns.column_by_name('rid').set_value(productname)
    columns.column_by_name('fid').set_value(object_table_id)

    # Straight one-to-one copies from the metadata dict.
    for field_name in ('satelliteid', 'sensorid',
                       'topleftlatitude', 'topleftlongitude',
                       'toprightlatitude', 'toprightlongitude',
                       'bottomrightlatitude', 'bottomrightlongitude',
                       'bottomleftlatitude', 'bottomleftlongitude',
                       'centerlatitude', 'centerlongitude'):
        columns.column_by_name(field_name).set_value(
            CUtils.dict_value_by_name(metadata_bus_dict, field_name, None))

    # Locate the browse image under the configured view directory and store
    # its path relative to the browser directory.
    transformimg = CUtils.dict_value_by_name(metadata_bus_dict, 'transformimg', None)
    if not CUtils.equal_ignore_case(transformimg, ''):
        view_path = settings.application.xpath_one(self.Path_Setting_MetaData_Dir_View, None)
        browser_path = CFile.file_path(self._dataset.value_by_name(0, 'dso_browser', None))
        file_list = CFile.file_or_dir_fullname_of_path(
            CFile.join_file(view_path, browser_path), False, transformimg, CFile.MatchType_Regex)
        if len(file_list) > 0:
            columns.column_by_name('transformimg').set_value(
                '{0}{1}'.format(
                    CFile.sep(),
                    CFile.join_file(browser_path, CFile.file_name(file_list[0]))))

    # File size is computed by the database from the detail records.
    columns.column_by_name('filesize').set_sql('''
        (select sum(dodfilesize) from dm2_storage_obj_detail where dodobjectid='{0}')
        '''.format(object_table_id))
    columns.column_by_name('dataexist').set_value(0)
    columns.column_by_name('centertime').set_value(
        CUtils.dict_value_by_name(metadata_bus_dict, 'centertime', None))

    # Resolution defaults to 0 when absent or empty.
    resolution = CUtils.dict_value_by_name(metadata_bus_dict, 'resolution', None)
    if CUtils.equal_ignore_case(resolution, ''):
        resolution = 0
    columns.column_by_name('resolution').set_value(resolution)

    # Roll angle and cloud percentage: empty metadata values collapse to 0.
    for field_name in ('rollangle', 'cloudpercent'):
        field_value = CUtils.dict_value_by_name(metadata_bus_dict, field_name, 0)
        if CUtils.equal_ignore_case(field_value, ''):
            field_value = 0
        columns.column_by_name(field_name).set_value(field_value)

    columns.column_by_name('dataum').set_value(
        CUtils.dict_value_by_name(metadata_bus_dict, 'dataum', None))
    columns.column_by_name('acquisition_id').set_value(
        CUtils.dict_value_by_name(metadata_bus_dict, 'acquisition_id', None))
    return ndi_table.save_data()
def qa_file_custom(self, parser: CMetaDataParser):
    """Custom file-existence QA, run before metadata parsing.

    Checks two things and appends one quality record for each:
    1. the business-metadata companion file ``<mainname[:-1]>M.xml`` in the
       object's directory (error when missing);
    2. an image-phase edge-matching shapefile somewhere under the ``FenFu``
       portion of the path (warning when missing).

    NOTE(review): unlike the sibling implementation of this method, this
    version does not call ``super().qa_file_custom(parser)`` — confirm
    whether that omission is intentional.

    :param parser: metadata parser whose quality list receives the results
    :return: None
    """
    file_path = self.file_info.file_path
    file_main_name = self.file_info.file_main_name
    check_file_metadata_bus_exist = False
    ext = self.Transformer_XML
    # Business metadata lives next to the object, named <mainname[:-1]>M.xml.
    metadata_name_with_path = CFile.join_file(file_path, '{0}M.xml'.format(file_main_name[:-1]))
    if CFile.file_or_path_exist(metadata_name_with_path):
        check_file_metadata_bus_exist = True
        # Remember where the metadata lives and which transformer handles it.
        self.metadata_bus_transformer_type = ext
        self.metadata_bus_src_filename_with_path = metadata_name_with_path
    if not check_file_metadata_bus_exist:
        parser.metadata.quality.append_total_quality(
            {
                self.Name_FileName: '',
                self.Name_ID: 'metadata_file',
                self.Name_Title: '元数据文件',
                self.Name_Result: self.QA_Result_Error,
                self.Name_Group: self.QA_Group_Data_Integrity,
                self.Name_Message: '本文件缺少业务元数据'
            }
        )
    else:
        parser.metadata.quality.append_total_quality(
            {
                self.Name_FileName: self.metadata_bus_src_filename_with_path,
                self.Name_ID: 'metadata_file',
                self.Name_Title: '元数据文件',
                self.Name_Result: self.QA_Result_Pass,
                self.Name_Group: self.QA_Group_Data_Integrity,
                self.Name_Message: '业务元数据[{0}]存在'.format(
                    CFile.file_name(self.metadata_bus_src_filename_with_path)
                )
            }
        )
    # Rebuild the shapefile search directory from the path segment directly
    # after 'FenFu': keep everything up to and including 'FenFu' (+5 chars),
    # then append the first path component after it.
    # NOTE(review): '+ 6' assumes a one-character separator follows 'FenFu',
    # and find() returning -1 (no 'FenFu' in the path) is not handled —
    # the slices then operate on surprising indices; confirm inputs always
    # contain 'FenFu'.
    letter_location_FenFu = file_path.find('FenFu')
    file_path_sub = file_path[letter_location_FenFu + 6:]
    letter_location = file_path_sub.find(CFile.sep())
    shp_path = CFile.join_file(file_path[:letter_location_FenFu + 5], file_path_sub[:letter_location])
    # NOTE(review): in '(?i).shp$' the dot is unescaped, so any character
    # before 'shp' matches (e.g. 'xshp') — presumably '\.shp$' was intended.
    shp_list = CFile.file_or_subpath_of_path(shp_path, '(?i).shp$', CFile.MatchType_Regex)
    if len(shp_list) == 0:
        parser.metadata.quality.append_total_quality(
            {
                self.Name_FileName: '',
                self.Name_ID: 'shp_file',
                self.Name_Title: '影像时相接边图',
                self.Name_Result: self.QA_Result_Warn,
                self.Name_Group: self.QA_Group_Data_Integrity,
                self.Name_Message: '本文件缺少影像时相接边图'
            }
        )
    else:
        parser.metadata.quality.append_total_quality(
            {
                self.Name_FileName: shp_list[0],
                self.Name_ID: 'shp_file',
                self.Name_Title: '影像时相接边图',
                self.Name_Result: self.QA_Result_Pass,
                self.Name_Group: self.QA_Group_Data_Integrity,
                self.Name_Message: '影像时相接边图[{0}]存在'.format(shp_list[0])
            }
        )
def __a_check_file_size__(cls, result_template: dict, file_name_with_path: str, size_min: int, size_max: int):
    """Validate the file's size against the configured limits.

    :param result_template: template dict for the check result; deep-copied,
        never mutated
    :param file_name_with_path: file whose size is checked
    :param size_min: minimum required size; -1 means "no lower bound"
    :param size_max: maximum allowed size; -1 means "no upper bound"
    :return: a result dict with Name_Message filled in, and Name_Result set
        to QA_Result_Pass when the size is acceptable
    """
    result_dict = copy.deepcopy(result_template)
    file_size = CFile.file_size(file_name_with_path)
    file_name = CFile.file_name(file_name_with_path)
    # Bug fix: the original condition tested `size_min != -1` twice, so a
    # min-only rule (size_max == -1) fell into the range branch and was
    # compared against [size_min, -1], failing almost every file.
    if size_min != -1 and size_max != -1:
        if size_min <= file_size <= size_max:
            result_dict[cls.Name_Message] = '文件[{0}]的大小[{1}]在指定的[{2}-{3}]范围内, 符合要求!'.format(
                file_name, file_size, size_min, size_max)
            result_dict[cls.Name_Result] = cls.QA_Result_Pass
        else:
            result_dict[cls.Name_Message] = '文件[{0}]的大小[{1}]在指定的[{2}-{3}]范围外, 请检查!'.format(
                file_name, file_size, size_min, size_max)
    elif size_min != -1:
        if size_min <= file_size:
            result_dict[cls.Name_Message] = '文件[{0}]的大小[{1}]大于最小值[{2}], 符合要求!'.format(
                file_name, file_size, size_min)
            result_dict[cls.Name_Result] = cls.QA_Result_Pass
        else:
            result_dict[cls.Name_Message] = '文件[{0}]的大小[{1}]低于最小值[{2}], 请检查!'.format(
                file_name, file_size, size_min)
    elif size_max != -1:
        if size_max >= file_size:
            result_dict[cls.Name_Message] = '文件[{0}]的大小[{1}]低于最大值[{2}], 符合要求!'.format(
                file_name, file_size, size_max)
            result_dict[cls.Name_Result] = cls.QA_Result_Pass
        else:
            result_dict[cls.Name_Message] = '文件[{0}]的大小[{1}]超过最大值[{2}], 请检查!'.format(
                file_name, file_size, size_max)
    else:
        # No bounds configured: pass by default.
        result_dict[cls.Name_Message] = '文件[{0}]的大小[{1}]未给定限定范围, 默认符合要求!'.format(
            file_name, file_size)
        result_dict[cls.Name_Result] = cls.QA_Result_Pass
    return result_dict
def __a_check_file_format__(cls, result_template: dict, file_name_with_path: str, file_format: str):
    """Validate that the file can actually be parsed as its declared format.

    Supported formats: XML, JSON, vector file, vector dataset, raster file.
    Any other (or empty) format value passes by default.

    :param result_template: template dict for the check result; deep-copied
    :param file_name_with_path: file to open/parse
    :param file_format: one of the cls.MetaDataFormat_* / cls.DataFormat_*
        constants
    :return: a result dict with Name_Message filled in, and Name_Result set
        to QA_Result_Pass when the file parses as the declared format
    """
    result_dict = copy.deepcopy(result_template)
    if CUtils.equal_ignore_case(file_format, cls.MetaDataFormat_XML):
        try:
            CXml().load_file(file_name_with_path)
            result_dict[cls.Name_Message] = '文件[{0}]为合法的XML, 符合要求!'.format(
                CFile.file_name(file_name_with_path))
            result_dict[cls.Name_Result] = cls.QA_Result_Pass
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            result_dict[cls.Name_Message] = '文件[{0}]不是合法的XML, 请检查!'.format(
                CFile.file_name(file_name_with_path))
    elif CUtils.equal_ignore_case(file_format, cls.MetaDataFormat_Json):
        try:
            CJson().load_file(file_name_with_path)
            result_dict[cls.Name_Message] = '文件[{0}]为合法的JSON, 符合要求!'.format(
                CFile.file_name(file_name_with_path))
            result_dict[cls.Name_Result] = cls.QA_Result_Pass
        except Exception:
            # Bug fix: was a bare `except:` (see above).
            result_dict[cls.Name_Message] = '文件[{0}]不是合法的JSON, 请检查!'.format(
                CFile.file_name(file_name_with_path))
    elif CUtils.equal_ignore_case(file_format, cls.DataFormat_Vector_File):
        # Check whether the vector data file can be opened normally.
        is_file_can_read = CGdalUtils.is_vector_file_can_read_process(file_name_with_path)
        if is_file_can_read:
            result_dict[cls.Name_Message] = '文件[{0}]为合法的矢量数据, 符合要求!'.format(
                CFile.file_name(file_name_with_path))
            result_dict[cls.Name_Result] = cls.QA_Result_Pass
        else:
            result_dict[cls.Name_Message] = '文件[{0}]不是合法的矢量数据, 请检查!'.format(
                CFile.file_name(file_name_with_path))
    elif CUtils.equal_ignore_case(file_format, cls.DataFormat_Vector_Dataset):
        # Check whether the vector dataset can be opened normally.
        is_can_read = CGdalUtils.is_vector_dataset_can_read_process(file_name_with_path)
        if is_can_read:
            result_dict[cls.Name_Message] = '文件[{0}]为合法的矢量数据集, 符合要求!'.format(
                CFile.file_name(file_name_with_path))
            result_dict[cls.Name_Result] = cls.QA_Result_Pass
        else:
            result_dict[cls.Name_Message] = '文件[{0}]不是合法的矢量数据集, 请检查!'.format(
                CFile.file_name(file_name_with_path))
    elif CUtils.equal_ignore_case(file_format, cls.DataFormat_Raster_File):
        # Check whether the raster data file can be opened normally.
        is_file_can_read = CGdalUtils.is_raster_file_can_read_process(file_name_with_path)
        if is_file_can_read:
            result_dict[cls.Name_Message] = '文件[{0}]为合法的影像数据, 符合要求!'.format(
                CFile.file_name(file_name_with_path))
            result_dict[cls.Name_Result] = cls.QA_Result_Pass
        else:
            result_dict[cls.Name_Message] = '文件[{0}]不是合法的影像数据, 请检查!'.format(
                CFile.file_name(file_name_with_path))
    else:
        # No (recognized) format requirement: pass by default.
        result_dict[cls.Name_Message] = '文件[{0}]未给定格式要求, 默认符合要求!'.format(
            CFile.file_name(file_name_with_path))
        result_dict[cls.Name_Result] = cls.QA_Result_Pass
    return result_dict