def import_data(self, data_source: CDataSetSeqReader, data_target: CTable) -> str:
    """Import every record of *data_source* into *data_target*.

    Iterates the source reader sequentially and stops at the first record
    that fails to import.

    :param data_source: sequential reader positioned over the source records
    :param data_target: table object receiving the records
    :return: a CResult string; on success it carries the imported record count
    """
    success_record_count = 0
    if not data_source.first():
        return CResult.merge_result(
            self.Success,
            '数据源无有效导入数据, 系统自动设定导入成功! '
        )
    while True:
        try:
            result = self.__import_each_record(data_source, data_target)
            if not CResult.result_success(result):
                return result
        except Exception as error:
            # Bug fix: the failing record is the (success_record_count + 1)-th
            # one — the counter is only incremented after a successful import,
            # so the old message was off by one (reported "第0条" for the
            # first record).
            return CResult.merge_result(
                self.Failure,
                '第{0}条数据入库失败, 详细错误原因为: {1}!'.format(
                    success_record_count + 1, error.__str__())
            )
        success_record_count = success_record_count + 1
        if not data_source.next():
            break
    return CResult.merge_result(
        self.Success,
        '数据源的全部数据导入成功, 共导入记录数[{0}]! '.format(success_record_count)
    )
def update_ib_data_status_in_core_or_mix_storage(self, ib_id, storage_id, ib_directory_name, ib_dir_id):
    """
    For online (core) or mixed storage the data stays where it is, so
    finishing the inbound batch only requires flipping the business status
    of its files/directories/objects to online and recording the target
    storage on the inbound record.
    :param ib_id: inbound batch identifier
    :param ib_dir_id: inbound directory identifier (currently unused here)
    :param storage_id: target storage identifier
    :param ib_directory_name: directory name, used only in result messages
    :return: a CResult string describing success or failure
    """
    # Mark every file of the batch as online.
    sql_update_file = '''
    update dm2_storage_file set dsf_bus_status = '{0}' where dsf_ib_id = :ib_id
    '''.format(self.IB_Bus_Status_Online)
    params_update_file = {'ib_id': ib_id}
    # Mark every sub-directory of the batch as online.
    sql_update_directory = '''
    update dm2_storage_directory set dsd_bus_status = '{0}' where dsd_ib_id = :ib_id
    '''.format(self.IB_Bus_Status_Online)
    params_update_directory = {'ib_id': ib_id}
    # Mark every object of the batch as online.
    sql_update_object = '''
    update dm2_storage_object set dso_bus_status = '{0}' where dso_ib_id = :ib_id
    '''.format(self.IB_Bus_Status_Online)
    params_update_object = {'ib_id': ib_id}
    # Record the target storage on the inbound record itself.
    sql_update_ib_target_storage = '''
    update dm2_storage_inbound set dsitargetstorageid = :target_storage_id where dsiid = :ib_id
    '''
    params_update_ib_target_storage = {
        'target_storage_id': storage_id,
        'ib_id': ib_id
    }
    commands = [
        (sql_update_file, params_update_file),
        (sql_update_directory, params_update_directory),
        (sql_update_object, params_update_object),
        (sql_update_ib_target_storage, params_update_ib_target_storage)
    ]
    try:
        # Execute all four updates in one batch so the status flip is atomic.
        CFactory().give_me_db(
            self.get_mission_db_id()).execute_batch(commands)
        return CResult.merge_result(
            self.Success,
            '存储[{0}]下的数据[{1}]入库成功!'.format(storage_id, ib_directory_name))
    except Exception as error:
        # Bug fix: the failure message used to read the contradictory
        # '入库成功失败' ("inbound succeeded-failed"); corrected to '入库失败'.
        return CResult.merge_result(
            self.Failure,
            '存储[{0}]下的数据[{1}]入库失败, 错误原因为: [{2}]!'.format(
                storage_id, ib_directory_name, error.__str__()))
def sync(self) -> str:
    """Parse the business metadata and synchronize it into the main,
    metadata and ndi tables; the first failing step wins."""
    try:
        parse_result = self.process_metadata_bus_dict()
        if not CResult.result_success(parse_result):
            return CResult.merge_result(
                self.Failure,
                '卫星数据的业务元数据的详细内容解析出错!原因为{0}'.format(
                    CResult.result_message(parse_result)))
        # All three table syncs run unconditionally; report the first failure.
        for table_result in (self.process_main_table(),
                             self.process_metadata_table(),
                             self.process_ndi_table()):
            if not CResult.result_success(table_result):
                return table_result
        return CResult.merge_result(
            self.Success, '对象[{0}]的同步成功! '.format(self._obj_name))
    except Exception as error:
        return CResult.merge_result(
            self.Failure,
            '数据检索分发模块在进行数据同步时出现错误:同步的对象[{0}]在处理时出现异常, 详细情况: [{1}]!'.format(
                self._obj_name, error.__str__()))
def init_metadata_bus(self, parser: CMetaDataParser) -> str:
    """
    Load the xml-format business metadata file into parser's metadata object.
    :param parser: metadata parser receiving the file reference
    :return: a CResult string describing success or failure
    """
    if not CFile.file_or_path_exist(self.__bus_metadata_xml_file_name__):
        return CResult.merge_result(
            self.Failure,
            '元数据文件[{0}]不存在, 无法解析! '.format(
                self.__bus_metadata_xml_file_name__))
    try:
        parser.metadata.set_metadata_bus_file(
            self.Success,
            '元数据文件[{0}]成功加载! '.format(self.__bus_metadata_xml_file_name__),
            self.MetaDataFormat_XML,
            self.__bus_metadata_xml_file_name__)
        return CResult.merge_result(
            self.Success,
            '元数据文件[{0}]成功加载! '.format(self.__bus_metadata_xml_file_name__))
    except Exception:
        # Bug fix: narrowed the bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt) down to Exception.
        parser.metadata.set_metadata_bus(
            self.Failure,
            '元数据文件[{0}]格式不合法, 无法处理! '.format(
                self.__bus_metadata_xml_file_name__),
            self.MetaDataFormat_Text, '')
        return CResult.merge_result(
            self.Exception,
            '元数据文件[{0}]格式不合法, 无法处理! '.format(
                self.__bus_metadata_xml_file_name__))
def process(self) -> str:
    """
    TODO(zhaoyufei): extract the raster data's spatial information, stored
    as files under self.file_content.work_root_dir; the returned result
    carries the spatial-info file names and projection attributes.
    NOTE: if memory leaks appear, run the extraction in a child process and
    parse the resulting file in this process.
    :return: a CResult string with spatial file names and projection info
    """
    result_process = self.process_raster()
    if not CResult.result_success(result_process):
        return CResult.merge_result(
            self.Failure, CResult.result_message(result_process))
    file_path = self.file_content.work_root_dir
    # Geometry wkt files follow the '<object>_<suffix>.wkt' naming scheme.
    wkt_suffixes = {
        self.Name_Native_Center: 'native_center',
        self.Name_Native_BBox: 'native_bbox',
        self.Name_Native_Geom: 'native_geom',
        self.Name_Wgs84_Center: 'wgs84_center',
        self.Name_Wgs84_BBox: 'wgs84_bbox',
        self.Name_Wgs84_Geom: 'wgs84_geom'
    }
    result = CResult.merge_result(self.Success, '处理完毕!')
    for file_type, suffix in wkt_suffixes.items():
        result = CResult.merge_result_info(
            result, file_type,
            CFile.join_file(file_path, '{0}_{1}.wkt'.format(self.object_name, suffix)))
    # Projection attributes are copied verbatim from the raster result.
    for prj_type in (self.Name_Prj_Wkt, self.Name_Prj_Proj4,
                     self.Name_Prj_Project, self.Name_Prj_Coordinate,
                     self.Name_Prj_Source, self.Name_Prj_Zone,
                     self.Name_Prj_Degree):
        result = CResult.merge_result_info(
            result, prj_type,
            CResult.result_info(result_process, prj_type, None))
    return result
def process_mission(self, dataset) -> str:
    """
    Scan one storage-directory mission: refresh its files and sub-directories.
    Gives up with a failure result once the retry limit is reached; otherwise
    resolves the applicable scan rule and parses the directory content.
    :param dataset: mission record set; row 0 carries the directory info
    :return: a CResult string describing success or failure
    """
    ds_id = dataset.value_by_name(0, 'query_dir_id', '')
    ds_storage_id = dataset.value_by_name(0, 'query_storage_id', '')
    inbound_id = dataset.value_by_name(0, 'query_dir_ib_id', None)
    ds_subpath = dataset.value_by_name(0, 'query_subpath', '')
    ds_root_path = dataset.value_by_name(0, 'query_rootpath', '')
    ds_retry_times = dataset.value_by_name(0, 'retry_times', 0)
    if ds_retry_times >= self.abnormal_job_retry_times():
        ds_last_process_memo = CUtils.any_2_str(
            dataset.value_by_name(0, 'last_process_memo', None))
        process_result = CResult.merge_result(
            self.Failure,
            '{0}, \n系统已经重试{1}次, 仍然未能解决, 请人工检查修正后重试!'.format(
                ds_last_process_memo, ds_retry_times))
        self.update_dir_status(ds_id, process_result, self.ProcStatus_Error)
        return process_result
    if ds_subpath == '':
        ds_subpath = ds_root_path
    else:
        ds_subpath = CFile.join_file(ds_root_path, ds_subpath)
    # Reset the validity of all sub-directories and files to "unknown".
    self.init_file_or_subpath_valid_unknown(ds_id)
    try:
        # Pick the nearest ancestor directory (longest match) that carries
        # a scan rule for this storage.
        sql_get_rule = '''
        select dsdScanRule
        from dm2_storage_directory
        where dsdStorageid = :dsdStorageID
            and position((dsddirectory || '{0}') in :dsdDirectory) = 1
            and dsdScanRule is not null
        order by dsddirectory desc
        limit 1
        '''.format(CFile.sep())
        rule_ds = CFactory().give_me_db(self.get_mission_db_id()).one_row(
            sql_get_rule,
            {
                'dsdStorageID': ds_storage_id,
                'dsdDirectory': ds_subpath
            })
        # Bug fix: the selected column is dsdScanRule; the old lookup used
        # the misspelled name 'dsScanRule' and therefore always fell back to
        # the empty default rule.
        ds_rule_content = rule_ds.value_by_name(0, 'dsdScanRule', '')
        CLogger().debug('处理的目录为: {0}'.format(ds_subpath))
        self.parser_file_or_subpath_of_path(
            dataset, ds_id, ds_subpath, ds_rule_content, inbound_id)
        result = CResult.merge_result(
            self.Success,
            '目录为[{0}]下的文件和子目录扫描处理成功!'.format(ds_subpath))
        self.update_dir_status(ds_id, result)
        return result
    except Exception as err:
        result = CResult.merge_result(
            self.Failure,
            '目录为[{0}]下的文件和子目录扫描处理出现错误!错误原因为: {1}'.format(
                ds_subpath, err.__str__()))
        self.update_dir_status(ds_id, result)
        return result
    finally:
        # Whatever is still "unknown" after the scan no longer exists on disk.
        self.exchange_file_or_subpath_valid_unknown2invalid(ds_id)
def __inbound_object_detail_of_schema(self, list_file_fullname):
    """
    Insert one dm2_storage_obj_detail row per entry in *list_file_fullname*.
    The row matching the object's own file reuses the object id as its
    detail id; every other row gets a freshly generated id.
    :param list_file_fullname: absolute file/dir names belonging to the object
    :return: a CResult string describing success or failure
    """
    sql_detail_insert = '''
    INSERT INTO dm2_storage_obj_detail(
        dodid, dodobjectid, dodfilename, dodfileext, dodfilesize,
        dodfilecreatetime, dodfilemodifytime, dodlastmodifytime, dodfiletype)
    VALUES (
        :dodid, :dodobjectid, :dodfilename, :dodfileext, :dodfilesize,
        :dodfilecreatetime, :dodfilemodifytime, now(), :dodfiletype)
    '''
    sql_detail_insert_params_list = []
    # query_storage_id = self.file_info.storage_id
    query_file_relation_name = self.file_info.file_name_with_rel_path
    for item_file_name_with_path in list_file_fullname:
        CLogger().debug(item_file_name_with_path)
        # Skip entries that vanished between scanning and inserting.
        if not CFile.file_or_path_exist(item_file_name_with_path):
            continue
        params = dict()
        file_relation_name = CFile.file_relation_path(
            item_file_name_with_path, self.file_info.root_path)
        # The object's own file keeps the object id as its detail id.
        if CUtils.equal_ignore_case(query_file_relation_name, file_relation_name):
            params['dodid'] = self.object_id
        else:
            params['dodid'] = CUtils.one_id()
        # Detail type: plain file unless the path is a directory.
        params['dodfiletype'] = self.FileType_File
        if CFile.is_dir(item_file_name_with_path):
            params['dodfiletype'] = self.FileType_Dir
        params['dodobjectid'] = self.object_id
        params['dodfilename'] = CFile.unify(file_relation_name)
        params['dodfileext'] = CFile.file_ext(item_file_name_with_path)
        params['dodfilesize'] = CFile.file_size(item_file_name_with_path)
        params['dodfilecreatetime'] = CFile.file_create_time(
            item_file_name_with_path)
        params['dodfilemodifytime'] = CFile.file_modify_time(
            item_file_name_with_path)
        # params['dodstorageid'] = query_storage_id
        # params['dodfilerelationname'] = CFile.file_relation_path(
        #     item_file_name_with_path,
        #     self.file_info.root_path)
        sql_params_tuple = (sql_detail_insert, params)
        sql_detail_insert_params_list.append(sql_params_tuple)
    if len(sql_detail_insert_params_list) > 0:
        try:
            # One batch insert for all collected rows.
            CFactory().give_me_db(
                self.file_info.db_server_id).execute_batch(
                sql_detail_insert_params_list)
        except Exception as error:
            CLogger().warning('数据库处理出现异常, 错误信息为: {0}'.format(
                error.__str__()))
            return CResult.merge_result(self.Failure, '处理失败!')
    return CResult.merge_result(self.Success, '处理完毕!')
def execute(self) -> str:
    """Entry point: fetch the mission record and dispatch it for processing."""
    mission_data = self.get_mission_info()
    if mission_data is None:
        return CResult.merge_result(CResult.Failure, '任务配置异常, 系统无法处理该任务!')
    if mission_data.is_empty():
        return CResult.merge_result(CResult.Failure, '没有可执行的任务!')
    return self.process_mission(mission_data)
def check_all_ib_file_or_path_existed(self, ib_id):
    """
    Verify the recorded metadata of inbound batch *ib_id* against the disk.
    Returns a CResult:
    . Success when every recorded file exists with matching size and
      modification time
    . Failure otherwise, listing a few of the mismatching file names in the
      result message
    :param ib_id:
    :return:
    """
    invalid_file_list = []
    more_failure_file = False
    sql_all_ib_file = '''
    select coalesce(dm2_storage.dstownerpath, dm2_storage.dstunipath)
            || dm2_storage_file.dsffilerelationname as file_name
        , dm2_storage_file.dsffilesize as file_size
        , dm2_storage_file.dsffilemodifytime as file_modify_time
    from dm2_storage_file
        left join dm2_storage on dm2_storage.dstid = dm2_storage_file.dsfstorageid
    where dsf_ib_id = :ib_id
    '''
    params_all_ib_file = {'ib_id': ib_id}
    ds_ib_file = CFactory().give_me_db(self.get_mission_db_id()).all_row(
        sql_all_ib_file, params_all_ib_file)
    for ds_ib_file_index in range(ds_ib_file.size()):
        file_valid = True
        file_name = ds_ib_file.value_by_name(ds_ib_file_index, 'file_name', '')
        if not CUtils.equal_ignore_case(file_name, ''):
            # Invalid when the file is missing, or its modification time or
            # size differs from the recorded value.
            if not CFile.file_or_path_exist(file_name):
                file_valid = False
            elif not CUtils.equal_ignore_case(
                    CFile.file_modify_time(file_name),
                    ds_ib_file.value_by_name(
                        ds_ib_file_index, 'file_modify_time', '')):
                file_valid = False
            elif CFile.file_size(file_name) != ds_ib_file.value_by_name(
                    ds_ib_file_index, 'file_size', 0):
                file_valid = False
        if not file_valid:
            # Collect a few sample names only. NOTE(review): `<= 3` lets the
            # list grow to four entries before stopping — presumably the
            # intent was three; confirm before changing.
            if len(invalid_file_list) <= 3:
                invalid_file_list.append(file_name)
            else:
                more_failure_file = True
                break
    if len(invalid_file_list) > 0:
        message = ''
        for invalid_file in invalid_file_list:
            message = CUtils.str_append(message, invalid_file)
        if more_failure_file:
            message = CUtils.str_append(message, '...')
        message = CUtils.str_append(message, '上述数据与库中记录不统一, 请重新扫描入库! ')
        return CResult.merge_result(self.Failure, message)
    else:
        return CResult.merge_result(self.Success, '所有文件均存在, 且与库中记录统一! ')
def delete_data(self, session: Session = None) -> str:
    """Run the prepared DELETE statement, inside *session* when given."""
    try:
        sql_text, sql_params = self.__prepare_delete()
        if session is not None:
            self.__database.session_execute(session, sql_text, sql_params)
        else:
            self.__database.execute(sql_text, sql_params)
        return CResult.merge_result(CResult.Success)
    except Exception as error:
        return CResult.merge_result(CResult.Failure, error.__str__())
def __stat_object_detail_of_schema(self) -> str:
    """
    Store the statistics (sub-dir count, file count, total size) of the
    object's detail path into dm2_storage_obj_detail.
    . Only applicable in Directory_Itself mode
    :return:
    """
    # Aggregate counts/size of the detail path per the configured filters.
    result_sub_dir_count, result_file_count, result_file_size_sum = CFile.stat_of_path(
        self.__detail_file_path__,
        self.__detail_file_recurse__,
        self.__detail_file_match_text__,
        self.__detail_file_match_type__)
    query_file_relation_name = self.file_info.file_name_with_rel_path
    params = dict()
    file_relation_name = CFile.file_relation_path(
        self.__detail_file_path__, self.file_info.root_path)
    # The object's own path keeps the object id as its detail id.
    if CUtils.equal_ignore_case(query_file_relation_name, file_relation_name):
        params['dodid'] = self.object_id
    else:
        params['dodid'] = CUtils.one_id()
    # Detail type: directory by default, file when the path is a file.
    params['dodfiletype'] = self.FileType_Dir
    params['dodfileext'] = None
    if CFile.is_file(self.__detail_file_path__):
        params['dodfiletype'] = self.FileType_File
        params['dodfileext'] = CFile.file_ext(self.__detail_file_path__)
    params['dodobjectid'] = self.object_id
    params['dodfilename'] = CFile.unify(file_relation_name)
    params['doddircount'] = result_sub_dir_count
    params['dodfilecount'] = result_file_count
    params['dodfilesize'] = result_file_size_sum
    params['dodfilecreatetime'] = CFile.file_create_time(
        self.__detail_file_path__)
    params['dodfilemodifytime'] = CFile.file_modify_time(
        self.__detail_file_path__)
    try:
        CFactory().give_me_db(self.file_info.db_server_id).execute(
            '''
            INSERT INTO dm2_storage_obj_detail(
                dodid, dodobjectid, dodfilename, dodfileext, dodfilesize,
                doddircount, dodfilecount, dodfilecreatetime,
                dodfilemodifytime, dodlastmodifytime, dodfiletype)
            VALUES (
                :dodid, :dodobjectid, :dodfilename, :dodfileext, :dodfilesize,
                :doddircount, :dodfilecount, :dodfilecreatetime,
                :dodfilemodifytime, now(), :dodfiletype)
            ''', params)
        return CResult.merge_result(self.Success, '处理完毕!')
    except Exception as error:
        CLogger().warning('数据库处理出现异常, 错误信息为: {0}'.format(error.__str__()))
        return CResult.merge_result(
            self.Failure,
            '数据库处理出现异常, 错误信息为: {0}'.format(error.__str__()))
def update_data(self, session: Session = None) -> str:
    """Run the prepared UPDATE statement batch, inside *session* when given."""
    try:
        sql_list = self.__prepare_update_data()
        if session is not None:
            self.__database.session_execute_batch(session, sql_list)
        else:
            self.__database.execute_batch(sql_list)
        return CResult.merge_result(CResult.Success)
    except Exception as error:
        return CResult.merge_result(CResult.Failure, error.__str__())
def _do_sync(self) -> str:
    """
    Insert or update this object's row in the plugin's target table.

    The table has a composite primary key, so CTable.if_exists() cannot be
    used; the record is probed directly to decide between insert and update,
    and the decision is passed to get_sync_dict_list so insert-only default
    fields are not overwritten on update.
    :return: a CResult string describing success or failure
    """
    try:
        table_name = CUtils.dict_value_by_name(
            self.information(), 'table_name', '')
        # Bug fix: the object id was string-formatted into the SQL text;
        # it is now passed as a bound parameter, avoiding SQL injection and
        # quoting problems. (table_name still comes from trusted plugin
        # configuration.)
        sql_check = '''
        select aprid from {0} where aprid = :obj_id
        '''.format(table_name)
        record_count = CFactory().give_me_db(self._db_id).one_row(
            sql_check, {'obj_id': self._obj_id}).size()
        # No existing record -> insert mode; otherwise update mode.
        insert_or_update = self.DB_True if record_count == 0 else self.DB_False
        table = CTable()
        table.load_info(self._db_id, table_name)
        for field_dict in self.get_sync_dict_list(insert_or_update):
            field_name = CUtils.dict_value_by_name(field_dict, 'field_name', '')
            field_value = CUtils.dict_value_by_name(field_dict, 'field_value', '')
            field_value_type = CUtils.dict_value_by_name(
                field_dict, 'field_value_type', '')
            column = table.column_list.column_by_name(field_name)
            if CUtils.equal_ignore_case(field_value, ''):
                column.set_null()
            elif CUtils.equal_ignore_case(field_value_type, self.DataValueType_Value):
                column.set_value(field_value)
            elif CUtils.equal_ignore_case(field_value_type, self.DataValueType_SQL):
                column.set_sql(field_value)
            elif CUtils.equal_ignore_case(field_value_type, self.DataValueType_Array):
                column.set_array(field_value)
            else:
                pass
        # Call insert/update directly rather than save_data() to avoid the
        # extra existence query save_data() would perform.
        if insert_or_update:
            result = table.insert_data()
        else:
            result = table.update_data()
        if CResult.result_success(result):
            return CResult.merge_result(
                self.Success, '对象[{0}]的同步成功! '.format(self._obj_name))
        return result
    except Exception as error:
        return CResult.merge_result(
            self.Failure,
            '数据检索分发模块在进行数据同步时出现错误:同步的对象[{0}]在处理时出现异常, 详细情况: [{1}]!'.format(
                self._obj_name, error.__str__()))
def process_mission(self, dataset) -> str: """ :param dataset: :return: """ ds_na_id = dataset.value_by_name(0, 'na_id', '') ds_app_id = dataset.value_by_name(0, 'app_id', '') ds_object_id = dataset.value_by_name(0, 'object_id', '') ds_object_type = dataset.value_by_name(0, 'object_type', '') ds_object_name = dataset.value_by_name(0, 'object_name', '') ds_object_access = dataset.value_by_name(0, 'object_access', self.DataAccess_Forbid) CLogger().debug('与第三方模块[{0}]同步的对象为: [{1}]'.format( ds_app_id, ds_object_name)) try: module_file_name = CFile.join_file( CSys.get_metadata_data_access_modules_root_dir(), '{0}.{1}'.format(ds_app_id, self.FileExt_Py)) if not CFile.file_or_path_exist(module_file_name): message = '第三方模块[{0}]没有设置对应的算法, 直接通过!'.format(ds_app_id) result = CResult.merge_result(self.Success, message) self.update_sync_result(ds_na_id, result) return result module_obj = CObject.create_module_instance( CSys.get_metadata_data_access_modules_root_name(), ds_app_id, self.get_mission_db_id()) if module_obj is None: message = '第三方模块[{0}]没有设置对应的算法, 直接通过!'.format(ds_app_id) result = CResult.merge_result(self.Success, message) self.update_sync_result(ds_na_id, result) return result module_title = CUtils.dict_value_by_name(module_obj.information(), self.Name_Title, '') result = module_obj.sync(ds_object_access, ds_object_id, ds_object_name, ds_object_type, None) self.update_sync_result(ds_na_id, result) return result except Exception as error: result = CResult.merge_result( self.Failure, '与第三方模块[{0}]同步的对象: [{1}]的同步过程出现异常, 详细情况: [{2}]!'.format( ds_app_id, ds_object_name, error.__str__())) self.update_sync_result(ds_na_id, result) return result
def save_metadata_time(self) -> str:
    """Persist the object's time metadata into dm2_storage_object."""
    time_result, time_memo, time_content = self.metadata.metadata_time()
    # On failure or an empty result marker, clear the content: None maps to
    # SQL null, which the jsonb column accepts, whereas '' does not.
    if time_result == self.DB_False or CUtils.equal_ignore_case(time_result, ''):
        time_content = None
    CFactory().give_me_db(self.file_info.db_server_id).execute(
        '''
        update dm2_storage_object
        set dso_time_result = :dso_time_result
            , dso_time_parsermemo = :dso_time_parsermemo
            , dso_time = :dso_time
        where dsoid = :dsoid
        ''',
        {
            'dsoid': self.object_id,
            'dso_time_result': time_result,
            'dso_time_parsermemo': time_memo,
            'dso_time': time_content
        }
    )
    return CResult.merge_result(self.Success, '时间元数据处理完毕!')
def parser_metadata_spatial_after_qa(self, parser: CMetaDataParser):
    """Apply the plugin-configured coordinate system after the spatial QA
    step; falls back to the parent behavior when none is configured."""
    result = super().parser_metadata_spatial_after_qa(parser)
    try:
        prj_project = CUtils.dict_value_by_name(
            self.get_information(), self.Plugins_Info_Coordinate_System, '')
        if not CUtils.equal_ignore_case(prj_project, ''):
            load_message = '元数据文件[{0}]成功加载! '.format(
                self.file_info.file_name_with_full_path)
            # Record both the configured projection and its custom source.
            parser.metadata.set_metadata_spatial(
                self.DB_True, load_message,
                self.Spatial_MetaData_Type_Prj_Project, prj_project)
            parser.metadata.set_metadata_spatial(
                self.DB_True, load_message,
                self.Spatial_MetaData_Type_Prj_Source, self.Prj_Source_Custom)
    except Exception as error:
        parser.metadata.set_metadata_spatial(
            self.DB_False,
            '元数据文件[{0}]格式不合法, 无法处理! 详细错误为: {1}'.format(
                self.file_info.file_name_with_full_path, error.__str__()),
            self.MetaDataFormat_Text, '')
        return CResult.merge_result(
            self.Exception,
            '元数据文件[{0}]格式不合法, 无法处理! '.format(
                self.file_info.file_name_with_full_path))
    return result
def process_metadata_bus_dict(self):
    """Parse the dataset's business-metadata xml into a dict and cache it
    on this instance via set_metadata_bus_dict."""
    dataset = self._dataset
    class_plugins = self.get_class_plugins()
    try:
        metadata_xml = CXml()
        metadata_xml.load_xml(
            dataset.value_by_name(0, 'dsometadataxml_bus', ''))
        # Locate the auxiliary metadata files next to the browse image.
        view_path = settings.application.xpath_one(
            self.Path_Setting_MetaData_Dir_View, None)
        browser_path = CFile.file_path(
            dataset.value_by_name(0, 'dso_browser', None))
        filename_dict = class_plugins.get_multiple_metadata_bus_filename_with_path(
            CFile.join_file(view_path, browser_path))
        result, metadata_bus_dict = class_plugins.metadata_bus_xml_to_dict(
            metadata_xml, filename_dict)
        self.set_metadata_bus_dict(metadata_bus_dict)
        return result
    except Exception as error:
        return CResult.merge_result(
            self.Failure,
            '卫星数据的业务元数据的详细内容解析出错!原因为{0}'.format(error.__str__()))
def process(self) -> str: """ 在这里处理将__file_info__中记录的对象所对应的文件或目录信息, 根据__detail_*变量的定义, 进行目录扫描, 记录到dm2_storage_object_detail中 :return: """ self._before_process() if self._only_stat_file: result = self.__stat_object_detail_of_schema() if not CResult.result_success(result): return result else: if not CUtils.equal_ignore_case(self.__detail_file_path__, ''): list_file_fullname = CFile.file_or_dir_fullname_of_path( self.__detail_file_path__, self.__detail_file_recurse__, self.__detail_file_match_text__, self.__detail_file_match_type__) result = self.__inbound_object_detail_of_schema( list_file_fullname) if not CResult.result_success(result): return result if len(self._file_custom_list) > 0: return self.inbound_object_detail_of_custom(self._file_custom_list) else: return CResult.merge_result(self.Success, '对象附属文件处理成功结束!')
def process(self) -> str: """ :return: """ return CResult.merge_result( self.Success, '文件[{0}]成功加载! '.format(self.transformer_src_filename))
def save_metadata_view(self) -> str:
    """Persist the object's visual metadata (browse/thumb images)."""
    view_result, view_memo, thumb_file, browse_file = self.metadata.metadata_view()
    # On failure the image paths are cleared before being stored.
    if view_result == self.DB_False:
        thumb_file = None
        browse_file = None
    CFactory().give_me_db(self.file_info.db_server_id).execute(
        '''
        update dm2_storage_object
        set dso_view_result = :dso_view_result
            , dso_view_parsermemo = :dso_view_parsermemo
            , dso_browser = :dso_browser
            , dso_thumb = :dso_thumb
        where dsoid = :dsoid
        ''',
        {
            'dsoid': self.object_id,
            'dso_view_result': view_result,
            'dso_view_parsermemo': view_memo,
            'dso_browser': browse_file,
            'dso_thumb': thumb_file
        }
    )
    return CResult.merge_result(self.Success, '可视化元数据处理完毕!')
def __import_each_record(self, data_source: CDataSetSeqReader, data_target: CTable) -> str:
    """
    Copy the current record of *data_source* into *data_target* and save it.

    Each target column is filled according to its configured "data set
    method": SQL expression, geometry, array, null, file content or a plain
    value. Placeholders in the configured templates are resolved first by
    the importer's own context and then against the source record.
    :param data_source: sequential reader positioned at the record to import
    :param data_target: table object receiving the record
    :return: a CResult string describing success or failure
    """
    data_target.column_list.reset()
    data_source_record = data_source.record_as_dict()
    for column_index in range(data_target.column_list.size()):
        column_obj = data_target.column_list.column_by_index(column_index)
        column_name = column_obj.name
        column_data_set_method = self.__find_column_data_set_method(column_obj, data_source)
        # Columns without a configured set method are left untouched.
        if column_data_set_method is None:
            continue
        column_value_set_type = CUtils.dict_value_by_name(
            column_data_set_method, self.Name_Type, self.Name_Common)
        if CUtils.equal_ignore_case(column_value_set_type, self.Name_SQL):
            # Value is a SQL expression evaluated by the database.
            column_value_template = CUtils.dict_value_by_name(
                column_data_set_method, self.Name_Value, '')
            column_value_template = self.replace_placeholder(column_value_template)
            column_value_template = CUtils.replace_placeholder(
                column_value_template, data_source_record)
            column_obj.set_sql(column_value_template)
        elif CUtils.equal_ignore_case(column_value_set_type, self.Name_Geometry):
            # Value is a geometry with an optional SRID.
            column_value_template = CUtils.dict_value_by_name(
                column_data_set_method, self.Name_Value, '')
            column_value_template = self.replace_placeholder(column_value_template)
            column_value_template = CUtils.replace_placeholder(
                column_value_template, data_source_record)
            column_obj.set_geometry(
                column_value_template,
                CUtils.dict_value_by_name(column_data_set_method, self.Name_Srid, None)
            )
        elif CUtils.equal_ignore_case(column_value_set_type, self.Name_Array):
            # A real list is taken as-is; anything else is treated as an
            # array literal string with placeholder substitution.
            column_value_template = CUtils.dict_value_by_name(
                column_data_set_method, self.Name_Value, None)
            if isinstance(column_value_template, list):
                column_obj.set_array(column_value_template)
            else:
                column_value_template = CUtils.any_2_str(column_value_template)
                column_value_template = self.replace_placeholder(column_value_template)
                column_value_template = CUtils.replace_placeholder(
                    column_value_template, data_source_record)
                column_obj.set_array_str(column_value_template)
        elif CUtils.equal_ignore_case(column_value_set_type, self.Name_Null):
            column_obj.set_null()
        elif CUtils.equal_ignore_case(column_value_set_type, self.Name_File):
            # Value is loaded from a file; format and encoding are also
            # configurable templates.
            column_value_template = CUtils.dict_value_by_name(
                column_data_set_method, self.Name_Value, '')
            column_value_template = self.replace_placeholder(column_value_template)
            column_value_template = CUtils.replace_placeholder(
                column_value_template, data_source_record)
            file_format = CUtils.dict_value_by_name(
                column_data_set_method, self.Name_Format, self.Name_Binary)
            file_format = self.replace_placeholder(file_format)
            # NOTE(review): this line uses a fresh record_as_dict() instead
            # of the cached data_source_record — presumably equivalent;
            # confirm before unifying.
            file_format = CUtils.replace_placeholder(
                file_format, data_source.record_as_dict())
            file_encoding = CUtils.dict_value_by_name(
                column_data_set_method, self.Name_Encoding, self.Encoding_UTF8)
            file_encoding = self.replace_placeholder(file_encoding)
            file_encoding = CUtils.replace_placeholder(
                file_encoding, data_source_record)
            column_obj.set_value_from_file(
                column_value_template, file_format, file_encoding)
        else:
            # Default: a plain value with placeholder substitution.
            column_value_template = CUtils.dict_value_by_name(
                column_data_set_method, self.Name_Value, '')
            column_value_template = self.replace_placeholder(column_value_template)
            column_value_template = CUtils.replace_placeholder(
                column_value_template, data_source_record)
            column_obj.set_value(column_value_template)
    data_target.save_data()
    return CResult.merge_result(self.Success, '数据入库成功!')
def execute(self) -> str:
    """Check every inbound storage queued for scanning and create an inbound
    mission for each one that does not already have one running."""
    inbound_storage_list = CFactory().give_me_db(self.get_mission_db_id()).all_row(
        '''
        select dstid, dsttitle
        from dm2_storage
        where dsttype = '{0}' and dstscanstatus = {1}
        '''.format(self.Storage_Type_InBound, self.ProcStatus_InQueue))
    if inbound_storage_list.is_empty():
        return CResult.merge_result(CResult.Success, '本次没有发现需要启动入库的任务!')
    for record_index in range(inbound_storage_list.size()):
        storage_id = inbound_storage_list.value_by_name(record_index, 'dstid', '')
        storage_title = inbound_storage_list.value_by_name(record_index, 'dsttitle', '')
        CLogger().debug('正在检查和启动存储[{0}]的定时扫描...'.format(storage_title))
        try:
            if self.inbound_mission_existed(storage_id):
                # A mission is already running; skip this cycle.
                self.update_storage_status(
                    storage_id, self.ProcStatus_Finished,
                    '当前存储下发现正在进行中的入库任务, 本次定时扫描将被忽略! ')
            else:
                self.create_inbound_mission(storage_id)
                self.update_storage_status(
                    storage_id, self.ProcStatus_Finished,
                    '系统已创建入库批次, 启动扫描! ')
        except Exception as error:
            # Record the error on the storage row and keep scanning the rest.
            CFactory().give_me_db(self.get_mission_db_id()).execute(
                '''
                update dm2_storage
                set dstscanstatus = {0}
                    , dstlastmodifytime=now()
                    , dstscanmemo=:message
                where dstid = :storage_id
                '''.format(self.ProcStatus_Error),
                {
                    'storage_id': storage_id,
                    'message': '系统启动扫描任务过程中出现错误, 详细信息为: {0}!'.format(
                        error.__str__())
                })
            continue
    return CResult.merge_result(self.Success, '本次分析定时扫描任务成功结束!')
def notify_inbound(self, inbound_id: str) -> str:
    """Report that inbound batch *inbound_id* has been queued for this module."""
    module_title = CUtils.dict_value_by_name(
        self.information(), self.Name_Title, '')
    return CResult.merge_result(
        self.Success,
        '批次[{0}]已经推送给模块[{1}]队列! '.format(inbound_id, module_title))
def process(self) -> str:
    """Open the source reader and target table, then run the import.

    NOTE(review): the failure message says '数据导出' (export) although this
    method performs an import — confirm the wording with the module owner.
    """
    try:
        return self.import_data(self.open_source(), self.open_target())
    except Exception as error:
        result_message = '数据导出过程出现错误, 详细错误为: {0}'.format(error.__str__())
        CLogger().debug(result_message)
        return CResult.merge_result(self.Failure, result_message)
def process(self) -> str: """ :return: """ super().process() file_metadata_name_with_path = self.transformer_src_filename try: if CUtils.equal_ignore_case(self.transformer_type, self.Transformer_DOM_MDB): xml_obj = self.mdb_to_xml(file_metadata_name_with_path) elif CUtils.equal_ignore_case(self.transformer_type, self.Transformer_DOM_MAT): xml_obj = self.mat_to_xml(file_metadata_name_with_path) elif CUtils.equal_ignore_case(self.transformer_type, self.Transformer_DOM_XLS): xml_obj = self.xls_to_xml(file_metadata_name_with_path) elif CUtils.equal_ignore_case(self.transformer_type, self.Transformer_DOM_XLSX): xml_obj = self.xls_to_xml(file_metadata_name_with_path) else: raise if xml_obj is not None: super().metadata.set_metadata_bus( self.Success, '元数据文件[{0}]成功加载! '.format(file_metadata_name_with_path), self.MetaDataFormat_XML, xml_obj.to_xml()) return CResult.merge_result( self.Success, '元数据文件[{0}]成功加载! '.format(file_metadata_name_with_path)) else: raise except Exception as error: super().metadata.set_metadata_bus( self.Exception, '元数据文件[{0}]格式不合法, 无法处理! 错误原因为{1}'.format( file_metadata_name_with_path, error.__str__()), self.MetaDataFormat_Text, '') return CResult.merge_result( self.Exception, '元数据文件[{0}]格式不合法, 无法处理! 错误原因为{1}'.format( file_metadata_name_with_path, error.__str__()))
def ib_files_move(self, src_dir, dest_dir, only_move_subpath_and_file: bool = False):
    """
    Move the source data into the target directory.
    . When only_move_subpath_and_file is set, only the files and
      sub-directories under src_dir are moved, not src_dir itself.
    todo(optimization): when both directories live on the same storage a
    directory rename should replace this per-file move.
    :param only_move_subpath_and_file: move only the content of src_dir
    :param dest_dir: target directory
    :param src_dir: source directory
    :return: a CResult string describing success or failure
    """
    if only_move_subpath_and_file:
        result, failure_file_list = CFile.move_subpath_and_file_of_path_to(
            src_dir, dest_dir)
    else:
        result, failure_file_list = CFile.move_path_to(src_dir, dest_dir)
    if result:
        return CResult.merge_result(
            self.Success,
            '源目录[{0}]已经成功的, 完整的移动至目录[{1}]下! '.format(src_dir, dest_dir))
    # Report at most three failing files; flag truncation with '...'.
    more_failure_file = False
    max_failure_file_count = len(failure_file_list)
    if max_failure_file_count > 3:
        max_failure_file_count = 3
        more_failure_file = True
    message = ''
    # Bug fix: the old loop listed every failing file, ignoring the
    # three-entry cap computed above.
    for failure_file in failure_file_list[:max_failure_file_count]:
        message = CUtils.str_append(message, failure_file)
    if more_failure_file:
        message = CUtils.str_append(message, '...')
    if max_failure_file_count > 0:
        message = CUtils.str_append(message, '上述数据向核心存储中迁移时出现错误, 请检查后重试入库! ')
    # Bug fix: typo '想核心存储' corrected to '向核心存储'.
    return CResult.merge_result(
        self.Failure,
        '源目录[{0}]向核心存储目录[{1}]下入库时出现错误! \n{2}'.format(
            src_dir, dest_dir, message))
def process(self) -> str: """ todo 负责人 赵宇飞 在这里提取矢量数据的快视图, 将元数据文件存储在self.file_content.view_root_dir下 注意返回的串中有快视图和拇指图的文件名 注意: 如果出现内存泄漏现象, 则使用新建进程提取元数据, 放置到文件中, 在本进程中解析元数据!!! :return: """ result = CResult.merge_result(self.Success, '处理完毕!') result = CResult.merge_result_info(result, self.Name_Browse, '/aa/bb_browse.png') result = CResult.merge_result_info(result, self.Name_Thumb, '/aa/bb_thumb.png') return result
def process(self) -> str: """ 完成 负责人 张源博、赵宇飞 在这里提取影像数据的快视图, 将元数据文件存储在self.file_content.view_root_dir下 注意返回的串中有快视图和拇指图的文件名 注意: 如果出现内存泄漏现象, 则使用新建进程提取元数据, 放置到文件中, 在本进程中解析元数据!!! :return: """ # 获取对象类型 type = 'default' group = 'default' catalog = 'default' # 构建数据对象object对应的识别插件,获取get_information里面的信息 class_classified_obj = CObject.get_plugins_instance_by_object_id(self.file_info.db_server_id, self.object_id) if class_classified_obj is not None: plugins_info = class_classified_obj.get_information() type = CUtils.dict_value_by_name(plugins_info, class_classified_obj.Plugins_Info_Type, 'default') group = CUtils.dict_value_by_name(plugins_info, class_classified_obj.Plugins_Info_Group, 'default') catalog = CUtils.dict_value_by_name(plugins_info, class_classified_obj.Plugins_Info_Catalog, 'default') create_time = CTime.today() create_format_time = CTime.format_str(create_time, '%Y%m%d') year = CTime.format_str(create_time, '%Y') month = CTime.format_str(create_time, '%m') day = CTime.format_str(create_time, '%d') sep = CFile.sep() # 操作系统的不同处理分隔符不同 sep_list = [catalog, group, type, year, month, day] relative_path_part = sep.join(sep_list) # 相对路径格式 view_relative_path_browse = r'{2}{0}{2}{1}_browse.png'.format(relative_path_part, self.object_id, sep) view_relative_path_thumb = r'{2}{0}{2}{1}_thumb.jpg'.format(relative_path_part, self.object_id, sep) view_relative_path_geotiff = r'{2}{0}{2}{1}_browse.tiff'.format(relative_path_part, self.object_id, sep) browse_full_path = CFile.join_file(self.file_content.view_root_dir, view_relative_path_browse) thumb_full_path = CFile.join_file(self.file_content.view_root_dir, view_relative_path_thumb) geotiff_full_path = CFile.join_file(self.file_content.view_root_dir, view_relative_path_geotiff) # 进程调用模式 json_out_view = CJson() json_out_view.set_value_of_name('image_path', self.file_info.file_name_with_full_path) json_out_view.set_value_of_name('browse_full_path', browse_full_path) json_out_view.set_value_of_name('thumb_full_path', 
thumb_full_path) json_out_view.set_value_of_name('geotiff_full_path', geotiff_full_path) result_view = CProcessUtils.processing_method(self.create_view_json, json_out_view) # result_view = self.create_view(self.file_info.file_name_with_full_path, browse_full_path, thumb_full_path, # geotiff_full_path) # result_view = self.create_view_json(json_out_view) if CResult.result_success(result_view): result = CResult.merge_result(self.Success, '处理完毕!') result = CResult.merge_result_info(result, self.Name_Browse, view_relative_path_browse) result = CResult.merge_result_info(result, self.Name_Thumb, view_relative_path_thumb) result = CResult.merge_result_info(result, self.Name_Browse_GeoTiff, view_relative_path_geotiff) else: result = result_view return result
def process(self) -> str: """ 完成元数据的所有处理操作 1. 数据解析处理 1. 文件命名检查 1. 文件完整性检查 1. 文件元数据提取并分析 1. 业务元数据提取并分析 :return: """ return CResult.merge_result(self.Success, '处理完毕!')
def sync(self) -> str: """ 处理数管中识别的对象, 与第三方模块的同步 . 如果第三方模块自行处理, 则无需继承本方法 . 如果第三方模块可以处理, 则在本模块中, 从数据库中提取对象的信息, 写入第三方模块的数据表中, 或者调用第三方模块接口 注意: 在本方法中, 不要用_quality_info属性, 因为外部调用方考虑的效率因素, 没有传入!!! :return: """ return CResult.merge_result( self.Success, '对象[{0}]的同步机制无效, 第三方系统将自行从数据中心提取最新数据! '.format(self._obj_name))