def check_src_ib_files_not_locked(self, root_path, parent_path):
    """
    Check that no file under the given directory is locked.

    1. Returns True only when every file is unlocked.
    2. If any file is locked, returns False and lists the locked files
       (at most 3, followed by '...') in the returned message.
    todo(note): checking every file for locks is extremely slow on
    tiled/sliced data sets!

    :param root_path: root directory
    :param parent_path: directory relative to root_path; it is joined onto
                        root_path and prefixes file names in the message
    :return: tuple of
        1. True when all files are unlocked and ready for ingestion
        2. message describing the locked files (empty when none)
    """
    parent_path = CFile.join_file(root_path, parent_path)
    locked_file_list = CFile.find_locked_file_in_path(parent_path)

    more_locked_file = False
    max_locked_file_count = len(locked_file_list)
    if max_locked_file_count > 3:
        max_locked_file_count = 3
        more_locked_file = True

    message = ''
    # Bug fix: only list the first max_locked_file_count files. The original
    # iterated the whole list, so the 3-file cap and the '...' suffix never
    # actually truncated anything.
    for locked_file in locked_file_list[:max_locked_file_count]:
        message = CUtils.str_append(
            message, CFile.join_file(parent_path, locked_file))
    if more_locked_file:
        message = CUtils.str_append(message, '...')
    if max_locked_file_count > 0:
        message = CUtils.str_append(message, '被其他应用占用了, 无法入库, 请检查解除锁定后重试入库! ')
    return max_locked_file_count == 0, message
def __prepare_where_condition(self, column: CColumn, sql_text: str, sql_params: dict):
    """
    Append an equality condition for one column to a WHERE clause.

    Depending on the column type's set_value_method, the condition is either
    a plain bind parameter or the column type's value template with the
    parameter substituted in.

    :param column: column whose value is matched
    :param sql_text: WHERE clause accumulated so far; conditions are joined
                     with ' and '
    :param sql_params: bind-parameter dict; the column's text value is added
                       under the column's parameter name
    :return: tuple of (updated sql_text, updated sql_params)
    """
    column_type = self.__database.db_column_type_by_name(
        column.db_column_type)
    if CUtils.equal_ignore_case(column_type.set_value_method,
                                self.DB_Column_Set_Method_Param):
        sql_text = CUtils.str_append(
            sql_text,
            '{0}=:{1}'.format(column.name, self.__param_name(column.name)),
            ' and ')
        sql_params[self.__param_name(column.name)] = CUtils.any_2_str(
            CUtils.dict_value_by_name(column.value, self.Name_Text, ''))
    elif CUtils.equal_ignore_case(column_type.set_value_method,
                                  self.DB_Column_Set_Method_Function):
        sql_text = CUtils.str_append(
            sql_text,
            '{0}=:{1}'.format(column.name, self.__param_name(column.name)),
            ' and ')
        # Bug fix: the original passed a *set* literal ({a, b}) to
        # replace_placeholder; it must be a mapping ({a: b}), as done in
        # __prepare_update_data for the same template substitution.
        sql_params[self.__param_name(
            column.name)] = CUtils.replace_placeholder(
                column_type.set_value_template,
                dict({
                    self.Name_Value: CUtils.any_2_str(
                        CUtils.dict_value_by_name(column.value,
                                                  self.Name_Text, ''))
                }))
    return sql_text, sql_params
def check_all_ib_file_or_path_existed(self, ib_id):
    """
    Verify that the metadata recorded for a pending ingestion batch still
    matches the physical files on disk.

    Returns a CResult:
    . Success when every recorded file exists and its modify time and size
      match the database record.
    . Failure otherwise, with the mismatched file names (at most 3,
      followed by '...') in the message.

    :param ib_id: ingestion batch id (dsf_ib_id)
    :return: CResult
    """
    invalid_file_list = []
    more_failure_file = False
    sql_all_ib_file = '''
        select coalesce(dm2_storage.dstownerpath, dm2_storage.dstunipath) || dm2_storage_file.dsffilerelationname as file_name
            , dm2_storage_file.dsffilesize as file_size
            , dm2_storage_file.dsffilemodifytime as file_modify_time
        from dm2_storage_file
            left join dm2_storage on dm2_storage.dstid = dm2_storage_file.dsfstorageid
        where dsf_ib_id = :ib_id
    '''
    params_all_ib_file = {'ib_id': ib_id}
    ds_ib_file = CFactory().give_me_db(self.get_mission_db_id()).all_row(
        sql_all_ib_file, params_all_ib_file)
    for ds_ib_file_index in range(ds_ib_file.size()):
        file_valid = True
        file_name = ds_ib_file.value_by_name(ds_ib_file_index, 'file_name', '')
        if not CUtils.equal_ignore_case(file_name, ''):
            if not CFile.file_or_path_exist(file_name):
                file_valid = False
            elif not CUtils.equal_ignore_case(
                    CFile.file_modify_time(file_name),
                    ds_ib_file.value_by_name(ds_ib_file_index,
                                             'file_modify_time', '')):
                file_valid = False
            elif CFile.file_size(file_name) != ds_ib_file.value_by_name(
                    ds_ib_file_index, 'file_size', 0):
                file_valid = False
        if not file_valid:
            # Bug fix: was `<= 3`, which let a 4th file into the list before
            # truncating; the rest of this module caps listings at 3 files.
            if len(invalid_file_list) < 3:
                invalid_file_list.append(file_name)
            else:
                more_failure_file = True
                break
    if len(invalid_file_list) > 0:
        message = ''
        for invalid_file in invalid_file_list:
            message = CUtils.str_append(message, invalid_file)
        if more_failure_file:
            message = CUtils.str_append(message, '...')
        message = CUtils.str_append(message, '上述数据与库中记录不统一, 请重新扫描入库! ')
        return CResult.merge_result(self.Failure, message)
    else:
        return CResult.merge_result(self.Success, '所有文件均存在, 且与库中记录统一! ')
def ib_files_move(self, src_dir, dest_dir, only_move_subpath_and_file: bool = False):
    """
    Move source files into the destination directory.

    Note: this moves the files and sub-directories of the source directory
    into the destination.
    todo(optimize): when both directories are on the same storage, a plain
    directory move would be far cheaper — only use this method across
    different storages!

    :param only_move_subpath_and_file: when True, move only the contents
           (files and sub-directories) of src_dir, not src_dir itself
    :param dest_dir: destination directory
    :param src_dir: source directory
    :return: CResult; on failure the message lists up to 3 files that could
             not be moved, followed by '...'
    """
    if only_move_subpath_and_file:
        result, failure_file_list = CFile.move_subpath_and_file_of_path_to(
            src_dir, dest_dir)
    else:
        result, failure_file_list = CFile.move_path_to(src_dir, dest_dir)
    if result:
        return CResult.merge_result(
            self.Success,
            '源目录[{0}]已经成功的, 完整的移动至目录[{1}]下! '.format(src_dir, dest_dir))
    else:
        more_failure_file = False
        max_failure_file_count = len(failure_file_list)
        if max_failure_file_count > 3:
            max_failure_file_count = 3
            more_failure_file = True

        message = ''
        # Bug fix: only list the first max_failure_file_count files. The
        # original iterated the whole list, so the 3-file cap and the '...'
        # suffix never actually truncated anything.
        for failure_file in failure_file_list[:max_failure_file_count]:
            message = CUtils.str_append(message, failure_file)
        if more_failure_file:
            message = CUtils.str_append(message, '...')
        if max_failure_file_count > 0:
            message = CUtils.str_append(message, '上述数据向核心存储中迁移时出现错误, 请检查后重试入库! ')
        # Bug fix in message text: '想核心存储' was a typo for '向核心存储'.
        return CResult.merge_result(
            self.Failure,
            '源目录[{0}]向核心存储目录[{1}]下入库时出现错误! \n{2}'.format(
                src_dir, dest_dir, message))
def __prepare_if_exists(self):
    """
    Build a minimal SELECT used to test whether a matching row exists.

    Selects a single column — the first primary-key column if one is
    defined, otherwise the first column — and appends the WHERE clause
    produced by __prepare_where.

    :return: tuple of (sql text, bind-parameter dict)
    """
    column_list = self.__column_list
    # Prefer the first primary-key column as the projection; fall back to
    # the first column when no primary key is declared.
    primary_column = next(
        (column_list.column_by_index(idx)
         for idx in range(column_list.size())
         if column_list.column_by_index(idx).is_primary_key),
        None)
    if primary_column is not None:
        select_column = primary_column.name
    else:
        select_column = column_list.column_by_index(0).name

    where_text, where_params = self.__prepare_where()
    query_text = CUtils.str_append(
        'select {0} from {1}'.format(select_column, self.__table_name),
        where_text, ' where ')
    return query_text, where_params
def process(self) -> str:
    """
    Run tag recognition over the file/directory recorded in __file_info__,
    driven by the rules in self._tags_parser_rule.

    Each rule dict supplies a catalog, a tag field, a keyword list, a data
    sample mode, an optional separator, an enable flag and a fuzzy-matching
    flag. For every enabled, fully-specified rule, a sample string is built
    from the object's name/path, split by the separator, and fed to
    process_tag.

    :return: CResult text. NOTE(review): even when per-rule errors occur,
             this returns Success with the errors embedded in the message —
             presumably deliberate ("processing finished, with errors");
             confirm before relying on the status.
    """
    # Invoke the parent implementation first.
    super().process()
    if not isinstance(self._tags_parser_rule, list):
        return CResult.merge_result(self.Failure, '标签解析规则必须是一个数组, 您的配置有误, 请检查!')
    error_list = []
    for tags_parser in self._tags_parser_rule:
        catalog = CUtils.any_2_str(
            CUtils.dict_value_by_name(tags_parser, self.Name_Catalog, ''))
        tag_field_name = CUtils.any_2_str(
            CUtils.dict_value_by_name(tags_parser, self.Name_Tag, ''))
        keyword_field_list = CUtils.dict_value_by_name(
            tags_parser, self.Name_Keyword, None)
        data_sample = CUtils.any_2_str(
            CUtils.dict_value_by_name(tags_parser, self.Name_Data_Sample, ''))
        separator = CUtils.dict_value_by_name(tags_parser, self.Name_Separator, None)
        enable = CUtils.dict_value_by_name(tags_parser, self.Name_Enable, True)
        fuzzy_matching = CUtils.dict_value_by_name(
            tags_parser, self.Name_Fuzzy_Matching, False)
        # Skip rules that are disabled or missing any mandatory field.
        if not enable:
            continue
        if CUtils.equal_ignore_case(tag_field_name, ''):
            continue
        if CUtils.equal_ignore_case(catalog, ''):
            continue
        if keyword_field_list is None:
            continue
        if len(keyword_field_list) == 0:
            continue
        if CUtils.equal_ignore_case(data_sample, self.Tag_DataSample_MainName):
            # Main-name mode: append the object name and alias to the file's
            # main name, joined as path segments, and classify the result.
            tag_data_sample_str = CFile.join_file(
                CFile.join_file(
                    CUtils.any_2_str(self.file_info.file_main_name),
                    self.object_name), self.__file_alias_name)
        elif CUtils.equal_ignore_case(data_sample,
                                      self.Tag_DataSample_RelationPath):
            # Relative-path mode: classify the object's path relative to
            # its storage root.
            tag_data_sample_str = CUtils.any_2_str(
                self.file_info.file_path_with_rel_path)
        else:
            # Default mode: append the object name and alias to the main
            # name *including* its relative path, joined as path segments.
            tag_data_sample_str = CFile.join_file(
                CFile.join_file(
                    CUtils.any_2_str(
                        self.file_info.file_main_name_with_rel_path),
                    self.object_name), self.__file_alias_name)
        try:
            tag_data_sample_list = CUtils.split(tag_data_sample_str, separator)
            self.process_tag(catalog, tag_field_name, keyword_field_list,
                             tag_data_sample_list, fuzzy_matching)
        except Exception as error:
            # Collect per-rule failures; remaining rules still run.
            error_list.append(
                '对象[{0}]在处理标签库[{1}]分类[{2}]有误, 详细错误信息为: {3}'.format(
                    self.object_name, catalog, tag_data_sample_str,
                    error.__str__()))
    if len(error_list) == 0:
        return CResult.merge_result(
            self.Success,
            '文件或目录[{0}]对象业务分类解析成功完成!'.format(
                self.file_info.file_main_name_with_rel_path))
    else:
        error_message = '文件或目录[{0}]的业务分类解析处理完毕, 但解析过程中出现了错误, 具体如下: \n'.format(
            self.file_info.file_main_name_with_rel_path)
        for error_str in error_list:
            error_message = CUtils.str_append(error_message, error_str)
        return CResult.merge_result(self.Success, error_message)
def process_mission(self, dataset):
    """
    Parse the metadata of one storage object.

    Flow: give up permanently after too many retries; skip objects whose
    file no longer exists; look up the nearest ancestor directory's scan
    rule; build the file-info object and its type plugin; extract and
    validate the five metadata facets (entity, business, view, time,
    spatial); persist status/exception via db_update_object_* and always
    tear down the plugin's virtual content.

    :param dataset: one-row dataset describing the object (dsoid,
                    dsodatatype, dsoobjecttype, dsoobjectname, retry_times,
                    dsometadataparsememo)
    :return: CResult text
    """
    dso_id = dataset.value_by_name(0, 'dsoid', '')
    dso_data_type = dataset.value_by_name(0, 'dsodatatype', '')
    dso_object_type = dataset.value_by_name(0, 'dsoobjecttype', '')
    dso_object_name = dataset.value_by_name(0, 'dsoobjectname', '')
    CLogger().debug('开始处理对象: {0}.{1}.{2}.{3}的元数据'.format(
        dso_id, dso_data_type, dso_object_type, dso_object_name))

    # Abandon the job permanently once the retry budget is exhausted.
    dso_object_retry_times = dataset.value_by_name(0, 'retry_times', 0)
    if dso_object_retry_times >= self.abnormal_job_retry_times():
        dso_object_last_process_memo = CUtils.any_2_str(
            dataset.value_by_name(0, 'dsometadataparsememo', None))
        process_result = CResult.merge_result(
            self.Failure,
            '{0}, \n系统已经重试{1}次, 仍然未能解决, 请人工检查修正后重试!'.format(
                dso_object_last_process_memo, dso_object_retry_times))
        self.db_update_object_exception(dso_id, process_result,
                                        self.ProcStatus_Error)
        return process_result

    # A missing file is a normal end state, not an error.
    ds_file_info = self.get_object_info(dso_id, dso_data_type)
    if ds_file_info.value_by_name(0, 'query_object_valid',
                                  self.DB_False) == self.DB_False:
        process_result = CResult.merge_result(
            self.Success,
            '文件或目录[{0}]不存在,元数据无法解析, 元数据处理正常结束!'.format(
                ds_file_info.value_by_name(0, 'query_object_fullname', '')))
        self.db_update_object_status(dso_id, process_result)
        return process_result

    # Pick the scan rule of the deepest ancestor directory that defines one.
    sql_get_rule = '''
        select dsdScanRule
        from dm2_storage_directory
        where dsdStorageid = :dsdStorageID
            and Position(dsddirectory || '{0}' in :dsdDirectory) = 1
            and dsdScanRule is not null
        order by dsddirectory desc
        limit 1
    '''.format(CFile.sep())
    rule_ds = CFactory().give_me_db(self.get_mission_db_id()).one_row(
        sql_get_rule, {
            'dsdStorageID':
            ds_file_info.value_by_name(0, 'query_object_storage_id', ''),
            'dsdDirectory':
            ds_file_info.value_by_name(0, 'query_object_relation_path', '')
        })
    # Bug fix: the column selected above is dsdScanRule; the original read
    # 'dsScanRule', which always fell back to the '' default.
    ds_rule_content = rule_ds.value_by_name(0, 'dsdScanRule', '')

    file_info_obj = CDMFilePathInfoEx(
        dso_data_type,
        ds_file_info.value_by_name(0, 'query_object_fullname', ''),
        ds_file_info.value_by_name(0, 'query_object_storage_id', ''),
        ds_file_info.value_by_name(0, 'query_object_file_id', ''),
        ds_file_info.value_by_name(0, 'query_object_file_parent_id', ''),
        ds_file_info.value_by_name(0, 'query_object_owner_id', ''),
        self.get_mission_db_id(), ds_rule_content)
    plugins_obj = CPluginsMng.plugins(file_info_obj, dso_object_type)
    if plugins_obj is None:
        process_result = CResult.merge_result(
            self.Failure,
            '文件或目录[{0}]的类型插件[{1}]不存在,元数据无法解析, 处理结束!'.format(
                ds_file_info.value_by_name(0, 'query_object_fullname', ''),
                dso_object_type))
        self.db_update_object_status(dso_id, process_result,
                                     self.ProcStatus_Error)
        return process_result

    plugins_obj.classified()
    if not plugins_obj.create_virtual_content():
        process_result = CResult.merge_result(
            self.Failure,
            '文件或目录[{0}]的内容提取失败, 元数据无法提取!'.format(
                ds_file_info.value_by_name(0, 'query_object_fullname', '')))
        self.db_update_object_status(dso_id, process_result)
        return process_result
    try:
        metadata_parser = CMetaDataParser(dso_id, dso_object_name,
                                          file_info_obj,
                                          plugins_obj.file_content,
                                          plugins_obj.get_information())
        process_result = plugins_obj.parser_metadata(metadata_parser)
        if CResult.result_success(process_result):
            # Check each metadata facet; collect a message per failed step.
            message = ''
            step_success = (
                metadata_parser.metadata.metadata_extract_result !=
                self.DB_False)
            if not step_success:
                message = CUtils.str_append(message, '实体元数据解析出现错误', ', ')
            all_step_success = step_success

            step_success = metadata_parser.metadata.metadata_bus_extract_result != self.DB_False
            if not step_success:
                # Bug fix: the original reused the entity-metadata message
                # here, so entity and business failures were
                # indistinguishable.
                message = CUtils.str_append(message, '业务元数据解析出现错误', ', ')
            all_step_success = all_step_success and step_success

            step_success = metadata_parser.metadata.metadata_view_extract_result != self.DB_False
            if not step_success:
                message = CUtils.str_append(message, '快视图等可视化元数据解析出现错误',
                                            ', ')
            all_step_success = all_step_success and step_success

            step_success = metadata_parser.metadata.metadata_time_extract_result != self.DB_False
            if not step_success:
                message = CUtils.str_append(message, '时间元数据解析出现错误', ', ')
            all_step_success = all_step_success and step_success

            step_success = metadata_parser.metadata.metadata_spatial_extract_result != self.DB_False
            if not step_success:
                message = CUtils.str_append(message, '空间投影元数据解析出现错误', ', ')
            all_step_success = all_step_success and step_success

            if all_step_success:
                self.db_update_object_status(dso_id, process_result)
                return process_result
            else:
                process_result = CResult.merge_result(self.Failure, message)
                self.db_update_object_status(dso_id, process_result)
                return process_result
        else:
            self.db_update_object_status(dso_id, process_result)
            return process_result
    except Exception as error:
        process_result = CResult.merge_result(
            self.Failure,
            '文件或目录[{0}]元数据解析过程出现异常! 错误原因为: {1}'.format(
                ds_file_info.value_by_name(0, 'query_object_fullname', ''),
                error.__str__()))
        self.db_update_object_exception(dso_id, process_result)
        return process_result
    finally:
        # Always release the virtual content created above.
        plugins_obj.destroy_virtual_content()
def search(self, module_name: str, search_json_obj: CJson, other_option: dict = None) -> CDataSet:
    """
    Search for objects matching the given criteria and return a dataset
    with these columns:
    1. object_id
    2. object_name
    3. object_type
    4. object_data_type
    5. object_parent_id
    6. object_size
    7. object_lastmodifytime

    :param module_name: module name; when set (and not the metadata
           module), results are restricted to objects registered for that
           module in dm2_storage_obj_na, optionally filtered by access level
    :param search_json_obj: search criteria (access, inbound id, tags,
           id/name/type/group of the object definition)
    :param other_option: reserved; currently unused
    :return: CDataSet of matching objects (empty when search_json_obj is None)
    """
    if search_json_obj is None:
        return CDataSet()

    sql_from = ''
    sql_where = ''
    if (not CUtils.equal_ignore_case(module_name, self.ModuleName_MetaData)) and \
            (not CUtils.equal_ignore_case(module_name, '')):
        sql_from = ', dm2_storage_obj_na '
        # Bug fix: the original compared dson_app_id against the literal
        # string 'module_name' instead of the parameter value.
        sql_where = " dm2_storage_obj_na.dson_app_id = '{0}' ".format(
            CUtils.any_2_str(module_name))
        # Bug fix: the join between dm2_storage_object and
        # dm2_storage_obj_na was hard-coded in the final SQL even when
        # dm2_storage_obj_na was not in the FROM list; it belongs here.
        sql_where = CUtils.str_append(
            sql_where,
            'dm2_storage_object.dsoid = dm2_storage_obj_na.dson_object_id',
            ' and ')
        # Access-level filter is meaningful only per-module, so it is also
        # applied only when dm2_storage_obj_na participates in the query.
        condition_obj_access = search_json_obj.xpath_one(
            self.Name_Access, self.DataAccess_Pass)
        if not CUtils.equal_ignore_case(condition_obj_access, ''):
            condition = "dm2_storage_obj_na.dson_object_access = '{0}'".format(
                CUtils.any_2_str(condition_obj_access))
            sql_where = CUtils.str_append(sql_where, condition, ' and ')

    condition_inbound_id = search_json_obj.xpath_one(self.Name_InBound, None)
    if not CUtils.equal_ignore_case(condition_inbound_id, ''):
        # Bug fix: the original referenced table alias dm2_storage_obj,
        # which is not in the FROM list; the column lives on
        # dm2_storage_object.
        condition = "dm2_storage_object.dso_ib_id = '{0}'".format(
            CUtils.any_2_str(condition_inbound_id))
        sql_where = CUtils.str_append(sql_where, condition, ' and ')

    condition_tag = search_json_obj.xpath_one(self.Name_Tag, None)
    if condition_tag is not None:
        if isinstance(condition_tag, list):
            condition = CUtils.list_2_str(condition_tag, "'", ", ", "'", True)
        else:
            condition = CUtils.list_2_str([condition_tag], "'", ", ", "'", True)
        if not CUtils.equal_ignore_case(condition, ''):
            # Bug fix: the PostgreSQL array containment operator is '@>';
            # the original '@ >' (with a space) is a syntax error, as is
            # ':: CHARACTER VARYING[]' with a space after '::'.
            condition = 'dm2_storage_object.dsotags @> array[{0}]::CHARACTER VARYING[]'.format(
                condition)
            sql_where = CUtils.str_append(sql_where, condition, ' and ')

    condition_id = search_json_obj.xpath_one(self.Name_ID, None)
    if condition_id is not None:
        if isinstance(condition_id, list):
            condition = self.__condition_list_2_sql(
                'dm2_storage_object_def.dsodid', condition_id, True)
        else:
            condition = self.__condition_value_like_2_sql(
                'dm2_storage_object_def.dsodid', condition_id, True)
        sql_where = CUtils.str_append(sql_where, condition, ' and ')

    condition_name = search_json_obj.xpath_one(self.Name_Name, None)
    if condition_name is not None:
        if isinstance(condition_name, list):
            condition = self.__condition_list_2_sql(
                'dm2_storage_object_def.dsodname', condition_name, True)
        else:
            condition = self.__condition_value_like_2_sql(
                'dm2_storage_object_def.dsodname', condition_name, True)
        sql_where = CUtils.str_append(sql_where, condition, ' and ')

    condition_type = search_json_obj.xpath_one(self.Name_Type, None)
    if condition_type is not None:
        if isinstance(condition_type, list):
            condition = self.__condition_list_2_sql(
                'dm2_storage_object_def.dsodtype', condition_type, True)
        else:
            condition = self.__condition_value_like_2_sql(
                'dm2_storage_object_def.dsodtype', condition_type, True)
        sql_where = CUtils.str_append(sql_where, condition, ' and ')

    condition_group = search_json_obj.xpath_one(self.Name_Group, None)
    if condition_group is not None:
        if isinstance(condition_group, list):
            condition = self.__condition_list_2_sql(
                'dm2_storage_object_def.dsodgroup', condition_group, True)
        else:
            condition = self.__condition_value_like_2_sql(
                'dm2_storage_object_def.dsodgroup', condition_group, True)
        sql_where = CUtils.str_append(sql_where, condition, ' and ')

    if not CUtils.equal_ignore_case(sql_where, ''):
        sql_where = ' and {0}'.format(sql_where)

    sql_search = '''
        select dm2_storage_object.dsoid as object_id
            , dm2_storage_object.dsoobjectname as object_name
            , dm2_storage_object.dsoobjecttype as object_type
            , dm2_storage_object.dsodatatype as object_data_type
            , dm2_storage_object.dsoparentobjid as object_parent_id
            , dm2_storage_object.dso_volumn_now as object_size
            , dm2_storage_object.dso_obj_lastmodifytime as object_lastmodifytime
        from dm2_storage_object, dm2_storage_object_def {0}
        where dm2_storage_object.dsoobjecttype = dm2_storage_object_def.dsodid {1}
    '''.format(sql_from, sql_where)
    return CFactory().give_me_db(self.db_server_id).all_row(sql_search)
def process_mission(self, dataset) -> str:
    """
    Notify third-party modules about every object of one ingestion batch.

    The module list comes from the batch's notify option when present,
    otherwise from the module_*.py files under the data-access modules
    directory. For each object in the batch and each enabled module, the
    per-module access decision recorded in dso_da_result is read and
    module.notify_object is called; failures are collected and the combined
    outcome is persisted via update_notify_result.

    :param dataset: one-row dataset describing the batch (storage id/title,
                    ib id, relative directory, batch number, options json)
    :return: CResult text
    """
    # NOTE(review): ds_storage_id is read but never used below — confirm
    # whether it was meant to appear in the messages.
    ds_storage_id = dataset.value_by_name(0, 'query_storage_id', '')
    ds_storage_title = dataset.value_by_name(0, 'query_storage_title', '')
    ds_ib_id = dataset.value_by_name(0, 'query_ib_id', '')
    ds_ib_directory_name = dataset.value_by_name(0, 'query_ib_relation_dir',
                                                 '')
    ds_ib_batch_no = dataset.value_by_name(0, 'query_ib_batchno', '')
    ds_ib_option = CUtils.any_2_str(
        dataset.value_by_name(0, 'query_ib_option', ''))
    CLogger().debug('与第三方模块同步的目录为: {0}.{1}'.format(ds_ib_id,
                                                   ds_ib_directory_name))
    data_count = 0
    try:
        # Module list: prefer the explicit list in the batch options;
        # otherwise discover module_*.py files on disk.
        module_name_list = CJson.json_attr_value(
            ds_ib_option, self.Path_IB_Opt_Notify_module, None)
        if module_name_list is None:
            modules_root_dir = CSys.get_metadata_data_access_modules_root_dir(
            )
            module_file_list = CFile.file_or_subpath_of_path(
                modules_root_dir,
                '{0}_*.{1}'.format(self.Name_Module, self.FileExt_Py))
            module_name_list = list()
            for module_file in module_file_list:
                module_name_list.append(CFile.file_main_name(module_file))
        sql_ib_need_notify_object = '''
        select dsoid, dsoobjecttype, dsoobjectname, dso_da_result
        from dm2_storage_object
        where dso_ib_id = :ib_id
        '''
        # Note: this rebinds the `dataset` parameter to the object list.
        dataset = CFactory().give_me_db(self.get_mission_db_id()).all_row(
            sql_ib_need_notify_object, {'ib_id': ds_ib_id})
        if dataset.is_empty():
            result = CResult.merge_result(
                self.Success,
                '存储[{0}]下, 批次为[{1}]的目录[{2}]下无任何对象, 不再通知给第三方应用!'.format(
                    ds_storage_title, ds_ib_batch_no, ds_ib_directory_name))
            self.update_notify_result(ds_ib_id, result)
            return result
        CLogger().debug(
            '存储[{0}]下, 批次为[{1}]的目录[{2}]下有[{3}]个对象等待通知给第三方应用!'.format(
                ds_storage_title, ds_ib_batch_no, ds_ib_directory_name,
                dataset.size()))
        data_count = dataset.size()
        error_message = ''
        for data_index in range(data_count):
            record_object = dataset.record(data_index)
            object_id = CUtils.dict_value_by_name(record_object, 'dsoid', '')
            object_type = CUtils.dict_value_by_name(record_object,
                                                    'dsoobjecttype', '')
            object_name = CUtils.dict_value_by_name(record_object,
                                                    'dsoobjectname', '')
            object_da_result_text = CUtils.any_2_str(
                CUtils.dict_value_by_name(record_object, 'dso_da_result', ''))
            object_da_result = CJson()
            object_da_result.load_json_text(object_da_result_text)
            for module_name in module_name_list:
                module_obj = CObject.create_module_instance(
                    CSys.get_metadata_data_access_modules_root_name(),
                    module_name, self.get_mission_db_id())
                module_id = module_name
                module_title = CUtils.dict_value_by_name(
                    module_obj.information(), self.Name_Title, '')
                module_enable = CUtils.dict_value_by_name(
                    module_obj.information(), self.Name_Enable, True)
                if not module_enable:
                    continue
                # Per-module access decision stored under
                # <module_id>.result / <module_id>.message; absent entries
                # default to "forbid".
                module_access = object_da_result.xpath_one(
                    '{0}.{1}'.format(module_id, self.Name_Result),
                    self.DataAccess_Forbid)
                module_access_memo = object_da_result.xpath_one(
                    '{0}.{1}'.format(module_id, self.Name_Message), '')
                CLogger().debug(
                    '存储[{0}]下, 批次为[{1}]的目录[{2}]下的对象[{3}], 与模块[{4}]的访问权限为[{5}]!'
                    .format(ds_storage_title, ds_ib_batch_no,
                            ds_ib_directory_name, object_name, module_title,
                            module_access))
                # todo(Wang Xiya): decide whether to re-enable this filter,
                # and whether it should pass only 'pass', or 'pass' and
                # 'wait'!
                # if not \
                #         (
                #                 CUtils.equal_ignore_case(module_access, self.DataAccess_Pass)
                #                 or CUtils.equal_ignore_case(module_access, self.DataAccess_Wait)
                #         ):
                #     continue
                result = module_obj.notify_object(ds_ib_id, module_access,
                                                  module_access_memo,
                                                  object_id, object_name,
                                                  object_type, None)
                if not CResult.result_success(result):
                    # Collect the failure and keep notifying the rest.
                    message = CResult.result_message(result)
                    CLogger().debug(
                        '存储[{0}]下, 批次为[{1}]的目录[{2}]下的对象[{3}], 与模块[{4}]的通知处理结果出现错误, 详细情况: [{5}]!'
                        .format(ds_storage_title, ds_ib_batch_no,
                                ds_ib_directory_name, object_name,
                                module_title, message))
                    error_message = CUtils.str_append(error_message, message)
        if CUtils.equal_ignore_case(error_message, ''):
            result = CResult.merge_result(
                self.Success,
                '存储[{0}]下, 批次为[{1}]的目录[{2}]下有[{3}]个对象成功通知给第三方应用!'.format(
                    ds_storage_title, ds_ib_batch_no, ds_ib_directory_name,
                    data_count))
            self.update_notify_result(ds_ib_id, result)
            return result
        else:
            result = CResult.merge_result(
                self.Failure,
                '存储[{0}]下, 批次为[{1}]的目录[{2}]下有[{3}]个对象在通知给第三方应用时, 部分出现错误! 错误信息如下: \n{4}'
                .format(ds_storage_title, ds_ib_batch_no,
                        ds_ib_directory_name, data_count, error_message))
            self.update_notify_result(ds_ib_id, result)
            return result
    except Exception as error:
        result = CResult.merge_result(
            self.Failure,
            '存储[{0}]下, 批次为[{1}]的目录[{2}]下有[{3}]个对象通知给第三方应用时出现异常! 错误原因为: {4}!'
            .format(ds_storage_title, ds_ib_batch_no, ds_ib_directory_name,
                    data_count, error.__str__()))
        self.update_notify_result(ds_ib_id, result)
        return result
def __prepare_delete(self):
    """
    Build the DELETE statement for this table.

    The WHERE clause (and its bind parameters) comes from __prepare_where;
    when it is empty, the statement deletes unconditionally.

    :return: tuple of (sql text, bind-parameter dict)
    """
    where_text, where_params = self.__prepare_where()
    delete_head = 'delete from {0}'.format(self.__table_name)
    statement = CUtils.str_append(delete_head, where_text, ' where ')
    return statement, where_params
def __prepare_update_data(self) -> list:
    """
    Build the SQL statement list that updates one row of this table.

    For each non-primary-key column with a value, a SET fragment is built
    according to the column's value type (raw SQL, file content, or a typed
    value whose column type dictates how it is written). Oversized
    function/geometry values are first staged in the helper table
    ro_global_spatialhandle and referenced by a sub-select, then cleaned up
    with trailing DELETE statements. The WHERE clause is built from the
    primary-key columns.

    :return: list of (sql, params) tuples: optional staging inserts, the
             UPDATE itself, then one cleanup delete per staged value
    """
    sql_list = []
    temp_helper_code_list = []
    sql_update_set = ''
    sql_update_params = dict()
    for column_index in range(self.__column_list.size()):
        column = self.__column_list.column_by_index(column_index)
        # Primary keys go into the WHERE clause (below), and columns
        # without a value are left untouched.
        if column.is_primary_key or (column.value is None):
            continue
        try:
            column_type = self.__database.db_column_type_by_name(
                column.db_column_type)
            column_value_type = CUtils.dict_value_by_name(
                column.value, self.Name_Type, self.DataValueType_SQL)
            column_value_as_text = CUtils.any_2_str(
                CUtils.dict_value_by_name(column.value, self.Name_Text, ''))
            # If the value is raw SQL, embed it directly in the UPDATE
            # regardless of the column type.
            if CUtils.equal_ignore_case(column_value_type,
                                        self.DataValueType_SQL):
                column_update_set = '{0}={1}'.format(column.name,
                                                     column_value_as_text)
            elif CUtils.equal_ignore_case(column_value_type,
                                          self.DataValueType_File):
                # File value: bind as a parameter and let the database
                # adapter load the file content into it.
                column_update_set = '{0}={1}'.format(
                    column.name,
                    ':{0}'.format(self.__param_name(column.name)))
                self.__database.file2param(sql_update_params,
                                           self.__param_name(column.name),
                                           column_value_as_text)
            else:
                if CUtils.equal_ignore_case(
                        column_type.set_value_method,
                        self.DB_Column_Set_Method_Function):
                    # Value is wrapped in the column type's function
                    # template. When the text exceeds the template's
                    # parameter limit (and a limit is set), stage it in the
                    # helper table and reference it via a sub-select.
                    if len(column_value_as_text
                           ) > column_type.function_param_max_size >= 0:
                        column_data_id = CUtils.one_id()
                        temp_helper_code_list.append(column_data_id)
                        sql_exchange = '''
                        insert into ro_global_spatialhandle(code, data) values(:code, :data)
                        '''
                        param_exchange = {
                            'code': column_data_id,
                            'data': column_value_as_text
                        }
                        sql_list.append((sql_exchange, param_exchange))
                        column_update_set = '{0}={1}'.format(
                            column.name,
                            CUtils.replace_placeholder(
                                column_type.set_value_template,
                                dict({
                                    self.Name_Value:
                                    "(select data from ro_global_spatialhandle where code = '{0}')"
                                    .format(column_data_id)
                                })))
                    else:
                        if column_type.function_param_quoted:
                            column_value_as_text = CUtils.quote(
                                column_value_as_text)
                        column_update_set = '{0}={1}'.format(
                            column.name,
                            CUtils.replace_placeholder(
                                column_type.set_value_template,
                                dict({
                                    self.Name_Value:
                                    column_value_as_text
                                })))
                elif CUtils.equal_ignore_case(
                        column_type.set_value_method,
                        self.DB_Column_Set_Method_Geometry):
                    # Geometry values behave like function values but also
                    # substitute an SRID (per column value, falling back to
                    # the application's configured default).
                    if len(column_value_as_text
                           ) > column_type.function_param_max_size >= 0:
                        column_data_id = CUtils.one_id()
                        temp_helper_code_list.append(column_data_id)
                        sql_exchange = '''
                        insert into ro_global_spatialhandle(code, data) values(:code, :data)
                        '''
                        param_exchange = {
                            'code': column_data_id,
                            'data': column_value_as_text
                        }
                        sql_list.append((sql_exchange, param_exchange))
                        column_update_set = '{0}={1}'.format(
                            column.name,
                            CUtils.replace_placeholder(
                                column_type.set_value_template,
                                dict({
                                    self.Name_Value:
                                    "(select data from ro_global_spatialhandle where code = '{0}')"
                                    .format(column_data_id),
                                    self.Name_Srid:
                                    CUtils.dict_value_by_name(
                                        column.value, self.Name_Srid,
                                        settings.application.xpath_one(
                                            self.Path_Setting_Spatial_Srid,
                                            self.SRID_WGS84))
                                })))
                    else:
                        if column_type.function_param_quoted:
                            column_value_as_text = CUtils.quote(
                                column_value_as_text)
                        column_update_set = '{0}={1}'.format(
                            column.name,
                            CUtils.replace_placeholder(
                                column_type.set_value_template,
                                dict({
                                    self.Name_Value:
                                    column_value_as_text,
                                    self.Name_Srid:
                                    CUtils.dict_value_by_name(
                                        column.value, self.Name_Srid,
                                        settings.application.xpath_one(
                                            self.Path_Setting_Spatial_Srid,
                                            self.SRID_WGS84))
                                })))
                else:
                    # Default: plain bind parameter.
                    # if CUtils.equal_ignore_case(column_type.set_value_method, self.DB_Column_Set_Method_Param):
                    column_update_set = '{0}={1}'.format(
                        column.name,
                        ':{0}'.format(self.__param_name(column.name)))
                    sql_update_params[self.__param_name(
                        column.name)] = column_value_as_text
            sql_update_set = CUtils.str_append(sql_update_set,
                                               column_update_set, ', ')
        except Exception as error:
            # NOTE(review): prints to stdout before re-raising — presumably
            # a debugging leftover; consider routing through CLogger.
            print(error.__str__())
            raise
    # WHERE clause: equality on every primary-key column.
    sql_where = ''
    for column_index in range(self.__column_list.size()):
        column = self.__column_list.column_by_index(column_index)
        if column.is_primary_key:
            sql_where, sql_update_params = self.__prepare_where_condition(
                column, sql_where, sql_update_params)
    if not CUtils.equal_ignore_case(sql_where, ''):
        sql_where = CUtils.str_append(' where ', sql_where, ' ')
    sql_update = 'update {0} set {1} {2}'.format(self.__table_name,
                                                 sql_update_set, sql_where)
    sql_list.append((sql_update, sql_update_params))
    # Clean up every staged helper row after the UPDATE has run.
    for temp_helper_code in temp_helper_code_list:
        sql_list.append(
            ("delete from ro_global_spatialhandle where code = '{0}'".
             format(temp_helper_code), None))
    return sql_list