def detection_image(self, img, save_dir, rotation_angle, i=None, file=None, type='url'):
    """Run word/table detection on a single image and record the result.

    Appends one record to ``self.detection_list`` describing the detected
    regions, the grayscale image, table bounding boxes and point sets.

    Args:
        img: decoded image array (possibly pre-rotated by the caller).
        save_dir: directory where detection debug images are written ('' to skip).
        rotation_angle: 0 / 90 / 270 — the rotation already applied to ``img``.
        i: URL-mode metadata dict (keys 'type', 'path', 'id', 'page'); required
           when ``type == 'url'``.
        file: subdirectory name for local-path mode; required otherwise.
        type: 'url' for remote images, anything else for local files.
              NOTE(review): shadows the builtin ``type``; kept for interface
              compatibility with existing callers.
    """
    if type == 'url':
        source = i.get('type', '')
        img_url = i.get('path', '')
        img_time = time.time()
        logger.info('[ticket:%s] %s %s detection start.' % (self.ticket, source, img_url))
        detectied_region, img_gray, table_bbox, point_sets = ocr_cv.detect_word(
            img, img_url, 'url', self.ticket, companyName=self.companyName,
            source=source, data_dir=save_dir)
        logger.info('[ticket:%s] %s %s detection end. time cost:%ss'
                    % (self.ticket, source, img_url, round(time.time() - img_time, 3)))
        img_name = source
        img_id = i.get('id', '')
        img_page = i.get('page', '')
    else:
        # Local-file mode: images live under <data_dir>/<file>/.
        img_path = r'%s/%s/%s' % (self.data_dir, file, img)
        detectied_region, img_gray, table_bbox, point_sets = ocr_cv.detect_word(
            img, img_path, data_dir=save_dir)
        img_name = file
        img_url = ''
        img_id = ''
        img_page = ''
    # Single record construction shared by both branches (previously duplicated).
    # 'detectied_region' (sic) is the key name consumed downstream — keep it.
    self.detection_list.append({
        'rotation_angle': rotation_angle,
        'detectied_region': detectied_region,
        'img_gray': img_gray,
        'img_name': img_name,
        'img_url': img_url,
        'id': img_id,
        'page': img_page,
        'table_bbox': table_bbox,
        'point_sets': point_sets,
    })
def post_test(post_server=True, server_name=None):
    """Send a test POST request to the economic-model API.

    Args:
        post_server: True → hit the public API gateway; False → hit the
            direct host (port 8888, service listens on 28095 behind it).
        server_name: service endpoint name; also the key into ``switch_post``
            for the request payload.
    """
    if post_server:
        url = 'http://apis.cisdi.amiintellect.com/api/cisdi/ml/economic/{}/1234'.format(
            server_name)
    else:
        url = 'http://8113.204.147.34:8888/api/cisdi/ml/economic/{}/1234'.format(
            server_name)  # 28095
    # BUG FIX: this assignment was commented out while `data_json` was still
    # used below, so every call raised NameError.
    data_json = switch_post[server_name]
    res = requests.post(
        url,
        # json={"mytext":"from client :lalala"}
        json=data_json)
    print(res)
    if res.ok:
        # `res.json()` is the POST response body from the server.
        logger.info('from server response:{}'.format(res.json()))
def image_deal(self):
    """Full OCR pass: detect regions, recognize text, reorder by pdf page.

    Returns:
        (result_e, result_c): English and Chinese recognition results,
        re-ordered by pdf page with one rotation variant selected per image.
    """
    t0 = time.time()
    # Region detection
    detection(self)
    logger.info('[ticket:%s] detection_list_len: %d' % (self.ticket, len(self.detection_list)))
    # Text recognition on the detected regions
    result_e, result_c = recognization(self)
    elapsed = time.time() - t0
    # Re-order recognized content by pdf page order and keep only one of the
    # 0/90/270 rotation variants per image.
    result_e = result_rearrange(result_e)
    result_c = result_rearrange(result_c)
    logger.info('[ticket:%s] detection and recognization finished. image number:%s, time cost:%ss'
                % (self.ticket, self.slice_num, round(elapsed, 3)))
    return result_e, result_c
def __init__(self, companyName=None, ticket=None):
    """Smoke-test the MySQL connection and log the elapsed time.

    Opens a connection, queries the server version, prints it, and closes
    everything again.

    Args:
        companyName: currently unused — kept for interface compatibility.
        ticket: currently unused — kept for interface compatibility.
    """
    start_time = time.time()
    self.db = pymysql.connect(host=host_name,
                              user=user_name,
                              password=password,
                              db=database,
                              charset=charset)
    self.cur = self.db.cursor()
    try:
        # Run a trivial query to verify the connection works.
        self.cur.execute("SELECT VERSION()")
        data = self.cur.fetchall()
        print("Database version : %s " % data)
    finally:
        # BUG FIX: the cursor was never closed, and a failing query
        # previously leaked both cursor and connection.
        self.cur.close()
        self.db.close()
    cost_time = time.time() - start_time
    logger.info(' time cost:%ss' % (round(cost_time, 3)))
def schedule_func(uuid):
    """Flask handler: read a voyage id from the request JSON and run scheduling.

    Args:
        uuid: URL path parameter (not used by the handler body).

    Returns:
        JSON response: {'code': "0", 'message': "success"} on success,
        {'data': None, 'code': "1", 'message': <error>} on failure.
    """
    try:
        logger.info('开始进入模型,服务端获取数据')
        # Removed unused locals `now_time` and `t1` (dead code).
        content = request.get_json(silent=True, force=True)
        logger.info('from client type content:{},{}'.format(type(content), content))
        # Do your processing
        voyage_id = content["voyage"]
        schedule_api(voyage_id)
        logger.info('结果已经返回')
        # fres={'schedule_position':res}
        return jsonify({'code': "0", 'message': "success"})
    except Exception as e:
        # Top-level handler boundary: log full traceback and return an error body.
        logger.error('出错:{}\n{}'.format(e, traceback.format_exc()))
        return jsonify({'data': None, 'code': "1", 'message': "{}".format(e)})
def detection(self):
    """Detect text regions and tables for every input image.

    Two sources are supported:
      * URL mode (``self.img_data_dir``): each entry's 'path' is fetched
        over HTTP and decoded with OpenCV.
      * Local mode (``self.data_dir``): every jpg/png/jpeg found one level
        below ``self.data_dir`` is read from disk.

    Images that fail the orientation check are additionally detected at
    90 and 270 degrees so the best rotation can be chosen later. Results
    accumulate in ``self.detection_list`` via ``detection_image()``.
    """
    start_time = time.time()
    if self.data_dir:
        save_dir = self.data_dir + '/detect_image'
        # Create the debug-output directory only if the parent exists.
        if not os.path.exists(save_dir) and os.path.exists(self.data_dir):
            os.mkdir(save_dir)
    else:
        save_dir = ''
    # --- URL mode -------------------------------------------------------
    if self.img_data_dir:
        logger.info('[ticket:%s] detection start. by url...' % (self.ticket))
        for i in self.img_data_dir:
            img_url = i.get('path', '')
            suffix = img_url.split('.')[-1]
            if suffix not in ['jpg', 'png', 'jpeg']:
                continue
            # Fetch and decode the image. Percent-encode the URL but restore
            # the scheme separator that quote() would mangle.
            try:
                image_path = parse.quote(img_url, encoding='utf8').replace('%3A', ':')
                resp = request.urlopen(image_path)
                img = np.asarray(bytearray(resp.read()), dtype="uint8")
                img = cv2.imdecode(img, cv2.IMREAD_COLOR)
            except Exception as e:
                logger.warning('[ticket:%s] %s error' % (self.ticket, image_path))
                logger.warning(e)
                # BUG FIX: previously fell through after a failed download,
                # using an undefined (or stale previous-iteration) `img`.
                continue
            # Decide whether the image needs rotating.
            if not ocr_rotation.detect_rotation(img, 127):
                logger.info('[ticket:%s] %s 旋转图片.' % (self.ticket, img_url))
                # Rotate 90 degrees and detect.
                img_90 = np.rot90(img)
                detection_image(self, img_90, save_dir, i=i, rotation_angle=90)
                # Rotate 270 degrees and detect.
                img_270 = np.rot90(img, 3)
                detection_image(self, img_270, save_dir, i=i, rotation_angle=270)
                detection_image(self, img, save_dir, i=i, rotation_angle=0)
            else:
                # Text and table detection on the unrotated image.
                detection_image(self, img, save_dir, i=i, rotation_angle=0)
    # --- Local mode -----------------------------------------------------
    elif not self.ocr_data_files and os.path.exists(r'%s/' % self.data_dir):
        # Unzipped images are stored in the company's temporary directory.
        logger.info('[ticket:%s] detection start. by local path. data_dir:%s'
                    % (self.ticket, self.data_dir))
        for file in sorted(os.listdir(r'%s/' % self.data_dir), key=lambda x: x):
            if os.path.isdir((r'%s/' % self.data_dir) + file):
                for im in sorted(os.listdir((r'%s/' % self.data_dir) + file), key=lambda x: x):
                    suffix = im.split('.')[-1]
                    if suffix not in ['jpg', 'png', 'jpeg']:
                        continue
                    # Read the image (np.fromfile + imdecode handles non-ASCII paths).
                    image_path = r'%s/%s/%s' % (self.data_dir, file, im)
                    img = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8), -1)
                    # Decide whether the image needs rotating.
                    if not ocr_rotation.detect_rotation(img, 127):
                        logger.info('[ticket:%s] %s 旋转图片.' % (self.ticket, image_path))
                        img_90 = np.rot90(img)
                        detection_image(self, img_90, save_dir,
                                        rotation_angle=90, file=file, type='path')
                        img_270 = np.rot90(img, 3)
                        detection_image(self, img_270, save_dir,
                                        rotation_angle=270, file=file, type='path')
                        detection_image(self, img, save_dir,
                                        rotation_angle=0, file=file, type='path')
                    else:
                        detection_image(self, img, save_dir,
                                        rotation_angle=0, file=file, type='path')
    logger.info('[ticket:%s] detection finished. time cost:%ss'
                % (self.ticket, round(time.time() - start_time, 3)))
def result_rearrange(result):
    """Re-order recognition results by pdf page and pick one rotation variant.

    Groups entries by pdf id, then walks each pdf's images in page order.
    When an image was recognized at all three rotations (0/90/270), a
    content-quality judgment selects which variant to keep.

    Returns the re-ordered dict, or the input unchanged if anything fails
    (best-effort fallback).
    """
    try:
        # Group indices as {pdf_id: {img_url: {'index_list': [...], 'page': n}}}.
        grouped = {}
        for idx, item in result.items():
            url = item['img_url']
            doc = item.get('id', 'pdf_id_default')
            page = item.get('page', 0)
            if str(page).strip() == '':
                page = 0
            doc_group = grouped.setdefault(doc, {})
            if url in doc_group:
                doc_group[url]['index_list'].append(idx)
            else:
                doc_group[url] = {'index_list': [idx], 'page': page}

        rearranged = {}
        for doc, url_map in grouped.items():
            # Walk this pdf's images in ascending page order.
            for _, entry in sorted(url_map.items(), key=lambda kv: int(kv[1]['page'])):
                candidates = entry['index_list']
                if len(candidates) == 3:
                    # Map rotation angle -> index for the three variants.
                    by_angle = {
                        result[str(c)].get('rotation_angle', None): c
                        for c in candidates
                    }
                    _, best_angle = rotation_content_judg.content_judgment_three(
                        result[str(by_angle[0])]['recoged_result'],
                        result[str(by_angle[90])]['recoged_result'],
                        result[str(by_angle[270])]['recoged_result'])
                    if best_angle == 0:
                        chosen = by_angle[0]
                    elif best_angle == 90:
                        chosen = by_angle[90]
                    else:
                        chosen = by_angle[270]
                else:
                    chosen = candidates[0]
                rearranged[str(chosen)] = result[str(chosen)]
        return rearranged
    except Exception as e:
        logger.info(e)
        return result
async def recog_box2word(param):
    """Recognize words inside one image's detected regions with one OCR model.

    Args:
        param: dict bundling everything a worker needs:
            'self'      — the pipeline object (ticket, rd, img_url_class, ...),
            'd'         — one record from self.detection_list,
            'type'      — 'english' or 'chinese' (which model family to run),
            'use_model' — explicit model override ('' for the default),
            'model'     — the recognition model, present on the CUDA path.

    Returns:
        A result dict (url, rotation, recognized content, ids, bboxes),
        or None when the record is dropped (reclassified duplicate source
        or empty recognition result).
    """
    # model=load_model_with_cuda()
    #global model
    if not (torch.cuda.is_available() and config.IS_USE_CUDA):
        # CPU path: reuse the module-level model loaded at import time.
        global model
        model = model
    else:
        # GPU path: the caller supplies a per-worker model instance.
        model = param['model']
    self = param['self']
    d = param['d']
    type = param['type']  # NOTE(review): shadows the builtin `type`
    use_model = param['use_model']
    if use_model:
        # An explicit override wins over the requested model family.
        type = use_model
    img_name = d['img_name']
    # lock = param['lock']
    logger.info('[ticket:%s] %s %s %s model start.' %
                (self.ticket, d['img_name'], d['img_url'], type))
    start_time = time.time()
    img_gray = d['img_gray']
    detectied_region = d['detectied_region']
    table_bbox = d['table_bbox']
    point_sets = d['point_sets']
    tmp = {}
    # lock.acquire()  # (locking disabled — workers run in separate processes)
    recoged_result = eng_recog.box2word(img_gray, detectied_region, model, type, img_name)
    recoged_result = y_axis_deviation_deal(recoged_result)
    # lock.release()
    tmp['img_url'] = d['img_url']
    tmp['rotation_angle'] = d['rotation_angle']
    # File classification: only files arriving through the 'other_files' and
    # 'package_upload' entry points need classifying. When entry points such
    # as contract / invoice / packing list / waybill / declaration elements /
    # warehouse receipt already have a matching file, the 'other_files' /
    # 'package_upload' duplicate is discarded.
    if img_name in ('other_files', 'package_upload'):
        # class_time = time.time()
        img_name_new = file_class(recoged_result, self.rd, self.img_url_class,
                                  d['img_url'], img_name)
        logger.info('[ticket:%s] 文件分类.origin:%s,new:%s' %
                    (self.ticket, img_name, img_name_new))
        if img_name_new not in ('other_files', 'package_upload'
                                ) and img_name_new in self.source_list:
            # Duplicate of an existing source — drop this record.
            # NOTE(review): after this reset, the `tmp['img_name']` access
            # below raises KeyError whenever recoged_result is non-empty,
            # and tmp is re-populated so the final `return tmp if tmp else
            # None` would not return None. Looks like a latent bug — confirm
            # intended behavior before changing.
            tmp = {}
        elif not recoged_result:
            # Recognition produced nothing — drop this record.
            tmp = {}
        else:
            tmp['img_name'] = img_name_new
        # result[str(i)] = tmp
    else:
        tmp['img_name'] = d['img_name']
    if recoged_result:
        # Merge boxed words into sentences using the table layout.
        recoged_result = extract_sentence(table_bbox, recoged_result,
                                          d['img_gray'], tmp['img_name'])
    tmp['recoged_result'] = recoged_result
    tmp['id'] = d.get('id', '')
    tmp['page'] = d.get('page', '')
    tmp['table_bbox'] = table_bbox
    tmp['point_sets'] = point_sets
    logger.info('[ticket:%s] %s %s %s model end.time cost:%ss' %
                (self.ticket, img_name, d['img_url'], type,
                 round(time.time() - start_time, 3)))
    return tmp if tmp else None
def recognization(self):
    """Run OCR recognition over self.detection_list with both model families.

    When ``self.parallel == 1`` the work is fanned out across a
    ProcessPoolExecutor (tasks grouped so each worker gets a roughly equal
    chunk) and gathered with asyncio; otherwise everything runs serially.

    Returns:
        (result_e, result_c): dicts of English and Chinese recognition
        results, keyed by stringified task index.
    """
    # max_workers = (os.cpu_count() - 2) if os.cpu_count() < config.THREADS_NUMBER else config.THREADS_NUMBER
    max_workers = (os.cpu_count(
    )) if os.cpu_count() < config.THREADS_NUMBER else config.THREADS_NUMBER
    logger.info('[ticket:%s] recognization start. max_workers:%s' %
                (self.ticket, max_workers))
    start_time = time.time()
    result_e = {}
    result_c = {}
    # lock = threading.Lock()
    # --- Multi-process path ---------------------------------------------
    if self.parallel == 1:
        # Process pool; default pool size would be cpu count * 5.
        if torch.cuda.is_available() and config.IS_USE_CUDA:
            # CUDA requires the 'spawn' start method; ignore the error if it
            # was already set earlier in this process.
            try:
                mp.set_start_method('spawn')
            except RuntimeError:
                pass
        with ProcessPoolExecutor(max_workers=max_workers) as recog_executor:
            new_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(new_loop)
            event_loop = asyncio.get_event_loop()
            try:
                ss = time.time()
                all_task_params_e = []
                all_task_params_c = []
                all_task_params_groups = []
                # Build the parameter lists for the English and Chinese models.
                for i in range(len(self.detection_list)):
                    d = self.detection_list[i]
                    img_name = d['img_name']
                    # NOTE(review): the comprehension variable `i` shadows
                    # the loop index `i` — harmless here, but confusing.
                    use_model = [
                        i[1] for i in self.rd.chi_tra if img_name == i[0]
                    ]
                    use_model = use_model[0] if use_model else ''
                    # English task parameters.
                    if use_model in ('', 'english'):
                        param_e = {
                            'self': self,
                            'd': d,
                            'i': i,
                            'type': 'english',
                            'use_model': use_model
                        }
                        all_task_params_e.append(param_e)
                    # Chinese task parameters.
                    if use_model in ('', 'chinese', 'chi_tra', 'chi_sim'):
                        param_c = {
                            'self': self,
                            'd': d,
                            'i': i,
                            'type': 'chinese',
                            'use_model': use_model
                        }
                        all_task_params_c.append(param_c)
                # Split the parameter lists into near-equal chunks so every
                # worker process receives a comparable amount of work.
                len_e = len(all_task_params_e)
                len_c = len(all_task_params_c)
                if len_e > 0:
                    all_task_params_groups.extend(
                        chunks(all_task_params_e, recog_executor._max_workers))
                if len_c > 0:
                    all_task_params_groups.extend(
                        chunks(all_task_params_c, recog_executor._max_workers))
                tasks = [
                    run(recog_executor, chunked)
                    for chunked in all_task_params_groups
                ]
                res = event_loop.run_until_complete(asyncio.gather(*tasks))
                # Flatten the list of per-chunk result lists.
                res = sum(res, [])
                logger.info('{} workers cost time final {} s'.format(
                    recog_executor._max_workers, time.time() - ss))
                # English results come first in `res`, Chinese after.
                for index_e, e in enumerate(res[0:len_e]):
                    result_e[str(index_e)] = e
                for index_c, c in enumerate(res[len_e:]):
                    result_c[str(index_c)] = c
            except Exception as e:
                # NOTE(review): logger.info() does not take positional extras
                # like print(); this call mis-formats. Also `e.errno` raises
                # AttributeError for exceptions without an errno attribute —
                # both here and in the check below. Worth fixing.
                logger.info('exception err', e, e.errno)
                if e.errno != errno.ECONNRESET:
                    raise
                pass
            finally:
                event_loop.close()
    # --- Serial path ----------------------------------------------------
    else:
        for i in range(len(self.detection_list)):
            d = self.detection_list[i]
            img_name = d['img_name']
            use_model = [i[1] for i in self.rd.chi_tra if img_name == i[0]]
            use_model = use_model[0] if use_model else ''
            # English model recognition.
            if use_model in ('', 'english'):
                param_e = {
                    'self': self,
                    'd': d,
                    'i': i,
                    'type': 'english',
                    'use_model': use_model
                }
                try:
                    data_e = recog_box2word(param_e)
                    if data_e:
                        result_e[str(i)] = data_e
                except Exception as e:
                    logger.info('[ticket:%s] ocr_read_image. %s ' %
                                (self.ticket, e))
            # Chinese model recognition.
            if use_model in ('', 'chinese', 'chi_tra', 'chi_sim'):
                param_c = {
                    'self': self,
                    'd': d,
                    'i': i,
                    'type': 'chinese',
                    'use_model': use_model
                }
                try:
                    data_c = recog_box2word(param_c)
                    if data_c:
                        result_c[str(i)] = data_c
                except Exception as e:
                    logger.info('[ticket:%s] ocr_read_image. %s ' %
                                (self.ticket, e))
    # Optional second-pass classification over both result sets.
    if img_class_sencod(self):
        result_class_sencod(self, result_e)
        result_class_sencod(self, result_c)
    # Persist the raw results for debugging when a data dir is configured.
    if self.data_dir and os.path.exists(self.data_dir):
        logger.info('[ticket:%s] 写文件recoged_result.txt' % (self.ticket))
        write_content = {'result_e': result_e, 'result_c': result_c}
        with open('%s/recoged_result.txt' % self.data_dir, 'w',
                  encoding='utf8') as f:
            f.write(str(write_content) + '\n')
    logger.info('[ticket:%s] recognization finished. time cost:%ss' %
                (self.ticket, round(time.time() - start_time, 3)))
    return result_e, result_c
# Module-level CRNN setup.
# On the CPU path the model is loaded once at import time into the module
# global `model`; on the GPU path each worker builds its own model via
# load_model_with_cuda().
if not (torch.cuda.is_available() and config.IS_USE_CUDA):
    crnn_model_path = './models/crnn_Rec_done_99.pth'
    alphabet = alphabets.alphabet
    nclass = len(alphabet) + 1  # +1 for the CTC blank label
    # crnn network
    model = crnn.CRNN(32, 1, nclass, 256)
    model.load_state_dict(torch.load(crnn_model_path, map_location='cpu'))
    logger.info('CRNN model loaded.')
else:
    logger.info('will CRNN model loaded with gpu.')


def load_model_with_cuda():
    """Build the CRNN model on the GPU, load trained weights, and return it.

    Returns:
        The CUDA-resident CRNN model with the trained state dict applied.
    """
    crnn_model_path = './models/crnn_Rec_done_99.pth'
    alphabet = alphabets.alphabet
    nclass = len(alphabet) + 1  # +1 for the CTC blank label
    # crnn network
    model = crnn.CRNN(32, 1, nclass, 256)
    model = model.cuda()
    # Load the pre-trained CRNN weights (GPU).
    model.load_state_dict(torch.load(crnn_model_path))
    # BUG FIX: the model was built and loaded but never returned, so every
    # caller of load_model_with_cuda() received None.
    return model
def add_message(uuid):
    """Flask handler: echo the posted JSON payload back to the client.

    NOTE(review): this handler previously contained a very large block of
    commented-out ARIMA forecasting code (data-frame conversion, null checks,
    per-country model fitting, error statistics). It was dead code and has
    been removed; recover it from version control if it is ever needed.

    Args:
        uuid: URL path parameter (not used by the handler body).

    Returns:
        The request JSON echoed back, or an error JSON body on failure.
    """
    try:
        logger.info('开始进入模型,服务端获取数据')
        content = request.get_json(silent=True, force=True)
        logger.info('from client content:{}'.format(content))
        # The response must be JSON-serializable (string / dict / ...).
        logger.info('将获取的json数据转为dataframe数据框')
        # Python clients send a string; web clients send a dict.
        logger.info('type content{}'.format(type(content)))
        # BUG FIX: previously returned jsonify(content1) where `content1`
        # was never defined, raising NameError on every request.
        return jsonify(content)
    except Exception as e:
        # BUG FIX: previously only print(e)'d and fell off the end, so Flask
        # raised "view did not return a response". Log and return an error body.
        logger.error('出错:{}\n{}'.format(e, traceback.format_exc()))
        return jsonify({'data': None, 'code': "1", 'message': "{}".format(e)})
app = Flask(__name__, static_url_path='') # load config from config.py app.config.from_pyfile('config.py') url_prefix = app.config.get('url_prefix', '/api/cisdi/ml/economic') CORS(app, supports_credentials=True) # CORS(app, resources=r'/*') @app.route('/') def index(): return "APIs Server" app.register_blueprint(bert_qa_blueprint, url_prefix=url_prefix) # app.register_blueprint(general_arima_blueprint, url_prefix=url_prefix) # app.register_blueprint(general_corr_blueprint, url_prefix=url_prefix) # app.register_blueprint(general_regresstion_blueprint, url_prefix=url_prefix) # app.register_blueprint(special_regresstion_blueprint, url_prefix=url_prefix) # app.register_blueprint(special_population_blueprint,url_prefix=url_prefix) if __name__ == '__main__': host = app.config.get('APP_HOST', 'localhost') port = app.config.get('APP_PORT', '28095') logger.info('host:{},port:{}'.format(host, port)) # from werkzeug.contrib.fixers import ProxyFix # app.wsgi_app = ProxyFix(app.wsgi_app) # app.run(host=host, port=port, threaded=True, debug=True) app.run(host=host, debug=False, port=port)
import sys
import os

# Put this file's directory at the front of sys.path (index 0 = highest
# search priority). Paths added with sys.path.insert()/append() are
# temporary and disappear when the program exits.
sys.path.insert(0, os.path.dirname(__file__))
# Also add the parent directory so sibling packages resolve.
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
# Anchoring on __file__ fixes import errors that occur when an imported
# module itself imports other modules via relative paths.

from app_logging import logger

# Startup sanity logs (ASCII and non-ASCII) to verify logger encoding.
logger.info('hello32')
logger.info('哈喽32')