async def get_process(self, product_line, process_code):
    """Switch the running plan for *process_code*, refresh the cached plan and
    collection counts, and broadcast them to every connected websocket."""
    try:
        now = get_now_timestamp()
        await change_plan(product_line, process_code, now)
        all_data = get_context("all_data")
        all_data["current_plan"] = await get_current_plan_by_group(product_line)
        all_data["data_collection"] = await get_positive_and_nagetive_count(
            product_line, now)
        for ws in get_context("websocket_list"):
            try:
                await send_mes(ws,
                               mes_type="data_collection",
                               data={
                                   "data_collection": all_data["data_collection"],
                                   "current_plan": all_data["current_plan"]
                               })
            except Exception as err:
                # one broken socket must not stop the broadcast
                get_logger().error("send_mes error:%s", err)
        self.send_response_data(MesCode.success, {}, 'success get data')
    except Exception as err:
        self.send_response_data(MesCode.fail, {}, str(err))
async def get_product_line_data(product_line, process_code):
    """Decorate the cached plan list with per-plan progress and, for
    *process_code*, the current qualified/unqualified counts.

    Returns the (mutated) shared ``plan_list``.  Re-raises any failure after
    logging it with a full traceback.
    """
    try:
        logger = get_logger()
        timestamp = get_now_timestamp()
        last_process_code = get_finally_process(product_line)
        logger.debug("last_process_code~~~~~~~~~~~~~~~~~~~~~~:%s", last_process_code)
        plan_list = get_context("plan_list")
        logger.info("plan_list~~~~~~~~~~~~~~~~~~~~~~:%s", plan_list)
        code_map_dict = get_code_map_dict(product_line)[product_line]
        all_data = get_context("all_data")
        idi_data_dict = get_context("idi_data_dict")
        plan_quality = all_data["data_collection"]["plan_quality"]
        plan_time_range_dict, first_start_time = await get_plan_time_range(timestamp)
        for item in plan_list:
            plan_code = item["planId"]
            # normalize master-data field names for the frontend
            item["plan_number"] = plan_code
            item["material_name"] = item["planProduct"]
            item["material_code"] = item["productCode"]
            item["plan_count"] = item["planNum"]
            positive = plan_quality.get(plan_code, {}).get("positive_num", 0)
            negative = plan_quality.get(plan_code, {}).get("negative_num", 0)
            progress = get_percent(positive, item["plan_count"])
            item["total_plan_qualified_count"] = positive
            item["total_plan_unqualified_count"] = negative
            item["plan_progress"] = progress
            if process_code in code_map_dict:
                current_positive_code = code_map_dict[process_code].get("positive", None)
                current_negative_code = code_map_dict[process_code].get("negative", None)
                plan_time_range_list = plan_time_range_dict.get(process_code, {}).get(plan_code, [])
                logger.debug("current_positive_code:%s,current_negative_code:%s~~~~~~~~~~~~~~~~~~~~~~",
                             current_positive_code, current_negative_code)
                if current_positive_code:
                    current_positive = await get_process_count_by_plan(
                        process_code, current_positive_code, plan_code,
                        plan_time_range_list, idi_data_dict)
                else:
                    current_positive = None
                if current_negative_code:
                    current_negative = await get_process_count_by_plan(
                        process_code, current_negative_code, plan_code,
                        plan_time_range_list, idi_data_dict)
                else:
                    current_negative = None
                item["current_qualified_count"] = current_positive
                item["current_unqualified_count"] = current_negative
            else:
                # process has no configured count points
                item["current_qualified_count"] = None
                item["current_unqualified_count"] = None
        return plan_list
    except Exception:
        # fix: log through the logger (with traceback) instead of
        # `import traceback; traceback.print_exc(); raise e`
        get_logger().exception("get_product_line_data failed")
        raise
async def post_process(self, product_line):
    """Handle monitor pushes for *product_line*.

    ``msg_type == "status"``: merge the posted equipment statuses into the
    shared ``all_data`` and broadcast them.
    ``msg_type == "reset"``: recompute status, current plan and counts from
    scratch and broadcast the whole dataset.
    Any other ``msg_type`` is ignored but still answered with success.
    """
    try:
        timestamp = get_now_timestamp()
        msg_type = self.data.get("msg_type", None)
        all_data = get_context("all_data")
        websocket_list = get_context("websocket_list")
        if msg_type == "status":
            process_code_list = self.data["process_code_list"]
            status = self.data["status"]
            get_logger().info(
                "receive~~~~~~~~~~~~~~~~~~~~~~~~~~~~~msg_type:%s,process_code_list:%s,status:%s",
                msg_type, process_code_list, status)
            get_logger().debug(
                "equipment_status~~~~~~~~~~~~~~~~~~~~~~~```%s",
                all_data["equipment_status"])
            # same status applies to every listed process code
            all_data["equipment_status"].update(
                {code: str(status) for code in process_code_list})
            get_logger().debug(
                "equipment_status~~~~~~~~~~~~~~~~~~~~~~~```%s",
                all_data["equipment_status"])
            get_logger().debug("websocket_handlers:%s", websocket_list)
            for handler in websocket_list:
                try:
                    await send_mes(handler,
                                   mes_type="equipment_status",
                                   data=all_data["equipment_status"])
                except Exception as e:
                    # a failing socket must not abort the broadcast
                    get_logger().error("send_mes error:%s", e)
        elif msg_type == "reset":
            all_data["equipment_status"] = await get_status(
                product_line, timestamp)
            all_data["current_plan"] = await get_current_plan_by_group(
                product_line)
            all_data[
                "data_collection"] = await get_positive_and_nagetive_count(
                    product_line, timestamp)
            for handler in websocket_list:
                try:
                    await send_mes(handler, mes_type="all", data=all_data)
                except Exception as e:
                    get_logger().error("send_mes error:%s", e)
        self.send_response_data(MesCode.success, {}, 'success get data')
    except Exception as e:
        import traceback
        traceback.print_exc()
        self.send_response_data(MesCode.fail, {}, str(e))
def get_timely_wage_info(self):
    """Collect timely-wage records, resolving person and wage-type display names.

    When ``self.person_code`` is set only that person's records are returned.
    """
    collection = get_target_mongo_collection('timely_wage')
    # person code -> display name lookup
    name_mapper = get_context('person_code_name_mapper')
    query = {'person': self.person_code} if self.person_code else {}
    records = []
    for doc in collection.find(query, {'_id': 0}):
        code = doc['person']
        wage_code = doc['wage_type']
        records.append({
            'person_code': code,
            'person_name': name_mapper.get(code, ''),
            'wage_code': wage_code,
            'wage_name': TIMELY_WAGE_TYPE_MAPPER.get(wage_code, ''),
            'price': doc.setdefault('price', ''),
        })
    return records
async def post_update_plan(next_plan, current_plan):
    """Notify the plan service that *next_plan* is now in progress.

    Best-effort: any failure is logged, never raised.  *current_plan* is
    currently unused (the pause-previous-plan payload was retired).
    """
    try:
        url = get_store().get_update_plan_url()
        all_data = get_context("all_data")
        plan_quality = all_data["data_collection"]["plan_quality"]
        client = HttpClient(AsyncHTTPClient(max_clients=1000))
        try:
            data = [{
                "plan_no": next_plan,
                "status": 2,  # 2 = in progress, 3 = paused, 4 = finished
                # qualified/unqualified counts are only consumed for
                # paused/finished plans, but the payload always carries them
                "progress_detail": {
                    "qualified_count": plan_quality.get(next_plan, {}).get("positive_num", 0),
                    "unqualified_count": plan_quality.get(next_plan, {}).get("negative_num", 0)
                }
            }]
            res = await client.post(url, data=data)
            get_logger().info("post_update_plan data~~~~~~~~~~~~~~~~~~~~:%s", data)
            get_logger().debug("post_update_plan res~~~~~~~~~~~~~~~~~~~~:%s", res)
        finally:
            # fix: the client used to leak when post() raised
            client.close()
    except Exception as e:
        get_logger().error("post_update_plan error :%s", e)
def get_code_map_dict(product_line):
    """Map each process code of *product_line* to its point codes.

    Returns ``{product_line: {process_code: {'status'|'positive'|'negative': point_code}}}``;
    a key is omitted when no point of that classify exists on the device.
    """
    # point classify label (Chinese, from master data) -> response key
    classify_key_mapper = {
        '状态': 'status',      # status point
        '数量': 'positive',    # qualified-count point
        '不合格数': 'negative',  # unqualified-count point
    }
    response = {}
    devices = get_context('pipeline_device').get(product_line, {})
    pipeline_response = response.setdefault(product_line, {})
    for device in devices.values():
        # setdefault on purpose: keeps parity with the rest of the module,
        # materialising missing keys in the shared context
        process_list = device.setdefault('process', [])
        if not process_list:
            continue
        process_code = process_list[0]['code']
        if not process_code:
            continue
        point_response = {}
        for point in device.setdefault('point', []):
            key = classify_key_mapper.get(point['classify'])
            if key:
                point_response[key] = point['code']
        pipeline_response[process_code] = point_response
    return response
def device_handler(msg):
    """Cache ``{pipeline: {device: {class_code: instance_list}}}`` from master-data *msg*."""
    get_logger().info('init pipeline device info')
    get_logger().info(msg)
    if not msg:
        return
    root = msg[0]['children'][0]
    memory = get_context('pipeline_device')
    for pipeline in root.setdefault('instance_list', []):
        get_logger().debug(pipeline)
        pipeline_code = pipeline['code']
        devices = pipeline['children'][0].setdefault('instance_list', [])
        get_logger().debug(devices)
        device_mapper = {}
        for dev in devices:
            # keep every data class mounted under the device — some may be used later
            data_by_class = {child['class_code']: child.setdefault('instance_list', [])
                             for child in dev.setdefault('children', [])}
            device_mapper[dev['code']] = data_by_class
        memory[pipeline_code] = device_mapper
        get_logger().info(memory)
        get_logger().info('pipeline:{} device info inited'.format(pipeline_code))
async def post_process(self, product_line):
    """Receive check-in data and broadcast it to every connected websocket."""
    try:
        get_logger().info("receive checkin data:%s", self.data)
        all_data = get_context("all_data")
        all_data["check_in"] = {"checkinInfo": self.data["checkinInfo"]}
        sockets = get_context("websocket_list")
        get_logger().debug("websocket_handlers:%s", sockets)
        for ws in sockets:
            try:
                await send_mes(ws, mes_type="check_in", data=all_data["check_in"])
            except Exception as err:
                # keep broadcasting to the remaining sockets
                get_logger().error("send_mes error:%s", err)
        self.send_response_data(MesCode.success, {}, 'success get data')
    except Exception as err:
        self.send_response_data(MesCode.fail, {}, str(err))
async def get_process(self, pipeline_code):
    """Reply with the raw attendance info cached for *pipeline_code*."""
    attendance_info = get_context('attendance_info')
    # fix: removed a stray debug print() of the whole attendance structure
    try:
        self.send_response_data(MesCode.success,
                                attendance_info.setdefault(pipeline_code, {}),
                                'get attendance info successfully')
    except Exception as e:
        self.send_response_data(MesCode.fail, None, str(e))
def get_person_fz_group(self, person_code):
    """Record on ``self.target_process_codes`` the process codes of the group
    containing *person_code*.

    No early exit: if a person appears in several groups the last one wins,
    matching the original behaviour.
    """
    for group in get_context('group_info'):
        codes = group['processCodes']
        for member in group['members']:
            if member['code'] == person_code:
                self.target_process_codes = codes
async def post_process(self):
    """Handle a check-in POST: update attendance, resync groups, reply, notify monitor."""
    try:
        body = loads(self.request.body)
        checkin_info = body['checkin_info']
        self.pipeline_code = body['pipeline_code']
        # update member sign-in state
        self.process_checkin_info(checkin_info)
        # resync in-memory group info from master data
        sync_group_info(get_context('mdm_group_info'))
        attendance_info = get_context('attendance_info')
        # reply with the pipeline's latest member grouping
        latest = attendance_info.setdefault(self.pipeline_code,
                                            {}).setdefault('checkinInfo', {})
        self.send_response_data(MesCode.success, latest, 'success post data')
        await self.send_attendance_info_to_monitor()
    except Exception as err:
        self.send_response_data(MesCode.fail, None, str(err))
def get_process_code_list(product_line):
    """Return every process code configured on *product_line*, in device order."""
    devices = get_context('pipeline_device').get(product_line, {})
    # setdefault keeps parity with the rest of the module: a missing
    # 'process' key is materialised as [] in the shared context
    return [p['code']
            for device in devices.values()
            for p in device.setdefault('process', [])]
async def cron_main():
    """Periodic job: refresh collection counts and broadcast them to all websockets."""
    try:
        now = get_now_timestamp()
        product_line = get_context("product_line") or "product_line_6t90"
        sockets = get_context("websocket_list")
        all_data = get_context("all_data")
        if not all_data["is_computed"]:
            # first run: build the whole dataset
            await get_init_all_data(product_line)
        else:
            all_data["data_collection"] = await get_positive_and_nagetive_count(
                product_line, now)
        for ws in sockets:
            try:
                await send_mes(ws,
                               mes_type="data_collection",
                               data={"data_collection": all_data["data_collection"],
                                     "current_plan": all_data["current_plan"]})
            except Exception as err:
                get_logger().error("send_mes error:%s", err)
    except Exception as err:
        get_logger().error("cron_main error:%s", err)
async def get_process(self, product_line):
    """Recompute and return the full dashboard dataset for *product_line*."""
    try:
        started = time.time()
        # read kept for parity; the cached fast path is currently disabled
        all_data = get_context("all_data")
        num_data = await get_init_all_data(product_line)
        get_logger().info("all data time:%s", time.time() - started)
        self.send_response_data(MesCode.success, num_data, 'success get data')
    except Exception as err:
        self.send_response_data(MesCode.fail, {}, str(err))
async def get_process(self, pipeline_code):
    """Return attendance info for *pipeline_code* filtered to on-duty members."""
    attendance_info = get_context('attendance_info')
    try:
        # deep-copy via JSON round-trip so filtering never touches the shared context
        snapshot = loads(dumps(attendance_info.setdefault(pipeline_code, {})))
        for group in snapshot.setdefault('checkinInfo', []):
            group['members'] = [m for m in group.setdefault('members', [])
                                if m.setdefault('on_work', False)]
        self.send_response_data(MesCode.success, snapshot,
                                'get attendance info successfully')
    except Exception as err:
        self.send_response_data(MesCode.fail, None, str(err))
def get_finally_process(product_line):
    """Return the code of the highest-sequence process on *product_line*, or None."""
    devices = get_context('pipeline_device').get(product_line, {})
    candidates = []
    for device in devices.values():
        for p in device.setdefault('process', []):
            candidates.append({'code': p['code'], 'seq': int(p['sequence'])})
    if not candidates:
        return None
    # stable sort: on equal seq the later-listed process wins, as before
    candidates.sort(key=lambda c: c['seq'])
    return candidates[-1]['code']
def get_next_plan(current_plan):
    """Return the plan following *current_plan* in sequence order.

    With a falsy *current_plan* the first plan is returned; on the last plan
    ``Exception("no next plan")`` is raised.  NOTE(review): an unknown
    *current_plan* falls through and returns None — confirm this is intended.
    """
    plan_list = get_context("plan_list")
    ordered = [p["planId"] for p in sorted(plan_list, key=lambda x: x["sequence"])]
    get_logger().debug("get_next_plan ~~~~~~~~~~~~~~~~~~ plan_list:%s", ordered)
    if not current_plan:
        return ordered[0]
    if current_plan in ordered:
        idx = ordered.index(current_plan)
        if idx + 1 < len(ordered):
            return ordered[idx + 1]
        raise Exception("no next plan")
async def send_attendance_info_to_monitor(self):
    """Push the pipeline's on-duty attendance snapshot to the monitor service."""
    try:
        attendance_info = get_context('attendance_info')
        # deep-copy via JSON round-trip so filtering never touches the shared context
        snapshot = loads(dumps(attendance_info.setdefault(self.pipeline_code, {})))
        for group in snapshot.setdefault('checkinInfo', []):
            group['members'] = [m for m in group.setdefault('members', [])
                                if m.setdefault('on_work', False)]
    except Exception as err:
        get_logger().exception(str(err))
    else:
        await push_attendance_info(self.pipeline_code, snapshot)
        get_logger().info('push attendance info to monitor')
        get_logger().info(snapshot)
async def init_timely_wage_info(msg):
    """Sync timely-wage grouping from master data *msg* and persist it to mongo."""
    get_logger().info('计时工资数据同步')
    get_logger().info(msg)
    sync_timely_wage_info(msg)
    # persist the refreshed person -> wage-type mapping
    col = get_target_mongo_collection('timely_wage')
    for person_code, wage_type in get_context('timely_wage_info').items():
        # drop any other wage type this person had, then upsert the new one
        col.remove({'person': person_code, 'wage_type': {'$ne': wage_type}})
        col.update({'person': person_code},
                   {'$set': {'wage_type': wage_type}},
                   upsert=True)
def pipeline_process_handler(msg):
    """Load ``{pipeline_code: {process_code: sequence}}`` into the shared context."""
    get_logger().info('init pipeline process info')
    get_logger().debug(msg)
    if not msg:
        return
    root = msg[0]['children'][0]
    memory = get_context('pipeline_process')
    for pipeline in root.setdefault('instance_list', []):
        get_logger().debug(pipeline)
        code = pipeline['code']
        processes = pipeline['children'][0].setdefault('instance_list', [])
        get_logger().debug(processes)
        memory[code] = {p['code']: p['sequence'] for p in processes}
        get_logger().info('pipeline:{} process info inited'.format(code))
async def get_init_all_data(product_line):
    """Build the full dashboard dataset for *product_line* into the shared
    ``all_data`` context: process list, check-in, plans, equipment status,
    current plan and collection counts; marks it computed and returns it.

    NOTE(review): on failure the error is logged and the function implicitly
    returns None — callers must cope with a None result.
    """
    try:
        logger = get_logger()
        all_data = get_context("all_data")
        # remember the active product line for background jobs
        update_context("product_line", product_line)
        timestamp = get_now_timestamp()
        all_data["process_list"] = await get_process_list(product_line, timestamp)
        logger.debug("process_list~~~~~~~~~~~~~~~~~~~~:%s", all_data["process_list"])
        all_data["check_in"] = await get_attendance_data(product_line)
        logger.debug("check_in~~~~~~~~~~~~~~~~~~~~~~~~:%s", all_data["check_in"])
        all_data["plan"] = await get_plan_data(product_line)
        logger.debug("plan~~~~~~~~~~~~~~~~~~~~~~~~~~~~:%s", all_data["plan"])
        all_data["equipment_status"] = await get_status(product_line, timestamp)
        logger.debug("equipment_status~~~~~~~~~~~~~~~~~~~~~~~~:%s", all_data["equipment_status"])
        all_data["current_plan"] = await get_current_plan_by_group(product_line)
        logger.debug("current_plan~~~~~~~~~~~~~~~~~~~~~~~~:%s", all_data["current_plan"])
        all_data["data_collection"] = await get_positive_and_nagetive_count(product_line, timestamp)
        logger.debug("data_collection~~~~~~~~~~~~~~~~~~~~~~~~:%s", all_data["data_collection"])
        # flag lets cron_main skip the expensive full rebuild
        all_data["is_computed"] = True
        return all_data
    except Exception as e:
        traceback.print_exc()
        get_logger().error("get_all_data error:%s", e)
async def process_timely_wage_detail(self):
    """Compute each timely-wage person's monthly wage from recorded hours."""
    name_mapper = get_context('person_code_name_mapper')
    response = []
    col = get_target_mongo_collection('timely_wage')
    now = datetime.now()
    for record in col.find({}, {'_id': 0}):
        code = record['person']
        # resolve these before the await so a bad record fails early,
        # exactly as the original did
        name = name_mapper[code]
        wage_type = record['wage_type']
        price = record['price']
        hours = await get_working_hour_info({
            'year': now.year,
            'month': now.month,
            'person_list': [{
                'person_code': code,
                'wage_type': wage_type
            }]
        })
        # TODO: person -> pipeline mapping should be resolved dynamically
        response.append({
            'wage': price * hours[code],
            'person_code': code,
            'person_name': name,
            'workshop': '六车间',
            'pipeline': 'T90继电器生产线',
            'wage_type': TIMELY_WAGE_TYPE_MAPPER[wage_type],
            'piece_amount': None,
            'total_time': hours[code],
            'wage_code': 'time'
        })
    return response
def process_report_detail(self, report_detail):
    """Turn per-group report details into individual piece wages.

    For each reported group, sums materiel-group prices over the group's
    processes, multiplies by qualified counts, and splits the total evenly
    among the group's members.

    TODO: plug in the concrete wage formulas; the inner loops could become helpers.
    """
    col = get_target_mongo_collection('process_materiel_price')
    # in-memory report grouping info
    bg_group_info = get_context('group_info')
    materiel_group_info = get_context('materiel_group_info')
    individual_wage = []
    for g in report_detail:
        group_code = g['groupCode']
        target_group_info = filter(lambda x: x['groupCode'] == group_code, bg_group_info)
        target_group_members = []
        target_group_processes = []
        group_total_wage = 0
        group_total_materiel_amount = 0
        for x in target_group_info:
            target_group_members = x['members']
            target_group_processes = x['processCodes']
        for r in g['report']:
            # find the materiel group; fall back to the default group
            try:
                materiel_code = r['materielCode']
                target_materiel = filter(
                    lambda x: materiel_code in x['materiel_list'], materiel_group_info)
                target_materiel_group_code = next(target_materiel)['code']
            except StopIteration:
                get_logger().exception('materiel: {} get lost'.format(materiel_code))
                # TODO: default materiel group is hard-coded; fetch from master data later
                target_materiel_group_code = 'materiel_group_instance_default'
                get_logger().info('use code:{}'.format(target_materiel_group_code))
            materiel_amount = r['qualified_count']
            group_total_materiel_amount += materiel_amount
            group_materiel_price_info = col.find(
                {'materiel_group': target_materiel_group_code,
                 'process': {'$in': target_group_processes}},
                {'_id': 0})
            group_total_price = 0
            for p in group_materiel_price_info:
                # empty/None prices count as zero
                group_total_price += float(p.setdefault('price', 0) or 0)
            get_logger().info('group: {} => total price: {}'.format(
                target_materiel_group_code, group_total_price))
            group_total_wage += group_total_price * materiel_amount
        get_logger().info('group: {} => total wage: {}'.format(group_code, group_total_wage))
        member_count = len(target_group_members)
        if not member_count:
            # fix: a group with no members used to raise ZeroDivisionError and
            # abort the whole wage computation — skip it instead
            get_logger().error('group: {} has no members, skip wage share'.format(group_code))
            continue
        avg_wage = round(group_total_wage / member_count, 2)
        avg_amount = int(group_total_materiel_amount / member_count)
        for m in target_group_members:
            individual_wage.append({
                'wage': avg_wage,
                'person_code': m['code'],
                'person_name': m['name'],
                'workshop': m['workshop'],
                'pipeline': m['pipeline'],
                'wage_type': PEICE_WAGE['name'],
                'piece_amount': avg_amount,
                'total_time': None,
                'wage_code': 'piece'
            })
    return individual_wage
def get_name_by_code(code):
    """Resolve a person code to a name; unknown codes map to 'Alien'."""
    return next((p['name'] for p in get_context('person_info') if p['code'] == code),
                'Alien')
async def open(self):
    """Register this websocket connection in the shared broadcast list."""
    get_context("websocket_list").append(self)
async def get_process(self, *args, **kwargs):
    """Return the process × materiel-group price table.

    When a ``person_code`` query argument is supplied, rows are limited to
    the processes of that person's group.
    """
    person_code = self.get_argument('person_code', None)
    self.target_process_codes = None
    if person_code is not None:
        self.get_person_fz_group(person_code)
    col = get_target_mongo_collection('process_materiel_price')
    all_price_info = list(col.find({}, {'_id': 0, 'process': 1, 'materiel_group': 1, 'price': 1}))
    process_info = get_context('process_info')
    materiel_group_info = get_context('materiel_group_info')
    response_columns = [{
        'name': 'process',
        'label': '工序',
        'field': 'process'
    }]
    response_mapper = []
    for m in materiel_group_info:
        response_columns.append({
            'name': m['code'],
            'label': m['name'],
            'field': m['code']
        })
        response_mapper.append({
            'code': m['code'],
            'name': m['name'],
            'materiel_names': m['materiel_names']
        })

    def _fill_prices(record, process_code):
        # fix: this loop used to be duplicated verbatim in both branches below
        for m in materiel_group_info:
            group_code = m['code']
            target = filter(
                lambda x: x['process'] == process_code and x['materiel_group'] == group_code,
                all_price_info)
            try:
                record[group_code] = next(target).setdefault('price', '')
            except Exception:
                # no price record for this process/materiel-group pair
                record[group_code] = ''

    response_row_data = []
    for p in process_info:
        process_code = p['code']
        # keep only the person's processes when a filter is active
        if self.target_process_codes and process_code not in self.target_process_codes:
            continue
        record = {
            'process': p['name'],
            'process_code': process_code
        }
        _fill_prices(record, process_code)
        response_row_data.append(record)
    response = {
        'columns': response_columns,
        'rows': response_row_data,
        'mapper': response_mapper
    }
    self.send_response_data(MesCode.success, response,
                            'get process/materiel_group price mapper succefully')
def get_process_sequence_dict(product_line):
    """Return the cached ``{process_code: sequence}`` mapping for *product_line*,
    or None when the line is unknown."""
    return get_context('pipeline_process').get(product_line, None)
def sync_group_info(msg):
    """Rebuild per-pipeline attendance/group info in the shared context from
    master-data *msg*: for every pipeline it collects its process list (with
    manual-flag and threshold) and its member groups (with on-work state),
    ordered by process sequence.
    """
    # current sign-in state per person code
    on_work_mapper = get_current_on_work_info()
    pipeline_info = msg[0]['children'][0].setdefault('instance_list', [])
    get_logger().info('pipeline info')
    get_logger().info(pipeline_info)
    attendance_info = get_context('attendance_info')
    process_threshold_info = get_context('process_threshold')
    if not process_threshold_info:
        # lazily load thresholds on first use
        process_threshold = get_process_threshold_info()
        update_context('process_threshold', process_threshold)
        process_threshold_info = process_threshold
    for pipeline in pipeline_info:
        pipeline_code = pipeline['code']
        pipeline_group = pipeline['children']
        pipeline_process_in_memeory = []
        pipeline_process_codes_in_memeory = []
        group_info = []
        for group in pipeline_group[0].setdefault('instance_list', []):
            group_code = group['code']
            process_in_group = []
            person_in_group = []
            # groups without process info sort to the end by default
            sequence_flag = 99999
            for c in group.setdefault('children', []):
                if c['class_code'] == 'process':
                    # process sub-group of this group
                    process_list = c.setdefault('instance_list', [])
                    process_list = sorted(process_list, key=lambda x: int(x['sequence']))
                    for i, p in enumerate(process_list):
                        if i == 0:
                            # group sorts by its first (lowest) process sequence
                            sequence_flag = int(p['sequence'])
                        if p['code'] not in pipeline_process_codes_in_memeory:
                            # [code, name, is_manual, threshold, seq] — seq is
                            # popped off after sorting below
                            pipeline_process_in_memeory.append([p['code'], p['name'], p['classify'] != '自动', process_threshold_info.setdefault(p['code'], 0), int(p['sequence'])])
                            pipeline_process_codes_in_memeory.append(p['code'])
                        process_in_group.append(p['code'])
                elif c['class_code'] == 'person':
                    # person sub-group of this group
                    person_list = c.setdefault('instance_list', [])
                    for p in person_list:
                        person_in_group.append({
                            'code': p['code'],
                            'name': p['name'],
                            'on_work': on_work_mapper.setdefault(p['code'], False)
                        })
            group_info.append({
                'processCodes': process_in_group,
                'groupCode': group_code,
                'members': person_in_group,
                'sequence_flag': sequence_flag
            })
        # order the groups, then drop the helper key
        group_info = sorted(group_info, key=lambda x: x['sequence_flag'])
        for g in group_info:
            del g['sequence_flag']
        # update the pipeline's grouping data
        get_logger().info('更新产线分组数据')
        pipeline_checkin_info = attendance_info.setdefault(pipeline_code, {})
        pipeline_process_in_memeory = sorted(pipeline_process_in_memeory, key=lambda x: int(x[-1]))
        for x in pipeline_process_in_memeory:
            # strip the trailing sequence used only for sorting
            x.pop()
        pipeline_checkin_info.update({
            'processList': pipeline_process_in_memeory,
            'checkinInfo': group_info
        })
def on_close(self):
    """Deregister this websocket from the shared broadcast list."""
    websocket_list = get_context("websocket_list")
    # fix: remove() raised ValueError when open() never registered this
    # connection (e.g. rejected handshake)
    if self in websocket_list:
        websocket_list.remove(self)