class XioFigurePlot(QtGui.QWidget):
    """Dashboard widget that periodically redraws the monitoring figures.

    A background ``Timer`` thread emits ``updatePlay()`` every 2 seconds,
    which triggers :meth:`draw` to refresh all four graphics views from
    the database.
    """

    def __init__(self):
        super(XioFigurePlot, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)

        # Worker thread: fires updatePlay() every 2 seconds to refresh plots.
        self.thread_figure = Timer('updatePlay()', sleep_time=2)
        self.connect(self.thread_figure, QtCore.SIGNAL('updatePlay()'), self.draw)
        self.thread_figure.start()

    def draw(self):
        """Redraw the pie, OEE, loss and consumables figures."""
        def draw_fp():  # draw the loss pie chart
            fp = Figure_Pie()
            da = data_access.EquipmentData()
            result = da.select()
            # '*' unpacks the tuple, e.g. (1, 1, 1, 1) -> 1 1 1 1
            fp.plot(*(result[-1][1], result[-1][2], result[-1][3], result[-1][4]))
            graphicscene_fp = QtGui.QGraphicsScene()
            graphicscene_fp.addWidget(fp.canvas)
            self.ui.graphicsView_Pie.setScene(graphicscene_fp)
            self.ui.graphicsView_Pie.show()

        def draw_oee():  # draw the daily OEE trend chart
            l_eff = []
            oee = Figure_OEE()
            da = data_access.OEEData()
            result = da.select()
            # Collect the non-NULL hourly values of the most recent row.
            for i in range(1, len(result[-1])):
                # Fixed: was `!= None`; identity comparison is correct for NULLs.
                if result[-1][i] is not None:
                    l_eff.append(result[-1][i])
            oee.plot(*tuple(l_eff))
            graphicscene_oee = QtGui.QGraphicsScene()
            graphicscene_oee.addWidget(oee.canvas)
            self.ui.graphicsView_OEE.setScene(graphicscene_oee)
            self.ui.graphicsView_OEE.show()

        def draw_loss():  # draw the loss histogram
            loss = Figure_Loss()
            da = data_access.EquipmentTimeData()
            result = da.select()
            loss.plot(*(result[-1][1], result[-1][2], result[-1][3], result[-1][4]))
            graphicscene_loss = QtGui.QGraphicsScene()
            graphicscene_loss.addWidget(loss.canvas)
            self.ui.graphicsView_Loss.setScene(graphicscene_loss)
            self.ui.graphicsView_Loss.show()

        def draw_mt():  # draw the consumables-usage chart
            mt = Figure_MT()
            mt.plot()
            graphicscene_mt = QtGui.QGraphicsScene()
            graphicscene_mt.addWidget(mt.canvas)
            self.ui.graphicsView_MT.setScene(graphicscene_mt)
            self.ui.graphicsView_MT.show()

        draw_fp()
        draw_loss()
        draw_mt()
        draw_oee()
# Example #2
class XioFigurePlot(QtGui.QWidget):
    """Plotting widget: refreshes the dashboard figures on a timer thread.

    Every 10 seconds the background ``Timer`` emits ``updatePlay()``,
    which calls :meth:`draw`. Only the pie chart is implemented in this
    variant; the remaining charts are stubs.
    """

    def __init__(self):
        super(XioFigurePlot, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)

        # Redraw every 10 seconds via the updatePlay() signal.
        self.thread_figure = Timer('updatePlay()', sleep_time=10)
        self.connect(self.thread_figure, QtCore.SIGNAL('updatePlay()'), self.draw)
        self.thread_figure.start()

    def draw(self):
        """Refresh the figures (currently only the loss pie chart)."""
        def draw_fp():  # loss pie chart
            pie = Figure_Pie()
            # '*' unpacks the tuple, i.e. (1, 1, 1, 1) becomes 1 1 1 1
            pie.plot(*(1, 1, 1, 1))
            scene = QtGui.QGraphicsScene()
            scene.addWidget(pie.canvas)
            self.ui.graphicsView_Pie.setScene(scene)
            self.ui.graphicsView_Pie.show()

        def draw_oee():  # daily OEE trend chart (not implemented)
            pass

        def draw_loss():  # loss histogram (not implemented)
            pass

        def draw_mt():  # consumables-usage chart (not implemented)
            pass

        draw_fp()
# Example #3
def batch_size_linear_search():
    """Sweep batch sizes and measure epoch wall time for each.

    Trains via ``main`` once per batch size in [8, 600) with step 8,
    timing each run, then pickles the ``{batch_size: seconds}`` mapping
    to ``batch_size_times.pickle`` and plots batch size vs. epoch time.
    """
    # Renamed from `min`/`max`, which shadowed the builtins.
    min_batch = 8
    max_batch = 600
    step_size = 8

    optimizer = lambda x: torch.optim.SGD(x, lr=0.1)
    experiment_name = "batch_size_linear_search"
    t = Timer()

    batch_size_times = {}
    for i, batch_size in enumerate(range(min_batch, max_batch, step_size)):
        t.start()
        main(experiment_name, optimizer, epochs=i + 2, batch_size=batch_size)
        elapsed_time = t.stop()
        batch_size_times[batch_size] = elapsed_time

    # Context manager guarantees the file handle is closed (was a bare open()).
    with open("batch_size_times.pickle", "wb") as f:
        pickle.dump(batch_size_times, f)

    # Plot epoch time against batch size, in ascending batch-size order.
    batch_sizes = sorted(batch_size_times)
    times = [batch_size_times[k] for k in batch_sizes]

    plt.plot(np.array(batch_sizes), np.array(times))
    plt.xlabel("Batch Size")
    plt.ylabel("Epoch Time")
    plt.title("Batch Size vs Epoch Time")
    plt.show()
# Example #4
class XioAll(QtGui.QWidget):
    '''这个类为主程序类
    '''
    HOST = 'localhost'
    PORT = 8081
    TOTAL = 0
    isStatic = True
    Shumei = None
    action = None
    pre_action = None
    action_video = None  # 视频内能识别
    pre_action_video = None

    def __init__(self):
        """Build the main window: DB bootstrap, model load, worker threads.

        Starts a TCP server, a video-reader thread, and several ``Timer``
        threads that periodically drive drawing, recognition and DB reads.
        """
        super(XioAll, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)

        self.frame_left = None
        self.frame_right = None
        self.is_work = True
        self.stype = 0
        self.one_static_time = 0  # duration of the current single idle/fault period
        self.all_time = 0  # accumulated working time for the day
        self.q = MyQueue()  # queue of frames; storing states instead would be better
        self.vision = Vision()

        # Video-source input control
        self.CamPath = ""
        self.isWebCam = False
        self.isCamChanged = False

        # Database access
        self.da = data_access.DataAccess()

        # If the date has changed since the last row, insert a fresh row for today.
        # NOTE(review): the original comment said "all-zero data", but four
        # columns are seeded with 10 — confirm which is intended.
        result_loss = self.da.select_(
            "select * from loss ORDER BY SJ DESC limit 1")
        current_time = datetime.datetime.now().strftime('%Y-%m-%d')
        if str(result_loss[0][0]) != current_time:
            self.da.operate_(
                'insert into loss(SJ,action1,action2,action3,action4,action5,action6)values'
                '("%s",%d,%d,%d,%d,%d,%d)' %
                (current_time, 10, 10, 10, 10, 0, 0))
        else:
            pass

        result_oee = self.da.select_(
            'select * from oee_date ORDER BY SJC DESC limit 1')
        if str(result_oee[0][0]) != current_time:
            self.da.operate_(
                'insert into oee_date(SJC,O8,O9,O10,O11,O12,O13,O14,O15,O16,O17,O18)values'
                '("' + current_time + '",0,0,0,0,0,0,0,0,0,0,0)')
        else:
            pass

        self.yolo_Model = Yolo_Model.Yolo_Model()
        # self.displayMessage("...加载YOLO模型成功...")

        self.thread_figure = Timer('updatePlay()',
                                   sleep_time=120)  # refreshes the plot area every 2 minutes
        self.connect(self.thread_figure, QtCore.SIGNAL('updatePlay()'),
                     self.draw)
        self.thread_figure.start()

        # Button wiring
        self.connect(self.ui.fileSelectButton, QtCore.SIGNAL('clicked()'),
                     self.fileSelect)
        self.connect(self.ui.mailSenderButton, QtCore.SIGNAL('clicked()'),
                     self.mailSend)
        self.connect(self.ui.confirmDateButton, QtCore.SIGNAL('clicked()'),
                     self.displayMonthData)
        self.connect(self.ui.WebCamButton, QtCore.SIGNAL('clicked()'),
                     self.webCamInput)

        self.server = ThreadedTCPServer(
            (self.HOST, self.PORT),
            ThreadedTCPRequestHandler)  # this thread keeps listening for client requests
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.start()

        self.thread_video_receive = threading.Thread(
            target=self.video_receive_local)  # this thread reads the video stream
        self.thread_video_receive.start()

        self.thread_time = Timer('updatePlay()')  # draws onto the label every 0.04 s
        self.connect(self.thread_time, QtCore.SIGNAL('updatePlay()'),
                     self.video_play)
        self.thread_time.start()

        self.thread_recog = Timer('updatePlay()',
                                  sleep_time=1)  # analyses a frame once per second
        self.connect(self.thread_recog, QtCore.SIGNAL('updatePlay()'),
                     self.video_recog)
        self.thread_recog.start()

        self.thread_data = Timer('updatePlay()',
                                 sleep_time=1800)  # reads from the database every half hour
        self.connect(self.thread_data, QtCore.SIGNAL('updatePlay()'),
                     self.data_read)
        self.thread_data.start()

        self.thread_shumei = threading.Thread(target=self.shumeiDeal)
        self.thread_shumei.start()

        self.thread_control = Timer('updatePlay()',
                                    sleep_time=10)  # NOTE(review): comment said "half hour" but interval is 10 s — confirm
        self.connect(self.thread_control, QtCore.SIGNAL('updatePlay()'),
                     self.control_judge)
        self.thread_control.start()

        # 12-25
        self.thread_recogtiaoshi = Timer('updatePlay()',
                                         sleep_time=0.3)  # analyses a frame every 0.3 s (debugging/tiaoshi)
        self.connect(self.thread_recogtiaoshi, QtCore.SIGNAL('updatePlay()'),
                     self.video_recogtiaoshi)
        self.thread_recogtiaoshi.start()

        self.thread_recogzhuangji = Timer('updatePlay()',
                                          sleep_time=0.3)  # analyses a frame every 0.3 s (loading/zhuangji)
        self.connect(self.thread_recogzhuangji, QtCore.SIGNAL('updatePlay()'),
                     self.video_recogzhuangji)
        self.thread_recogzhuangji.start()

        # Detection state (left/right regions, motion flags, timers).
        self.X_l = 0
        self.Y_l = 0
        self.type_l = ""
        self.flag = 0
        self.a = 0
        self.tiaoshi_back = False
        self.tiaoshi_forward = False
        self.X_r = 0
        self.Y_r = 0
        self.type_r = ""
        self.firstFrame = None
        self.chaiji_left = False
        self.chaiji_right = False
        self.cltime = 0
        self.crtime = 0
        self.totaltime = 0

        # Counters displayed on the panel.
        self.work_time = 0
        self.tf_time = 0
        self.tb_time = 0

        self.Ldown = [0] * 10
        self.Lup = [0] * 10  # sliding-window queues of detection hits
        self.Lhandsdown = [0] * 10
        self.Lhandsup = [0] * 10

        self.isJudgeMachineT = True

        # Side-panel loading (zhuangji) detection: region masks and HSV bounds.
        self.mask_right = cv2.imread(
            "E:/projects-summary/xiaowork/maindo/images/zhuangjiimages/right.jpg"
        )
        self.mask_left = cv2.imread(
            "E:/projects-summary/xiaowork/maindo/images/zhuangjiimages/maskleft.jpg"
        )
        self.left_base = cv2.imread(
            "E:/projects-summary/xiaowork/maindo/images/zhuangjiimages/left_base.jpg",
            0)
        # NOTE(review): named red*, but H 26-34 is the yellow hue range in OpenCV HSV — confirm.
        self.redLower = np.array([26, 43, 46])
        self.redUpper = np.array([34, 255, 255])
        self.Lright = [0] * 10
        self.Lleft = [0] * 10
        self.is_JudgeRL = True
        self.isRightStart = False
        self.isLeftStart = False
        self.zhuangjitime = 0

        # Nozzle-cleaning (tiaoshi) detection state.
        self.status_LUP = [0] * 8
        self.status_LDOWN = [0] * 8
        self.isActionStartUP = False
        self.isActionStartDOWN = False

        self.x1UP, self.y1UP, self.x2UP, self.y2UP = [0, 0, 0, 0]
        self.X1DOWN, self.Y1DOWN, self.X2DOWN, self.Y2DOWN = [0, 0, 0, 0]

        # Timestamps for timed on-screen captions.
        self.putTextStart_time = None
        self.putTextEnd_time_left = None
        self.putTextEnd_time_right = None
        self.putTextEnd_time_up = None
        self.putTextEnd_time_down = None

    def fileSelect(self):
        """Let the user pick an MP4 file and switch the detection source to it.

        Resets all detection state before swapping the source. If the dialog
        is cancelled, the current video source is kept and a message is shown.
        """
        absolute_path = QFileDialog.getOpenFileName(self, '视频选择', '.',
                                                    "MP4 files (*.mp4)")

        # Fixed: original used `is not ""` — an identity test against a string
        # literal, which is unreliable and a SyntaxWarning on Python 3.8+.
        if absolute_path != "":
            self.reFlushDetection()
            self.CamPath = absolute_path
            self.isWebCam = False
            self.isCamChanged = True
        else:
            self.displayMessage("...未进行选择,视频源路径不变...")

    def webCamInput(self):
        """Prompt for a network-camera address and switch the source to it.

        Probes the address with a throwaway ``cv2.VideoCapture``; on a
        successful read the camera becomes the new source, otherwise an
        error message is shown (unless the dialog reported a bad password).
        """
        webCamDict = {"address": "", "status": ""}
        webCamBox = WebCamBox("网络摄像头管理", webCamDict)

        # Dialog was closed/cancelled by the user.
        if webCamBox.exec_():
            return
        if webCamDict["status"] == "":
            return

        ret = False
        cap = None
        try:
            cap = cv2.VideoCapture(webCamDict["address"])
            ret, frame = cap.read()
        except Exception:
            raise  # bare re-raise keeps the original traceback (was `raise e`)
        finally:
            if cap is not None:
                cap.release()  # fixed: the probe capture was previously leaked
            if ret:
                self.CamPath = webCamDict["address"]
                self.isWebCam = True
                self.isCamChanged = True
                self.reFlushDetection()
                self.displayMessage("...更换网络摄像头成功...")
            else:
                if webCamDict["status"] != "WrongPassword":
                    self.displayMessage("...IP地址错误,请重新输入...")

    def reFlushDetection(self):
        """Reset all detection state before switching to a new video source."""
        self.X_l = 0
        self.Y_l = 0
        self.type_l = ""
        self.flag = 0
        self.a = 0
        self.tiaoshi_back = False
        self.tiaoshi_forward = False
        self.X_r = 0
        self.Y_r = 0
        self.type_r = ""
        self.firstFrame = None
        self.chaiji_left = False
        self.chaiji_right = False
        self.cltime = 0
        self.crtime = 0
        self.totaltime = 0

        # Counters displayed on the panel.
        self.work_time = 0
        self.tf_time = 0
        self.tb_time = 0

        self.Ldown = [0] * 10
        self.Lup = [0] * 10  # sliding-window queues of detection hits
        self.Lhandsdown = [0] * 10
        self.Lhandsup = [0] * 10

        self.isJudgeMachineT = True
        self.tiaoshitime = 0

        self.Lright = [0] * 10
        self.Lleft = [0] * 10
        self.is_JudgeRL = True
        self.isRightStart = False
        self.isLeftStart = False
        self.zhuangjitime = 0

        # NOTE(review): __init__ sizes these as [0]*8 and [0]*8, but here they
        # are reset to 10 and 15 entries — confirm which lengths are intended,
        # since the >5 / <2 thresholds in video_recogtiaoshi depend on them.
        self.status_LUP = [0] * 10
        self.status_LDOWN = [0] * 15
        self.isActionStartUP = False
        self.isActionStartDOWN = False

        # Timestamps for timed on-screen captions.
        self.putTextStart_time = None
        self.putTextEnd_time_left = None
        self.putTextEnd_time_right = None
        self.putTextEnd_time_up = None
        self.putTextEnd_time_down = None

        self.displayMessage("...初始化检测参数成功...")

    def mailSend(self):
        """Collect recipient addresses via a dialog and e-mail today's stats.

        Builds a plain-text report from the OEE and loss tables and sends it
        through smtp.qq.com. Errors during sending are printed, not raised.
        """
        list_mail = []
        dilogUi = warningBox(u"邮件发送", u"请输入邮箱:", list_mail)
        if dilogUi.exec_():
            return
        if len(list_mail) == 0:
            return
        if len(list_mail[0]) != 0:
            print("准备发送!")

            list_oee = self.da.select_oee()
            list_loss = self.da.select_loss()
            dict_oee = {}
            # Report hourly OEE from 08:00 up to the current hour, capped at 18:00.
            hour = min(time.localtime()[3], 18)
            for i in range(8, hour + 1):
                dict_oee[str(i) + "点"] = list_oee[i - 8]
            sender = '*****@*****.**'
            list_mail.append("*****@*****.**")

            message = "侧板焊接生产线生产数据\n" \
                      "\n" \
                      "今日OEE效能数据如下所示:\n" \
                      "{}" \
                      "\n" \
                      "\n" \
                      "*注:效率为0时未进行检测。\n" \
                      "\n" \
                      "今日设备运行情况分布如下所示:" \
                      "\n" \
                      "清理焊嘴:{} \n" \
                      "装载侧板:{} \n" \
                      "机器静止:{} \n" \
                      "机器工作:{} \n".format(dict_oee, list_loss[0], list_loss[1], list_loss[2], list_loss[3])

            msg_wait = MIMEText(message, 'plain', 'utf-8')
            try:
                smtpObj = smtplib.SMTP()
                smtpObj.connect("smtp.qq.com", 25)
                # NOTE(review): SMTP auth token hard-coded in source — move to config.
                mail_license = "wuhchbmndrjabgcc"
                print("准备登录")
                smtpObj.login(sender, mail_license)
                print("登录成功!")
                smtpObj.set_debuglevel(1)
                smtpObj.sendmail(sender, list_mail, msg_wait.as_string())
                # NOTE(review): connection is never closed (no smtpObj.quit()).
            except Exception as e:
                print(e)
    def displayMonthData(self):
        """Fill the date table with the OEE rows for the selected month.

        Reads the month from the date editor, queries ``oee_date`` by
        ``YYYY-MM``, and renders either a "no data" placeholder cell or a
        row-per-day table of the hourly values 8h-18h.
        """
        self.ui.DateTable.clear()

        # Get the selected month.
        select_date = self.ui.dateEdit.text()
        queryByMonth = "select * from oee_date where date_format(SJC,'%Y-%m')='{}'".format(
            select_date)

        # Fetch matching rows.
        result = self.da.select_(queryByMonth)
        row = len(result)
        if row == 0:
            # No data: single read-only cell with a placeholder message.
            self.ui.DateTable.setRowCount(1)
            self.ui.DateTable.setColumnCount(1)
            self.ui.DateTable.setEditTriggers(
                QtGui.QTableWidget.NoEditTriggers)
            self.ui.DateTable.horizontalHeader().setResizeMode(
                QtGui.QHeaderView.Stretch)
            newItem = QtGui.QTableWidgetItem(
                "                    日期 {} 暂无数据".format(
                    select_date))  # QTableWidgetItem accepts str, not int
            textFont = QtGui.QFont("song", 16, QtGui.QFont.Bold)
            newItem.setFont(textFont)

            self.ui.DateTable.setItem(0, 0, newItem)
        else:
            # Table layout: one row per day, date + 11 hourly columns.
            self.ui.DateTable.setRowCount(row)
            self.ui.DateTable.setColumnCount(12)
            self.ui.DateTable.setHorizontalHeaderLabels([
                '日期', '8时', '9时', '10时', '11时', '12时', '13时', '14时', '15时',
                '16时', '17时', '18时'
            ])
            self.ui.DateTable.setEditTriggers(
                QtGui.QTableWidget.NoEditTriggers)
            self.ui.DateTable.horizontalHeader().setResizeMode(
                QtGui.QHeaderView.Stretch)

            # Fill the cells: column 0 shows MM-DD, the rest integer values.
            for i in range(row):
                list_data = list(result[i])
                for j in range(12):
                    if j == 0:
                        cnt = str(list_data[j])[5:10]
                    else:
                        cnt = str(int(list_data[j]))
                    newItem = QtGui.QTableWidgetItem(cnt)  # accepts str, not int
                    textFont = QtGui.QFont("song", 12, QtGui.QFont.Bold)
                    newItem.setFont(textFont)
                    self.ui.DateTable.setItem(i, j, newItem)

    def control_judge(self):
        """Placeholder: periodic control check driven by thread_control; not implemented."""
        pass

    def video_recogtiaoshi(self):
        """Detect nozzle-cleaning activity in the upper and lower regions.

        Runs YOLO person detection on two fixed crops of the left frame,
        pushes hit/miss into sliding windows, and uses threshold crossings
        to report action start/end and update the loss table.
        Skipped entirely when the source is a network camera.
        """
        if self.isWebCam:
            return
        frame = self.frame_left
        # Lower region crop (rows 250:500, cols 680:970).
        frameDown = frame[250:500, 680:970]

        # Upper region crop.
        frameUP = frame[140:400, 540:800]

        # Sliding-window detection: append newest hit, drop oldest.

        isPersonUP, self.x1UP, self.y1UP, self.x2UP, self.y2UP = self.yolo_Model.detect_person(
            frameUP)
        if isPersonUP:
            self.status_LUP.append(1)
        else:
            self.status_LUP.append(0)
        self.status_LUP.pop(0)

        isPersonDOWN, self.X1DOWN, self.Y1DOWN, self.X2DOWN, self.Y2DOWN = self.yolo_Model.detect_person(
            frameDown)
        if isPersonDOWN:
            self.status_LDOWN.append(1)
        else:
            self.status_LDOWN.append(0)
        self.status_LDOWN.pop(0)

        # Start when >5 recent hits; end when hits drop below 2.
        if sum(self.status_LUP) > 5 and self.isActionStartUP is False:
            self.displayMessage("工人上方开始清理焊嘴")
            self.isActionStartUP = True
            self.putTextStart_time = time.time()
            self.da.insert_action_("qinglihanzuiUP", 0)
        if sum(self.status_LUP) < 2 and self.isActionStartUP is True:
            self.displayMessage("工人上方结束清理焊嘴")
            self.isActionStartUP = False
            self.putTextEnd_time_up = time.time()
            self.da.insert_action_("qinglihanzuiUP", 1)
            self.da.update_loss_("action1", 1)
            self.da.update_loss_("action3", random.randint(0, 2))

        # Lower region uses a stricter end condition (all zeros).
        if sum(self.status_LDOWN) > 5 and self.isActionStartDOWN is False:
            self.displayMessage("工人下方开始清理焊嘴")
            self.isActionStartDOWN = True
            self.putTextStart_time = time.time()
            self.da.insert_action_("qinglihanzuiDOWN", 0)
        if sum(self.status_LDOWN) == 0 and self.isActionStartDOWN is True:
            self.displayMessage("工人下方结束清理焊嘴")
            self.isActionStartDOWN = False
            self.putTextEnd_time_down = time.time()
            self.da.insert_action_("qinglihanzuiDOWN", 1)
            self.da.update_loss_("action1", 1)
            self.da.update_loss_("action3", random.randint(0, 2))

    def video_recogzhuangji(self):
        """Detect side-panel loading on the left/right sides via HSV masking.

        Masks the frame with the per-side region images, thresholds the HSV
        range, and treats a low in-range pixel count as "panel being loaded".
        Sliding windows debounce start/end events, which are logged and
        written to the loss table. Skipped for network cameras.
        """
        if self.isWebCam:
            return
        img = self.frame_left
        img = cv2.resize(img, (1280, 720))
        img_right = cv2.bitwise_and(self.mask_right, img)
        hsv_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2HSV)
        mask_det = cv2.inRange(hsv_right, self.redLower, self.redUpper)
        img_left = cv2.bitwise_and(self.mask_left, img)
        hsv_left = cv2.cvtColor(img_left, cv2.COLOR_BGR2HSV)
        mask_det1 = cv2.inRange(hsv_left, self.redLower, self.redUpper)

        if self.is_JudgeRL is True:
            # Right side: few in-range pixels -> loading in progress.
            if np.sum(mask_det) < 10000:
                self.Lright.append(1)
            else:
                self.Lright.append(0)
            self.Lright.pop(0)
            if sum(self.Lright) > 6 and self.isRightStart is False:
                self.displayMessage("工人开始右方装载侧板")
                self.isRightStart = True
                self.putTextStart_time = time.time()
                self.da.insert_action_("zhuangjiRIGHT", 0)

            if sum(self.Lright) < 2 and self.isRightStart is True:
                self.displayMessage("工人结束右方装载侧板")
                self.isRightStart = False
                self.putTextEnd_time_right = time.time()
                self.da.insert_action_("zhuangjiRIGHT", 1)
                self.da.update_loss_("action2", 1)
            # Left side: same scheme, larger pixel threshold.
            if np.sum(mask_det1) < 50000:
                self.Lleft.append(1)
            else:
                self.Lleft.append(0)
            self.Lleft.pop(0)
            if sum(self.Lleft) > 6 and self.isLeftStart is False:
                self.displayMessage("工人开始左方装载侧板")
                self.isLeftStart = True
                self.putTextStart_time = time.time()
                self.da.insert_action_("zhuangjiLEFT", 0)
            if sum(self.Lleft) < 2 and self.isLeftStart is True:
                self.displayMessage("工人结束左方装载侧板")
                self.isLeftStart = False
                self.putTextEnd_time_left = time.time()
                self.da.insert_action_("zhuangjiLEFT", 1)
                self.da.update_loss_("action2", 1)

    def shumeiDeal(self):
        """Poll the global ``Stype`` flag and log worker-status transitions.

        Runs forever on its own thread (started in __init__). When the
        external flag changes from 0 to a state 1-6, a start message is
        logged; when it returns to 0, the matching end message is logged
        and the local state resets. Polls roughly every 60 ms.

        NOTE(review): states 4-6 reuse the state-1 message text and state 3
        logs an empty suffix — these look like copy-paste placeholders;
        confirm the intended wording.
        """
        global Stype
        while True:
            # Transitions 0 -> N: log the start of the corresponding activity.
            if Stype == 1 and self.stype == 0:
                message = '[' + time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.localtime(
                        time.time())) + ']' + "******" + "工人吃饭!"
                self.displayMessage(message)
                self.stype = 1
            if Stype == 2 and self.stype == 0:
                message = '[' + time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.localtime(
                        time.time())) + ']' + "******" + "5s保养"
                self.displayMessage(message)
                self.stype = 2
            if Stype == 3 and self.stype == 0:
                message = '[' + time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.localtime(
                        time.time())) + ']' + "******" + ""
                self.displayMessage(message)
                self.stype = 3
            if Stype == 4 and self.stype == 0:
                message = '[' + time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.localtime(
                        time.time())) + ']' + "******" + "工人吃饭!"
                self.displayMessage(message)
                self.stype = 4
            if Stype == 5 and self.stype == 0:
                message = '[' + time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.localtime(
                        time.time())) + ']' + "******" + "工人吃饭!"
                self.displayMessage(message)
                self.stype = 5
            if Stype == 6 and self.stype == 0:
                message = '[' + time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.localtime(
                        time.time())) + ']' + "******" + "工人吃饭!"
                self.displayMessage(message)
                self.stype = 6
            # Transition N -> 0: log the end message for the remembered state.
            if Stype == 0:
                if self.stype == 1:
                    message = '[' + time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(
                            time.time())) + ']' + "******" + "工人结束吃饭!"
                    self.stype = 0
                    self.displayMessage(message)
                if self.stype == 2:
                    message = '[' + time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(
                            time.time())) + ']' + "******" + "工人结束5s!"
                    self.stype = 0
                    self.displayMessage(message)
                if self.stype == 3:
                    message = '[' + time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(
                            time.time())) + ']' + "******" + "工人结束吃饭!"
                    self.stype = 0
                    self.displayMessage(message)
                if self.stype == 4:
                    message = '[' + time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(
                            time.time())) + ']' + "******" + "工人结束吃饭!"
                    self.stype = 0
                    self.displayMessage(message)
                if self.stype == 5:
                    message = '[' + time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(
                            time.time())) + ']' + "******" + "工人吃饭!"
                    self.stype = 0
                    self.displayMessage(message)
                if self.stype == 6:
                    message = '[' + time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(
                            time.time())) + ']' + "******" + "工人吃饭!"
                    self.stype = 0
                    self.displayMessage(message)

            time.sleep(0.06)

    def video_receive_local(
            self,
            cam1='E:/projects-summary/xiaowork/侧板焊接待检测视频/检测视频200519134451.mp4',
            cam2='E:\\剪辑\\zhuangji\\ch11_20171221084313 00_09_06-00_10_21~2.mp4',
            time_flag=True):
        '''Read the local video stream and publish frames to ``self.frame_left``.

        :param cam1: left-camera source (path or URL)
        :param cam2: right-camera source (currently unused in this loop)
        :param time_flag: whether to sleep between reads; True for local files
            to approximate 25 fps playback
        :return: None (loops forever)
        '''

        self.left_cam = cv2.VideoCapture(cam1)
        ret_1, frame_1 = self.left_cam.read()

        # Main read loop: republish the last frame, reopen on end-of-stream,
        # and hot-swap the source when the UI requested a change.
        while True:
            self.frame_left = frame_1
            if ret_1 is False:
                # End of file / read failure: restart from the original source.
                self.left_cam = cv2.VideoCapture(cam1)
            if self.CamPath != "" and self.isCamChanged:
                # Source changed via fileSelect/webCamInput: swap exactly once.
                self.left_cam = cv2.VideoCapture(self.CamPath)
                self.isCamChanged = False
            ret_1, frame_1 = self.left_cam.read()
            if time_flag is True:
                time.sleep(0.04)  # ~25 fps pacing for local playback

    def video_receive_rstp(self, cam1='rstp:', cam2='rstp:'):
        '''Receive a network video stream (delegates to video_receive_local).

        NOTE(review): "rstp" is presumably a typo for "rtsp"; the name is
        kept because callers may depend on it.

        :param cam1: left-camera stream URL
        :param cam2: right-camera stream URL
        :return: None
        '''
        # No sleep between reads: network streams pace themselves.
        self.video_receive_local(cam1=cam1, cam2=cam2, time_flag=False)

    def video_play(self):
        '''Render the current left frame, with overlays, into the UI label.

        Called every 0.04 s by thread_time. Draws detection rectangles and
        timed Chinese captions for the four tracked actions, then scales the
        frame to 360x240 and pushes it to the label as a QPixmap.
        :return: None
        '''
        # Default arg binds self.ui.label at each call (nested def), so it
        # always refers to the current UI label.
        def label_show_left(frame, label=self.ui.label):  # render into the left label
            height, width, _ = frame.shape
            frame_change = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if self.type_l == 'work':
            #    cv2.rectangle(frame_change, (self.X_l, self.Y_l), (self.X_l + 100, self.Y_l + 100), (0, 255, 0), 4)
            frame_change = putChineseText.cv2ImgAddText(
                frame_change, "生产操作行为的自动识别(侧板焊接车间)", 50, 30, (0, 0, 0), 50)
            # Upper nozzle-cleaning: box the detection (crop offsets 540/140,
            # scale 0.625 maps crop coords back to the full frame) and show a
            # caption for the first 5 seconds.
            if self.isActionStartUP is True:
                cv2.rectangle(frame_change, (540 + int(self.x1UP * 0.625),
                                             140 + int(self.y1UP * 0.625)),
                              (540 + int(self.x2UP * 0.625),
                               140 + int(self.y2UP * 0.625)), (255, 0, 0), 6)
                if time.time() - self.putTextStart_time > 0 and time.time(
                ) - self.putTextStart_time < 5:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "工人开始在上方清理焊嘴", 140, 60)

            # Lower nozzle-cleaning: same idea with offsets 680/250, scale 0.721.
            if self.isActionStartDOWN is True:
                cv2.rectangle(frame_change, (int(self.X1DOWN * 0.721) + 680,
                                             int(self.Y1DOWN * 0.721) + 250),
                              (int(self.X2DOWN * 0.721) + 680,
                               int(self.Y2DOWN * 0.721) + 250), (255, 0, 0), 6)
                if time.time() - self.putTextStart_time > 0 and time.time(
                ) - self.putTextStart_time < 5:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "工人开始在下方清理焊嘴", 140, 60)

            # Left/right panel loading: fixed region box + marker + caption,
            # shown for the first 5 seconds after the action started.
            if self.isLeftStart is True:
                if time.time() - self.putTextStart_time > 0 and time.time(
                ) - self.putTextStart_time < 5:
                    cv2.rectangle(frame_change, (0, 150), (300, 720),
                                  (255, 255, 0), 6)
                    cv2.circle(frame_change, (150, 435), 6, (255, 0, 0), 20)

                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "工人开始在左方装载侧板", 140, 60)

            if self.isRightStart is True:
                if time.time() - self.putTextStart_time > 0 and time.time(
                ) - self.putTextStart_time < 5:
                    cv2.rectangle(frame_change, (880, 100), (1080, 380),
                                  (255, 255, 0), 6)
                    cv2.circle(frame_change, (980, 240), 6, (255, 0, 0), 20)
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "工人开始在右方装载侧板", 140, 60)

            # End-of-action captions: shown for 3 seconds after the action ends.

            if self.isLeftStart is False:
                if self.putTextEnd_time_left is not None and time.time(
                ) - self.putTextEnd_time_left > 0 and time.time(
                ) - self.putTextEnd_time_left < 3:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "工人结束左方装载侧板", 140, 60)

            if self.isRightStart is False:
                if self.putTextEnd_time_right is not None and time.time(
                ) - self.putTextEnd_time_right > 0 and time.time(
                ) - self.putTextEnd_time_right < 3:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "工人结束右方装载侧板", 140, 60)

            if self.isActionStartDOWN is False:
                if self.putTextEnd_time_down is not None and time.time(
                ) - self.putTextEnd_time_down > 0 and time.time(
                ) - self.putTextEnd_time_down < 3:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "工人结束下方清理焊嘴", 140, 60)

            if self.isActionStartUP is False:
                if self.putTextEnd_time_up is not None and time.time(
                ) - self.putTextEnd_time_up > 0 and time.time(
                ) - self.putTextEnd_time_up < 3:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "工人结束上方清理焊嘴", 140, 60)

            frame_resize = cv2.resize(frame_change, (360, 240),
                                      interpolation=cv2.INTER_AREA)

            image = QtGui.QImage(frame_resize.data, frame_resize.shape[1],
                                 frame_resize.shape[0],
                                 QtGui.QImage.Format_RGB888)  # convert to QImage
            label.setPixmap(QtGui.QPixmap.fromImage(image))

        if self.frame_left is not None:
            label_show_left(self.frame_left)

    def draw(self):
        '''
        Refresh the dashboard figures (pie, loss histogram, OEE trend).
        Called every 120 s by thread_figure.
        :return: None
        '''
        def draw_fp():  # loss pie chart (normalized shares)
            fp = Figure_Pie()
            loss_data = self.da.select_loss()
            sum1 = sum(loss_data)
            # NOTE(review): in-place `/=` implies loss_data supports elementwise
            # division (presumably a numpy array) and that sum1 != 0 — confirm
            # against DataAccess.select_loss.
            loss_data /= sum1
            fp.plot(*tuple(loss_data))
            graphicscene_fp = QtGui.QGraphicsScene()
            graphicscene_fp.addWidget(fp.canvas)
            self.ui.graphicsView_Pie.setScene(graphicscene_fp)
            self.ui.graphicsView_Pie.show()

        def draw_oee():  # daily OEE trend chart
            self.da.update_oee()
            oee = Figure_OEE()
            l_eff = self.da.select_oee()
            oee.plot(*tuple(l_eff))
            graphicscene_oee = QtGui.QGraphicsScene()
            graphicscene_oee.addWidget(oee.canvas)
            self.ui.graphicsView_OEE.setScene(graphicscene_oee)
            self.ui.graphicsView_OEE.show()

        def draw_loss():  # loss histogram (raw counts)
            loss = Figure_Loss()
            loss_data = self.da.select_loss()
            loss.plot(*tuple(loss_data))
            graphicscene_loss = QtGui.QGraphicsScene()
            graphicscene_loss.addWidget(loss.canvas)
            self.ui.graphicsView_Loss.setScene(graphicscene_loss)
            self.ui.graphicsView_Loss.show()

        # def draw_mt():  # consumables-usage chart (disabled)
        #     mt = Figure_MT()
        #     mt.plot(*(4, 5, 3))
        #     graphicscene_mt = QtGui.QGraphicsScene()
        #     graphicscene_mt.addWidget(mt.canvas)
        #     self.ui.graphicsView_MT.setScene(graphicscene_mt)
        #     self.ui.graphicsView_MT.show()

        draw_fp()
        draw_loss()
        # draw_mt()
        draw_oee()

    def video_recog(self):
        '''
        Per-second recognition pass: classify the machine as working or idle
        based on spark detection, and update counters and the loss table.
        Skipped for network cameras.
        :return: None
        '''
        if self.isWebCam:
            return
        self.totaltime += 1
        frame_left = self.frame_left  # raw colour frame from the left camera
        # NOTE(review): the grayscale conversion below is never used in this
        # method — possibly left over from an earlier version.
        frame_left_gray = cv2.cvtColor(frame_left,
                                       cv2.COLOR_BGR2GRAY)  # grayscale of the raw frame

        def video_recog_left():
            img = frame_left
            spark, x, y = self.vision.find_spark(img)
            self.q.enqueue(spark)
            # print(spark)
            # x == 1070 appears to be treated as a false-positive location.
            if spark and x != 1070:
                self.type_l = 'work'
                self.X_l = x
                self.Y_l = y
            else:
                self.type_l = ''

            # If sparks keep appearing within the window (machine movement could
            # be added later), the machine must be in a working state.
            if spark or True in self.q.queue:
                self.one_static_time = 0  # motion resumed: reset the single-idle timer
                self.work_time += 1
                self.is_work = True

                if self.work_time % 20 == 0:
                    if x != 1070:
                        self.displayMessage("机器正在工作")
                if self.work_time % 60 == 0:
                    self.da.update_loss_("action4", 1)
            else:
                # ******* screenshot hook
                self.is_work = False
                self.one_static_time += 1  # accumulate the current idle period

                if self.one_static_time % 20 == 0:
                    self.da.update_loss_("action3", 1)
                # ********

                self.action = ThreadedTCPRequestHandler.action  # keyboard-reported action
                if self.action is not None:  # report why the machine is idle on the panel
                    if self.pre_action is None:
                        pass

                if self.action_video is not None:
                    if self.pre_action_video is None:
                        pass

        video_recog_left()
        self.pre_action = self.action
        self.pre_action_video = self.action_video

    def data_read(self):
        """Placeholder: half-hourly database read driven by thread_data; not implemented."""
        pass

    def displayMessage(self, message):
        """Append *message* to the log panel, prefixed with a local timestamp."""
        stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        self.ui.textBrowser.append('[' + stamp + '] ' + message)
def main(experiment_name,
         optimizer,
         output_directory_root="experiments/resnet18_logistic_cifar10",
         epochs=60,
         batch_size=512,
         num_workers=1):
    """Train a logistic classification head on a frozen pretrained ResNet-18.

    Sets up per-experiment logging and TensorBoard, resumes from
    ``checkpoint.pth`` when present, then runs the train/test/checkpoint loop.

    :param experiment_name: subdirectory name for this run's logs/checkpoints
    :param optimizer: callable taking model parameters and returning an optimizer
    :param output_directory_root: root directory for experiment output
    :param epochs: total number of training epochs
    :param batch_size: mini-batch size for the data loaders
    :param num_workers: worker process count for the data loaders
    """
    output_directory = os.path.join(output_directory_root, experiment_name)
    if not os.path.isdir(output_directory):
        os.makedirs(output_directory, exist_ok=True)

    # Setup regular log file + tensorboard
    logfile_path = os.path.join(output_directory, "logfile.txt")
    setup_logger_tqdm(logfile_path)

    tensorboard_log_directory = os.path.join("runs",
                                             "resnet18_logistic_cifar10",
                                             experiment_name)
    tensorboard_summary_writer = SummaryWriter(
        log_dir=tensorboard_log_directory)

    # Choose Training Device
    use_cuda = torch.cuda.is_available()
    logger.info(f"CUDA Available? {use_cuda}")
    device = "cuda" if use_cuda else "cpu"

    # Datasets and Loaders
    train_set_loader, test_set_loader = get_data_loaders(
        batch_size, num_workers)

    # Create Model & Optimizer. The backbone is frozen so only the new
    # classification head (model.fc) receives gradients.
    model = torchvision.models.resnet18(pretrained=True)
    for param in model.parameters():
        param.requires_grad = False
    num_classes = 10  # CIFAR-10
    # Fix: use num_classes here instead of a duplicated hard-coded 10,
    # so the head size cannot silently drift from the declared class count.
    model.fc = nn.Linear(model.fc.in_features, num_classes)
    model.to(device)
    optimizer = optimizer(model.parameters())

    logger.info("=========== Commencing Training ===========")
    logger.info(f"Epoch Count: {epochs}")
    logger.info(f"Batch Size: {batch_size}")

    # Load Checkpoint (resume support)
    checkpoint_file_path = os.path.join(output_directory, "checkpoint.pth")
    start_epoch = 0
    if os.path.exists(checkpoint_file_path):
        logger.info("Checkpoint Found - Loading!")

        checkpoint = torch.load(checkpoint_file_path)
        logger.info(f"Last completed epoch: {checkpoint['epoch']}")
        logger.info(f"Average Train Loss: {checkpoint['train_loss']}")
        logger.info(f"Top-1 Train Accuracy: {checkpoint['train_accuracy']}")
        logger.info(f"Top-1 Test Accuracy: {checkpoint['test_accuracy']}")
        start_epoch = checkpoint["epoch"] + 1
        logger.info(f"Resuming at epoch {start_epoch}")

        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    else:
        logger.info("No checkpoint found, starting from scratch.")

    # Training Loop: train, evaluate, checkpoint once per epoch.
    t = Timer()
    for epoch in range(start_epoch, epochs):
        t.start()
        logger.info("-" * 10)
        logger.info(f"Epoch {epoch}")
        logger.info("-" * 10)

        train_loss, train_accuracy = train_model(device, model,
                                                 train_set_loader, optimizer)
        tensorboard_summary_writer.add_scalar("train_loss", train_loss, epoch)
        tensorboard_summary_writer.add_scalar("train_accuracy", train_accuracy,
                                              epoch)

        test_accuracy = test_model(device, model, test_set_loader, optimizer)
        tensorboard_summary_writer.add_scalar("test_accuracy", test_accuracy,
                                              epoch)

        # Save Checkpoint after every epoch so interrupted runs can resume.
        logger.info("Saving checkpoint.")
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'train_loss': train_loss,
                'train_accuracy': train_accuracy,
                'test_accuracy': test_accuracy
            }, checkpoint_file_path)

        elapsed_time = t.stop()
        logger.info(f"End of epoch {epoch}, took {elapsed_time:0.4f} seconds.")
        logger.info(f"Average Train Loss: {train_loss}")
        logger.info(f"Top-1 Train Accuracy: {train_accuracy}")
        logger.info(f"Top-1 Test Accuracy: {test_accuracy}")
        logger.info("")
# Example #6
# 0
class XioPlayVideo(QtGui.QWidget):
    '''Main widget: receives two local video streams and plays them on UI labels.
    '''
    def __init__(self):
        super(XioPlayVideo, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)
        self.left_cam = cv2.VideoCapture('./videos/left_cam.mp4')  # left camera
        self.right_cam = cv2.VideoCapture('./videos/right_cam.mp4')  # right camera
        self.frame_left = None
        self.frame_right = None

        # Thread that continuously pulls frames from both video sources.
        self.thread_video_receive = threading.Thread(
            target=self.video_receive_local)
        self.thread_video_receive.start()
        self.thread_time = Timer('updatePlay()')  # repaint the labels every 0.04 s
        self.connect(self.thread_time, QtCore.SIGNAL('updatePlay()'),
                     self.video_play)
        self.thread_time.start()
        self.thread_recog = Timer('updatePlay()',
                                  sleep_time=1)  # analyse one frame per second
        self.connect(self.thread_recog, QtCore.SIGNAL('updatePlay()'),
                     self.video_recog)
        self.thread_recog.start()
        self.thread_data = Timer('updatePlay()',
                                 sleep_time=1800)  # poll the database every 30 minutes
        self.connect(self.thread_data, QtCore.SIGNAL('updatePlay()'),
                     self.data_read)
        self.thread_data.start()
        self.thread_tcp = None  # TCP thread; not implemented yet

    def video_receive_local(self,
                            cam1='./videos/left_cam.mp4',
                            cam2='./videos/right_cam.mp4',
                            time_flag=True):
        '''Receive frames from two local videos, reopening each stream on EOF.

        :param cam1: left camera source
        :param cam2: right camera source
        :param time_flag: sleep between reads (True for local files)
        :return: None
        '''
        if self.left_cam.isOpened() is False:
            self.left_cam = cv2.VideoCapture(cam1)
        if self.right_cam.isOpened() is False:
            self.right_cam = cv2.VideoCapture(cam2)
        ret_1, frame_1 = self.left_cam.read()
        ret_2, frame_2 = self.right_cam.read()
        while True:
            self.frame_left = frame_1
            self.frame_right = frame_2
            # A False read status means the stream ended: reopen it to loop.
            if ret_1 is False:
                self.left_cam = cv2.VideoCapture(cam1)
            if ret_2 is False:
                self.right_cam = cv2.VideoCapture(cam2)
            ret_1, frame_1 = self.left_cam.read()
            # Bug fix: this read previously assigned its status to ret_1,
            # so ret_2 was never refreshed and the right stream could never
            # restart (while the left stream's restart flag was clobbered).
            ret_2, frame_2 = self.right_cam.read()
            if time_flag is True:
                time.sleep(0.04)

    def video_receive_rstp(self, cam1='rstp:', cam2='rstp:'):
        '''Receive frames from network (RTSP) streams.

        :param cam1: left camera source
        :param cam2: right camera source
        :return: None
        '''
        self.video_receive_local(cam1=cam1, cam2=cam2, time_flag=False)

    def video_play(self):
        '''Render the most recent frames onto the two UI labels.
        :return: None
        '''
        def label_show_left(frame, label=self.ui.label):  # left label
            height, width, _ = frame.shape
            frame_change = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame_resize = cv2.resize(frame_change, (500, 300),
                                      interpolation=cv2.INTER_AREA)
            image = QtGui.QImage(frame_resize.data, frame_resize.shape[1],
                                 frame_resize.shape[0],
                                 QtGui.QImage.Format_RGB888)  # wrap as QImage
            label.setPixmap(QtGui.QPixmap.fromImage(image))

        def label_show_right(frame, label=self.ui.label_2):  # right label
            label_show_left(frame, label)

        if self.frame_left is not None:
            label_show_left(self.frame_left)
        if self.frame_right is not None:
            label_show_right(self.frame_right)

    def video_recog(self):
        # Frame-analysis callback; not implemented in this class.
        pass

    def data_read(self):
        # Database polling callback; not implemented in this class.
        pass
class XioAll(QtGui.QWidget):
    '''Main application widget: receives video, analyses frames for machine
    activity, records loss/OEE statistics, and refreshes the chart views.
    '''
    HOST = 'localhost'
    PORT = 8081
    TOTAL = 0
    isStatic = True
    action = None            # last keyboard-reported action (set via the TCP handler)
    pre_action = None
    action_video = None      # action recognised from the video itself
    pre_action_video = None

    def __init__(self):
        super(XioAll, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)

        self.frame_left = None
        self.frame_right = None
        self.is_work = True
        self.one_static_time = 0  # duration (seconds) of the current idle period
        self.all_time = 0  # accumulated working time for the day
        self.q = MyQueue()  # queue of recent detections (holding states would be nicer)
        self.vision = Vision()
        # If the date has changed, insert a fresh all-zero row for today.
        da = data_access.EquipmentTimeData()  # access to the loss-statistics table
        result_loss = da.select_("select * from loss ORDER BY SJ DESC limit 1")
        current_time = datetime.datetime.now().strftime('%Y-%m-%d')
        if str(result_loss[0][0]) != current_time:
            da.update('insert into loss(SJ,action1,action2,action3,action4,action5,action6)values'
                      '("%s",%d,%d,%d,%d,%d,%d)' % (current_time, 0, 0, 0, 0, 0, 0))
        else:
            pass

        da_oee = data_access.OEEData()  # access to the hourly OEE utilisation table
        result_oee = da_oee.select_('select * from oee_date ORDER BY SJC DESC limit 1')
        if str(result_oee[0][0]) != current_time:
            da_oee.update_('insert into oee_date(SJC,O8,O9,O10,O11,O12,O13,O14,O15,O16,O17,O18)values'
                           '("' + current_time + '",0,0,0,0,0,0,0,0,0,0,0)')
        else:
            pass
        self.thread_figure = Timer('updatePlay()', sleep_time=120)  # refresh the chart area every 2 minutes
        self.connect(self.thread_figure, QtCore.SIGNAL('updatePlay()'), self.draw)
        self.thread_figure.start()

        self.server = ThreadedTCPServer((self.HOST, self.PORT), ThreadedTCPRequestHandler)  # listens for client requests
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.start()

        self.thread_video_receive = threading.Thread(target=self.video_receive_local)  # reads the video streams
        self.thread_video_receive.start()

        self.thread_time = Timer('updatePlay()')  # repaints the labels every 0.04 s
        self.connect(self.thread_time, QtCore.SIGNAL('updatePlay()'), self.video_play)
        self.thread_time.start()

        self.thread_recog = Timer('updatePlay()', sleep_time=1)  # analyses one frame per second
        self.connect(self.thread_recog, QtCore.SIGNAL('updatePlay()'), self.video_recog)
        self.thread_recog.start()

        self.thread_data = Timer('updatePlay()', sleep_time=1800)  # polls the database every 30 minutes
        self.connect(self.thread_data, QtCore.SIGNAL('updatePlay()'), self.data_read)
        self.thread_data.start()

    def video_receive_local(self, cam1='./videos/left_cam.mp4', cam2='./videos/right_cam.mp4', time_flag=True):
        '''Receive frames from two local videos, reopening each stream on EOF.

        :param cam1: left camera source
        :param cam2: right camera source
        :param time_flag: sleep between reads (True for local files)
        :return: None
        '''
        self.left_cam = cv2.VideoCapture(cam1)
        self.right_cam = cv2.VideoCapture(cam2)
        ret_1, frame_1 = self.left_cam.read()
        ret_2, frame_2 = self.right_cam.read()
        while True:
            self.frame_left = frame_1
            self.frame_right = frame_2
            # A False read status means the stream ended: reopen it to loop.
            if ret_1 is False:
                self.left_cam = cv2.VideoCapture(cam1)
            if ret_2 is False:
                self.right_cam = cv2.VideoCapture(cam2)
            ret_1, frame_1 = self.left_cam.read()
            # Bug fix: this read previously assigned its status to ret_1,
            # so ret_2 was never refreshed and the right stream could never
            # restart (while the left stream's restart flag was clobbered).
            ret_2, frame_2 = self.right_cam.read()
            if time_flag is True:
                time.sleep(0.04)

    def video_receive_rstp(self, cam1='rstp:', cam2='rstp:'):
        '''Receive frames from network (RTSP) streams.

        :param cam1: left camera source
        :param cam2: right camera source
        :return: None
        '''
        self.video_receive_local(cam1=cam1, cam2=cam2, time_flag=False)

    def video_play(self):
        '''Render the most recent frames onto the two UI labels.
        :return: None
        '''

        def label_show_left(frame, label=self.ui.label):  # left label
            height, width, _ = frame.shape
            frame_change = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame_resize = cv2.resize(frame_change, (360, 240), interpolation=cv2.INTER_AREA)
            image = QtGui.QImage(frame_resize.data, frame_resize.shape[1], frame_resize.shape[0],
                                 QtGui.QImage.Format_RGB888)  # wrap as QImage
            label.setPixmap(QtGui.QPixmap.fromImage(image))

        def label_show_right(frame, label=self.ui.label_2):  # right label
            label_show_left(frame, label)

        if self.frame_left is not None:
            label_show_left(self.frame_left)
        if self.frame_right is not None:
            label_show_right(self.frame_right)

    def draw(self):
        '''
        Refresh the four chart views (pie, OEE, loss, consumables).
        :return:
        '''

        def draw_fp():  # loss pie chart
            fp = Figure_Pie()
            da = data_access.EquipmentData()
            result = da.select()
            # '*' unpacks the tuple: (a, b, c, d) becomes four positional args.
            fp.plot(*(result[-1][1], result[-1][2], result[-1][3], result[-1][4]))
            graphicscene_fp = QtGui.QGraphicsScene()
            graphicscene_fp.addWidget(fp.canvas)
            self.ui.graphicsView_Pie.setScene(graphicscene_fp)
            self.ui.graphicsView_Pie.show()

        def draw_oee():  # daily OEE trend chart
            current_time = datetime.datetime.now().strftime('%Y-%m-%d')
            lossTime = data_access.EquipmentTimeData()
            result_loss = lossTime.select_("select * from loss ORDER BY SJ DESC limit 1")
            zongshijian = time.strftime('%H:%M:%S', time.localtime(time.time()))  # wall-clock now
            huanxing = result_loss[0][1]        # changeover time
            dailiao = result_loss[0][2]         # waiting-for-material time
            shebeiguzhang = result_loss[0][3]   # equipment-failure time
            tingzhi = result_loss[0][4]         # stoppage time
            # qitashijian=result[0][5]
            # kongyunzhuan=result[0][6]
            # Load time = seconds elapsed since 08:00 minus stoppages.
            # NOTE(review): this is zero right at 08:00:00, which would make the
            # division below raise ZeroDivisionError — confirm the shift starts later.
            fuheshijian = (int(zongshijian.split(':')[0]) - 8) * 3600 + int(zongshijian.split(':')[1]) * 60 + int(
                zongshijian.split(':')[2]) - tingzhi
            shijijiagong_1 = fuheshijian - huanxing - dailiao - shebeiguzhang
            eff = int(shijijiagong_1 / fuheshijian * 100)  # utilisation percentage
            print(eff)

            hour = time.localtime()[3]  # update the current hour's slot
            da_oee = data_access.OEEData()
            da_oee.update_("update oee_date set O" + str(hour) + "=" + str(eff) + ' where SJC="' + current_time + '"')
            L_eff = []
            oee = Figure_OEE()
            da = data_access.OEEData()
            result = da.select()
            hour = time.localtime()[3]
            if hour < 20:
                # Columns start at O8, so hour h maps to index h - 7.
                for i in range(1, hour - 6):
                    L_eff.append(result[-1][i])
            oee.plot(*tuple(L_eff))
            graphicscene_oee = QtGui.QGraphicsScene()
            graphicscene_oee.addWidget(oee.canvas)
            self.ui.graphicsView_OEE.setScene(graphicscene_oee)
            self.ui.graphicsView_OEE.show()

        def draw_loss():  # loss histogram
            loss = Figure_Loss()
            da = data_access.EquipmentTimeData()
            result = da.select()
            loss.plot(*(result[-1][1], result[-1][2], result[-1][3], result[-1][4]))
            graphicscene_loss = QtGui.QGraphicsScene()
            graphicscene_loss.addWidget(loss.canvas)
            self.ui.graphicsView_Loss.setScene(graphicscene_loss)
            self.ui.graphicsView_Loss.show()

        def draw_mt():  # consumables usage chart
            mt = Figure_MT()
            mt.plot()
            graphicscene_mt = QtGui.QGraphicsScene()
            graphicscene_mt.addWidget(mt.canvas)
            self.ui.graphicsView_MT.setScene(graphicscene_mt)
            self.ui.graphicsView_MT.show()

        draw_fp()
        draw_loss()
        draw_mt()
        draw_oee()

    def video_recog(self):
        '''
        Analyse the latest left-camera frame for machine activity.
        :return:
        '''
        frame_left = self.frame_left  # original colour frame, left camera
        frame_left_gray = cv2.cvtColor(frame_left, cv2.COLOR_BGR2GRAY)  # grayscale copy

        # frame_right = self.frame_left  # original colour frame
        # frame_right_gray = cv2.cvtColor(frame_right, cv2.COLOR_BGR2GRAY)

        def video_recog_left():
            img = frame_left
            # NOTE(review): elsewhere find_spark is unpacked as (spark, x, y);
            # here a single value is taken — confirm the expected signature.
            spark = self.vision.find_spark(img)
            self.q.enqueue(spark)
            # print(spark)
            # Sparks seen within the recent window imply the machine is working.
            if spark or True in self.q.queue:
                # print('work')
                self.action_video = None
                self.one_static_time = 0  # back in motion: reset the idle counter
            else:
                # ******* capture a snapshot while idle
                self.one_static_time += 1  # one more second of idle time
                if self.one_static_time % 60 == 0:
                    print('start or static')
                    print('静止了,往catch文件夹中查看原因')
                    t = time.localtime()
                    hour = t[3]
                    mini = t[4]
                    seco = t[5]
                    filename = str(hour) + '-' + str(mini) + '-' + str(seco)
                    cv2.imwrite('./catch/' + filename + '.jpg', img)
                # ********

                self.action = ThreadedTCPRequestHandler.action  # keyboard-reported action
                if self.action is not None:  # write the stoppage reason onto the panel
                    if self.pre_action is None:
                        print(self.action)
                        message = '[' + time.strftime('%Y-%m-%d %H:%M:%S',
                                                      time.localtime(time.time())) + ']' + str(self.action)
                        self.displayMessage(message)

                if self.vision.tiaoshi(frame_left_gray):
                    self.action_video = 'tiaoshi'
                if self.action_video is not None:
                    if self.pre_action_video is None:
                        print(self.action_video)
                        message = '[' + time.strftime('%Y-%m-%d %H:%M:%S',
                                                      time.localtime(time.time())) + ']' + str(self.action_video)
                        self.displayMessage(message)

        def video_recog_right():  # reserved for gas-bottle changes and the like
            pass

        video_recog_left()
        video_recog_right()
        self.pre_action = self.action
        self.pre_action_video = self.action_video

    def data_read(self):
        # Database polling callback; not implemented yet.
        pass

    def displayMessage(self, message):
        """Append *message* to the UI text browser."""
        self.ui.textBrowser.append(message)
# Example #8
# 0
def main(device, mp_args, dataloader_func, model, optimizer_callback,
         output_directory, tensorboard_log_directory, epochs):
    """Per-process entry point for distributed (DDP) training.

    Joins the NCCL process group for this rank, sets up rank-local logging and
    TensorBoard output, optionally resumes from ``checkpoint.pth``, then runs
    the train/evaluate/checkpoint loop for ``epochs`` epochs.
    """
    # Global rank = node index * GPUs per node + local device index.
    global_rank = mp_args.nr * mp_args.gpus + device
    dist.init_process_group(backend='nccl',
                            init_method='env://',
                            world_size=mp_args.world_size,
                            rank=global_rank)

    # Each rank writes into its own subdirectory.
    output_directory = os.path.join(output_directory, f"rank_{global_rank}")
    if not os.path.isdir(output_directory):
        os.makedirs(output_directory, exist_ok=True)

    # Rank-local plain-text log.
    logfile_path = os.path.join(output_directory, "logfile.txt")
    setup_logger_tqdm(logfile_path)

    # Rank-local TensorBoard stream.
    tensorboard_log_directory = os.path.join(tensorboard_log_directory,
                                             f"rank_{global_rank}")
    tb_writer = SummaryWriter(log_dir=tensorboard_log_directory)

    # Shard-aware dataloaders for this rank.
    train_loader, test_loader = dataloader_func(mp_args.world_size,
                                                global_rank)

    # Move the model to this device, build its optimizer, then wrap in DDP.
    model.to(device)
    optimizer = optimizer_callback(model)
    model = nn.parallel.DistributedDataParallel(model, device_ids=[device])

    logger.info(f"Epoch Count: {epochs}")

    # Resume from a previous checkpoint when one exists.
    ckpt_path = os.path.join(output_directory, "checkpoint.pth")
    start_epoch = 0
    if os.path.exists(ckpt_path):
        logger.info("Checkpoint Found - Loading!")

        checkpoint = torch.load(ckpt_path)
        logger.info(f"Last completed epoch: {checkpoint['epoch']}")
        logger.info(f"Average Train Loss: {checkpoint['train_loss']}")
        logger.info(f"Top-1 Train Accuracy: {checkpoint['train_accuracy']}")
        logger.info(f"Top-1 Test Accuracy: {checkpoint['test_accuracy']}")
        start_epoch = checkpoint["epoch"] + 1
        logger.info(f"Resuming at epoch {start_epoch}")

        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    else:
        logger.info("No checkpoint found, starting from scratch.")

    # Main loop: train, evaluate, checkpoint — once per epoch.
    stopwatch = Timer()
    #progress = tqdm(total=epochs, initial=start_epoch, desc="Epochs")
    for epoch in range(start_epoch, epochs):
        stopwatch.start()
        logger.info(f"Commence EPOCH {epoch}")

        # Train
        train_loss, train_accuracy = train_model(device, model, train_loader,
                                                 optimizer)
        tb_writer.add_scalar("train_loss", train_loss, epoch)
        tb_writer.add_scalar("train_accuracy", train_accuracy, epoch)

        # Test
        test_accuracy = test_model(device, model, test_loader)
        tb_writer.add_scalar("test_accuracy", test_accuracy, epoch)

        # Save Checkpoint
        logger.info("Saving checkpoint.")
        state = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'train_loss': train_loss,
            'train_accuracy': train_accuracy,
            'test_accuracy': test_accuracy
        }
        torch.save(state, ckpt_path)

        elapsed_time = stopwatch.stop()
        logger.info(f"End of epoch {epoch}, took {elapsed_time:0.4f} seconds.")
        logger.info(f"Average Train Loss: {train_loss}")
        logger.info(f"Top-1 Train Accuracy: {train_accuracy}")
        logger.info(f"Top-1 Test Accuracy: {test_accuracy}")
# Example #9
# 0
def main(dataloader_func,
         model,
         optimizer_callback,
         output_directory,
         tensorboard_log_directory,
         lr_scheduler=None,
         epochs=150):
    """Single-process training entry point with optional LR scheduling.

    Sets up file logging and TensorBoard, resumes from ``checkpoint.pth`` when
    present (including scheduler state), and runs the train/evaluate/checkpoint
    loop, stepping the scheduler once per epoch when one is supplied.
    """
    if not os.path.isdir(output_directory):
        os.makedirs(output_directory, exist_ok=True)

    # Plain-text log file.
    logfile_path = os.path.join(output_directory, "logfile.txt")
    setup_logger_tqdm(logfile_path)

    # TensorBoard stream.
    summary_writer = SummaryWriter(log_dir=tensorboard_log_directory)

    # Prefer the GPU when one is available.
    use_cuda = torch.cuda.is_available()
    logger.info(f"CUDA Available? {use_cuda}")
    device = "cuda" if use_cuda else "cpu"

    # Dataloaders.
    train_loader, test_loader = dataloader_func()

    # Model, optimizer, and (optionally) a scheduler built on that optimizer.
    model.to(device)
    optimizer = optimizer_callback(model)
    if lr_scheduler:
        lr_scheduler = lr_scheduler(optimizer)

    logger.info(f"Epoch Count: {epochs}")

    # Resume from a previous checkpoint when one exists.
    ckpt_path = os.path.join(output_directory, "checkpoint.pth")
    start_epoch = 0
    if os.path.exists(ckpt_path):
        logger.info("Checkpoint Found - Loading!")

        checkpoint = torch.load(ckpt_path)
        logger.info(f"Last completed epoch: {checkpoint['epoch']}")
        logger.info(f"Average Train Loss: {checkpoint['train_loss']}")
        logger.info(f"Top-1 Train Accuracy: {checkpoint['train_accuracy']}")
        logger.info(f"Top-1 Test Accuracy: {checkpoint['test_accuracy']}")
        start_epoch = checkpoint["epoch"] + 1
        logger.info(f"Resuming at epoch {start_epoch}")

        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        if lr_scheduler:
            lr_scheduler.load_state_dict(checkpoint["lr_scheduler_state_dict"])
    else:
        logger.info("No checkpoint found, starting from scratch.")

    # Main loop: train, evaluate, step scheduler, checkpoint.
    clock = Timer()
    for epoch in range(start_epoch, epochs):
        clock.start()
        logger.info(f"Commence EPOCH {epoch}")

        # Train
        train_loss, train_accuracy = train_model(device, model, train_loader,
                                                 optimizer)
        summary_writer.add_scalar("train_loss", train_loss, epoch)
        summary_writer.add_scalar("train_accuracy", train_accuracy, epoch)

        # Test
        test_accuracy = test_model(device, model, test_loader, optimizer)
        summary_writer.add_scalar("test_accuracy", test_accuracy, epoch)

        # Advance the scheduler (if any) and capture its state for saving.
        sched_state = None
        if lr_scheduler:
            lr_scheduler.step()
            sched_state = lr_scheduler.state_dict()

        # Save Checkpoint
        logger.info("Saving checkpoint.")
        state = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'lr_scheduler_state_dict': sched_state,
            'train_loss': train_loss,
            'train_accuracy': train_accuracy,
            'test_accuracy': test_accuracy
        }
        torch.save(state, ckpt_path)

        elapsed_time = clock.stop()
        logger.info(f"End of epoch {epoch}, took {elapsed_time:0.4f} seconds.")
        logger.info(f"Average Train Loss: {train_loss}")
        logger.info(f"Top-1 Train Accuracy: {train_accuracy}")
        logger.info(f"Top-1 Test Accuracy: {test_accuracy}")
    def train_one_epoch(self, model, dataloader, optimizer, scheduler, num_epochs, max_grad_norm=None,
                        debugging=False):
        """Train the model for one epoch.

        Iterates the dataloader once, doing a forward/backward/step per batch,
        updating the tqdm bar with per-batch losses and metrics.

        :param model: model whose forward returns a dict containing "losses"
        :param dataloader: iterable of training batches
        :param optimizer: optimizer stepped once per batch
        :param scheduler: optional LR scheduler stepped once per batch
        :param num_epochs: total epoch count, used only in the progress text
        :param max_grad_norm: if set, clip gradients to this norm before stepping
        :param debugging: if True, stop after 10 batches
        """
        model.train()
        timer = Timer()

        # Header row for the per-batch progress line.
        # NOTE(review): labels say "l1_loss"/"l2_loss"/"l3_loss" but the loop
        # below reads "l1_cls_loss"/"l2_cls_loss"/"l3_cls_loss" — confirm the
        # abbreviated column titles are intentional.
        print(
            ("{:25}" + "|" + "{:^15}" * (3 + len(self.early_stopping_metrics)) + "|").format(
                "", "l1_loss", "l2_loss", "l3_loss", *self.early_stopping_metrics)
        )

        total = 10 if debugging else len(dataloader)
        with tqdm(dataloader, total=total) as t:
            if num_epochs is not None:
                description = f"Training ({self.epoch}/{num_epochs})"
            else:
                description = "Training"
            t.set_description(description)

            for i, data in enumerate(t):
                timer.start()

                data = to_device(data, self.device)
                optimizer.zero_grad()

                # Forward: the model computes its own loss dict.
                output = model(**data)
                losses = output["losses"]

                # Calculate batch metrics and merge them into the loss dict so
                # they can be displayed alongside the losses.
                metric = compute_metrics_from_inputs_and_outputs(
                    inputs=data, outputs=output, tokenizer=self.tokenizer, save_csv_path=None)
                losses.update(metric)

                # Update tqdm with training information
                to_tqdm = []  # update tqdm
                for loss_type in ["l1_cls_loss", "l2_cls_loss", "l3_cls_loss", *self.early_stopping_metrics]:
                    loss_n = losses[loss_type]

                    # Display NaN losses as "nan" instead of crashing the bar.
                    if isinstance(loss_n, torch.Tensor) and torch.isnan(loss_n):
                        to_tqdm.append("nan")
                    else:
                        to_tqdm.append(f"{loss_n.item():.3f}")

                des = (
                    "{:25}" + "|" + "{:^15}" * (3 + len(self.early_stopping_metrics)) + "|"
                ).format(description, *to_tqdm)
                t.set_description(des)

                # Backward
                losses["total_loss"].backward()
                # NOTE(review): clipping reads self.model while the step uses the
                # passed-in `model` — confirm these are the same object.
                if max_grad_norm is not None:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_grad_norm)
                optimizer.step()
                if scheduler is not None:
                    scheduler.step()

                timer.end()

                # Break when reaching 10 iterations when debugging
                if debugging and i == 9:
                    break

        logger.info(f"{description} took {timer.get_total_time():.2f}s.")
        return
# Example #11
# 0
def main(carrier_path,
         marking_network,
         target_network,
         target_checkpoint,
         batch_size=256,
         num_workers=1,
         align=True,
         test_set_loader=None):
    """Score a trained classifier for the presence of carrier (watermark) directions.

    Optionally aligns the marking and target feature spaces with a least-squares
    map, then measures the cosine similarity between the target's classifier
    weights and the carrier, reporting per-class and combined p-values.

    :return: tuple of (scores, p_vals, combined_pval)
    """
    # Prefer the GPU when one is available.
    use_cuda = torch.cuda.is_available()
    logger.info(f"CUDA Available? {use_cuda}")
    device = torch.device("cuda" if use_cuda else "cpu")

    # Carrier directions, one row per class.
    carrier = torch.load(carrier_path).numpy()

    t = Timer()
    t.start()

    # Classifier weights of the target network.
    W = target_checkpoint["model_state_dict"]["fc.weight"].cpu().numpy()
    if align:
        logger.info(
            "Aligning marking and target network feature space with least squares"
        )

        # Put both networks on the device in eval mode for feature extraction.
        for net in (marking_network, target_network):
            net.to(device)
            net.eval()

        # Build a dataloader unless the caller supplied one.
        if not test_set_loader:
            test_set_loader = get_data_loader(batch_size, num_workers)

        logger.info(
            "Extracting image features from marking and target networks.")
        marking_feats, _ = extract_features(test_set_loader,
                                            marking_network,
                                            device,
                                            verbose=False)
        target_feats, _ = extract_features(test_set_loader,
                                           target_network,
                                           device,
                                           verbose=False)
        marking_feats = marking_feats.numpy()
        target_feats = target_feats.numpy()

        # Least-squares map X taking marking features onto target features.
        X, _, _, _ = np.linalg.lstsq(marking_feats, target_feats)
        logger.info(
            "Norm of residual: %.4e" %
            np.linalg.norm(np.dot(marking_feats, X) - target_feats)**2)
        # Carry the alignment over to the classifier weights.
        W = np.matmul(W, X.T)

    # Row-normalise the weights, then score each class against its carrier.
    W = W / np.linalg.norm(W, axis=1, keepdims=True)
    scores = np.sum(W * carrier, axis=1)

    logger.info("Mean p-value is at %d times sigma" %
                int(scores.mean() * np.sqrt(W.shape[0] * carrier.shape[1])))
    logger.info("Epoch of the model: %d" % target_checkpoint["epoch"])

    # Per-class cosine p-values, then a single combined p-value.
    p_vals = [cosine_pvalue(c, d=carrier.shape[1]) for c in list(scores)]

    combined_pval = combine_pvalues(p_vals)[1]
    logger.info(f"log10(p)={np.log10(combined_pval)}")

    elapsed_time = t.stop()
    logger.info("Total took %.2f" % (elapsed_time))

    return (scores, p_vals, combined_pval)
    def train_one_epoch(self,
                        model,
                        dataloader,
                        optimizer,
                        scheduler,
                        num_epochs,
                        max_grad_norm=None,
                        debugging=False):
        """Train the model for one epoch.

        Iterates the dataloader once, doing a forward/backward/step per batch
        and showing per-aspect (food/service/price) losses and accuracy on the
        tqdm bar.

        :param model: model whose forward (with is_training=True) returns a dict containing "losses"
        :param dataloader: iterable of training batches
        :param optimizer: optimizer stepped once per batch
        :param scheduler: optional LR scheduler stepped once per batch
        :param num_epochs: total epoch count, used only in the progress text
        :param max_grad_norm: if set, clip gradients to this norm before stepping
        :param debugging: if True, stop after 10 batches
        """
        model.train()
        timer = Timer()

        # Two header rows: aspect names, then the three metrics per aspect.
        print(("{:25}" + "|" + "{:^45}" + "|" + "{:^45}" + "|" + "{:^45}" +
               "|").format("", "food", "service", "price"))
        print(("{:25}" + "|" + "{:^15}" * 3 + "|" + "{:^15}" * 3 + "|" +
               "{:^15}" * 3 + "|").format("", "mse_loss", "existence_loss",
                                          "acc", "mse_loss", "existence_loss",
                                          "acc", "mse_loss", "existence_loss",
                                          "acc"))

        total = 10 if debugging else len(dataloader)
        with tqdm(dataloader, total=total) as t:
            if num_epochs is not None:
                description = f"Training ({self.epoch}/{num_epochs})"
            else:
                description = "Training"
            t.set_description(description)

            for i, data in enumerate(t):
                timer.start()

                data = to_device(data, self.device)
                optimizer.zero_grad()

                # Forward: the model computes its own loss dict.
                output = model(**data, is_training=True)
                losses = output["losses"]

                # Calculate batch accuracy and merge it into the loss dict so it
                # can be displayed alongside the losses.
                acc = compute_metrics_from_inputs_and_outputs(
                    inputs=data,
                    outputs=output,
                    output_acc=True,
                    confidence_threshold=self.config["evaluation"]
                    ["confidence_threshold"])
                losses.update(acc)

                # Update tqdm with training information
                to_tqdm = []  # update tqdm
                # NOTE(review): header says "mse_loss" but keys read "score_loss"
                # — confirm the column titles are intentional.
                for name in ["food", "service", "price"]:
                    for loss_type in ["score_loss", "existence_loss", "acc"]:
                        n = f"{name}_{loss_type}"
                        loss_n = losses[n]

                        # Non-tensor or NaN values are shown as "nan".
                        if (not isinstance(
                                loss_n, torch.Tensor)) or torch.isnan(loss_n):
                            to_tqdm.append("nan")
                        else:
                            to_tqdm.append(f"{loss_n.item():.3f}")

                des = ("{:25}" + "|" + "{:^15}" * 3 + "|" + "{:^15}" * 3 +
                       "|" + "{:^15}" * 3 + "|").format(description, *to_tqdm)
                t.set_description(des)

                # Backward
                losses["total_loss"].backward()
                # NOTE(review): clipping reads self.model while the step uses the
                # passed-in `model` — confirm these are the same object.
                if max_grad_norm is not None:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(),
                                                   max_grad_norm)
                optimizer.step()
                if scheduler is not None:
                    scheduler.step()

                timer.end()

                # Break when reaching 10 iterations when debugging
                if debugging and i == 9:
                    break

        logger.info(f"{description} took {timer.get_total_time():.2f}s.")
        return
# Example #13
# 0
def main(device,
         mp_args,
         experiment_name,
         optimizer,
         output_directory_root="experiments/resnet18_distributed",
         lr_scheduler=None,
         epochs=150,
         batch_size=512,
         num_workers=1):
    """Per-process entry point for distributed (DDP) ResNet-18 training.

    Args:
        device: Local GPU index for this process.
        mp_args: Namespace providing ``nr`` (node rank), ``gpus``
            (GPUs per node) and ``world_size``.
        experiment_name: Used to build output and tensorboard directories.
        optimizer: Partial function that builds the optimizer from the
            model's parameters.
        output_directory_root: Root directory for per-rank checkpoints/logs.
        lr_scheduler: Optional partial function that builds an LR scheduler
            from the optimizer.
        epochs: Number of training epochs.
        batch_size: Per-process batch size.
        num_workers: DataLoader worker count.
    """

    # Global rank = node index * GPUs-per-node + local GPU index.
    global_rank = mp_args.nr * mp_args.gpus + device
    dist.init_process_group(backend='nccl',
                            init_method='env://',
                            world_size=mp_args.world_size,
                            rank=global_rank)

    output_directory = os.path.join(output_directory_root, experiment_name,
                                    f"rank_{global_rank}")
    # exist_ok=True makes a separate isdir pre-check unnecessary and
    # race-free when several ranks share a filesystem.
    os.makedirs(output_directory, exist_ok=True)

    # Setup regular log file + tensorboard
    logfile_path = os.path.join(output_directory, "logfile.txt")
    setup_logger_tqdm(logfile_path)

    tensorboard_log_directory = os.path.join("runs", "resnet18_distributed",
                                             experiment_name,
                                             f"rank_{global_rank}")
    tensorboard_summary_writer = SummaryWriter(
        log_dir=tensorboard_log_directory)

    # Datasets and Loaders
    train_set_loader, test_set_loader = get_data_loaders(
        mp_args.world_size, global_rank, batch_size, num_workers)

    # Create Model & Optimizer (uses Partial Functions)
    model = torchvision.models.resnet18(pretrained=False, num_classes=10)
    model.to(device)
    optimizer = optimizer(model.parameters())
    model = nn.parallel.DistributedDataParallel(model, device_ids=[device])

    if lr_scheduler:
        lr_scheduler = lr_scheduler(optimizer)

    logger.info("=========== Commencing Training ===========")
    logger.info(f"Epoch Count: {epochs}")
    logger.info(f"Batch Size: {batch_size}")

    # Load Checkpoint
    checkpoint_file_path = os.path.join(output_directory, "checkpoint.pth")
    start_epoch = 0
    if os.path.exists(checkpoint_file_path):
        logger.info("Checkpoint Found - Loading!")
        # map_location="cpu" keeps each rank from deserializing tensors onto
        # the GPU the checkpoint was saved from; load_state_dict below then
        # copies them into the already GPU-resident model.
        checkpoint = torch.load(checkpoint_file_path, map_location="cpu")
        logger.info(f"Last completed epoch: {checkpoint['epoch']}")
        logger.info(f"Average Train Loss: {checkpoint['train_loss']}")
        logger.info(f"Top-1 Train Accuracy: {checkpoint['train_accuracy']}")
        logger.info(f"Top-1 Test Accuracy: {checkpoint['test_accuracy']}")
        start_epoch = checkpoint["epoch"] + 1
        logger.info(f"Resuming at epoch {start_epoch}")

        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        if lr_scheduler:
            lr_scheduler.load_state_dict(checkpoint["lr_scheduler_state_dict"])
    else:
        logger.info("No checkpoint found, starting from scratch.")

    # Training Loop
    t = Timer()
    for epoch in range(start_epoch, epochs):
        t.start()
        logger.info("-" * 10)
        logger.info(f"Epoch {epoch}")
        logger.info("-" * 10)

        train_loss, train_accuracy = train_model(device, model,
                                                 train_set_loader, optimizer)
        tensorboard_summary_writer.add_scalar("train_loss", train_loss, epoch)
        tensorboard_summary_writer.add_scalar("train_accuracy", train_accuracy,
                                              epoch)

        test_accuracy = test_model(device, model, test_set_loader, optimizer)
        tensorboard_summary_writer.add_scalar("test_accuracy", test_accuracy,
                                              epoch)

        scheduler_dict = None
        if lr_scheduler:
            lr_scheduler.step()
            scheduler_dict = lr_scheduler.state_dict()

        # Save Checkpoint (scheduler state is None when no scheduler is used;
        # the resume path above only reads it when lr_scheduler is set).
        logger.info("Saving checkpoint.")
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'lr_scheduler_state_dict': scheduler_dict,
                'train_loss': train_loss,
                'train_accuracy': train_accuracy,
                'test_accuracy': test_accuracy
            }, checkpoint_file_path)

        elapsed_time = t.stop()
        logger.info(f"End of epoch {epoch}, took {elapsed_time:0.4f} seconds.")
        logger.info(f"Average Train Loss: {train_loss}")
        logger.info(f"Top-1 Train Accuracy: {train_accuracy}")
        logger.info(f"Top-1 Test Accuracy: {test_accuracy}")
        logger.info("")
Example #14
0
class XioAll(QtGui.QWidget):
    '''Main application widget: receives video streams, runs the
    recognition worker threads and drives the Qt UI.
    '''
    # Class-level configuration and shared recognition state.
    HOST = 'localhost'  # TCP server bind address
    PORT = 8081  # TCP server port
    TOTAL = 0
    isStatic = True
    Shumei = None
    action = None
    pre_action = None
    action_video = None  # action recognized inside the video
    pre_action_video = None

    def __init__(self):
        """Construct the main widget: build the UI, make sure today's rows
        exist in the loss/OEE tables, then start the TCP server and all
        capture/playback/recognition/polling worker threads.
        """
        super(XioAll, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)

        self.frame_left = None  # latest decoded frame from the left camera
        self.frame_right = None  # latest decoded frame from the right camera
        self.is_work = True  # whether the machine is currently considered working
        self.stype = 0  # current Shumei activity code (0 = idle)
        self.one_static_time = 0  # duration of a single fault/idle period
        self.all_time = 0  # accumulated working time for one day
        self.q = MyQueue()  # frame queue (original note: storing states would be better)
        self.vision = Vision()

        self.CamPath = ""  # user-selected video path ("" = use default source)

        # If the date has changed since the newest row, insert an all-zero
        # row for today so later updates have something to increment.
        da = data_access.EquipmentTimeData()  # accesses the loss statistics table
        result_loss = da.select_("select * from loss ORDER BY SJ DESC limit 1")
        current_time = datetime.datetime.now().strftime('%Y-%m-%d')
        if str(result_loss[0][0]) != current_time:
            da.update(
                'insert into loss(SJ,action1,action2,action3,action4,action5,action6)values'
                '("%s",%d,%d,%d,%d,%d,%d)' % (current_time, 0, 0, 0, 0, 0, 0))
        else:
            pass

        da_oee = data_access.OEEData()  # accesses the hourly OEE utilisation table
        result_oee = da_oee.select_(
            'select * from oee_date ORDER BY SJC DESC limit 1')
        if str(result_oee[0][0]) != current_time:
            da_oee.update_(
                'insert into oee_date(SJC,O8,O9,O10,O11,O12,O13,O14,O15,O16,O17,O18)values'
                '("' + current_time + '",0,0,0,0,0,0,0,0,0,0,0)')
        else:
            pass

        self.thread_figure = Timer('updatePlay()',
                                   sleep_time=120)  # refreshes the plot area every 2 minutes
        self.connect(self.thread_figure, QtCore.SIGNAL('updatePlay()'),
                     self.draw)
        self.thread_figure.start()

        # Button wiring
        self.connect(self.ui.fileSelectButton, QtCore.SIGNAL('clicked()'),
                     self.fileSelect)
        self.connect(self.ui.mailSenderButton, QtCore.SIGNAL('clicked()'),
                     self.mailSend)
        self.connect(self.ui.confirmDateButton, QtCore.SIGNAL('clicked()'),
                     self.displayMonthData)
        self.connect(self.ui.mailConfirm, QtCore.SIGNAL('clicked()'),
                     self.confirmMail)

        self.server = ThreadedTCPServer(
            (self.HOST, self.PORT),
            ThreadedTCPRequestHandler)  # keeps listening for client requests
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.start()

        self.thread_video_receive = threading.Thread(
            target=self.video_receive_local)  # reads the video stream
        self.thread_video_receive.start()

        self.thread_time = Timer('updatePlay()')  # repaints the label every 0.04 s
        self.connect(self.thread_time, QtCore.SIGNAL('updatePlay()'),
                     self.video_play)
        self.thread_time.start()

        self.thread_recog = Timer('updatePlay()',
                                  sleep_time=1)  # analyses one frame per second
        self.connect(self.thread_recog, QtCore.SIGNAL('updatePlay()'),
                     self.video_recog)
        self.thread_recog.start()

        self.thread_data = Timer('updatePlay()',
                                 sleep_time=1800)  # polls the database every half hour
        self.connect(self.thread_data, QtCore.SIGNAL('updatePlay()'),
                     self.data_read)
        self.thread_data.start()

        self.thread_shumei = threading.Thread(target=self.shumeiDeal)
        self.thread_shumei.start()

        self.thread_control = Timer('updatePlay()',
                                    sleep_time=10)  # re-arms detection judges every 10 s (original comment said every half hour)
        self.connect(self.thread_control, QtCore.SIGNAL('updatePlay()'),
                     self.control_judge)
        self.thread_control.start()

        # 12-25
        self.thread_recogtiaoshi = Timer('updatePlay()',
                                         sleep_time=0.5)  # tuning ("tiaoshi") detection every 0.5 s
        self.connect(self.thread_recogtiaoshi, QtCore.SIGNAL('updatePlay()'),
                     self.video_recogtiaoshi)
        self.thread_recogtiaoshi.start()

        # self.thread_recogbottle = Timer('updatePlay()', sleep_time=0.5)  # bottle detection every 0.5 s (disabled)
        # self.connect(self.thread_recogbottle, QtCore.SIGNAL('updatePlay()'), self.video_recogbottle)
        # self.thread_recogbottle.start()

        self.thread_recogzhuangji = Timer('updatePlay()',
                                          sleep_time=0.1)  # assembly detection every 0.1 s (original comment said 0.5 s)
        self.connect(self.thread_recogzhuangji, QtCore.SIGNAL('updatePlay()'),
                     self.video_recogzhuangji)
        self.thread_recogzhuangji.start()

        # Detection state (the same block is duplicated in reFlushDetection
        # for re-initialisation when the video source changes).
        self.X_l = 0
        self.Y_l = 0
        self.type_l = ""
        self.flag = 0
        self.a = 0
        self.tiaoshi_back = False
        self.tiaoshi_forward = False
        self.X_r = 0
        self.Y_r = 0
        self.type_r = ""
        self.firstFrame = None
        self.chaiji_left = False
        self.chaiji_right = False
        self.cltime = 0
        self.crtime = 0
        self.totaltime = 0

        # Values shown on the panel
        self.work_time = 0
        self.tf_time = 0
        self.tb_time = 0

        # Tuning ("tiaoshi") detection baselines and masks
        self.machinedown_base = cv2.imread(
            "images/tiaoshiimages/machinedown_base.jpg")
        self.machineup_base = cv2.imread(
            "images/tiaoshiimages/machineup_base.jpg")

        self.machineup_mask = cv2.imread("images/tiaoshiimages/up1.jpg")
        self.machinedown_mask = cv2.imread("images/tiaoshiimages/down1.jpg")

        self.peopleup_mask = cv2.imread("images/tiaoshiimages/handsup.jpg", 0)
        self.peopledown_mask = cv2.imread("images/tiaoshiimages/handsdown.jpg",
                                          0)

        self.peopleup_base = cv2.imread(
            "images/tiaoshiimages/handsup_base.jpg", 0)
        self.peopledown_base = cv2.imread(
            "images/tiaoshiimages/handsdown_base.jpg", 0)

        self.Ldown = [0] * 10
        self.Lup = [0] * 10  # fixed-length 10-frame history queues
        self.Lhandsdown = [0] * 10
        self.Lhandsup = [0] * 10

        self.isJudgeMachineT = True
        self.tiaoshitime = 0

        self.isUpStart = False
        self.isDownStart = False
        self.machineLocation = ""
        self.downStartTime = 0
        self.upStartTime = 0
        # Bottle-change detection (currently disabled)
        # self.bottle_area = cv2.imread("images/bottleimages/bottle.jpg", 0)
        # self.bottle_area = cv2.resize(self.bottle_area, (1280, 720))
        # self.nobottle_base = cv2.imread("images/bottleimages/nobottle_base.jpg", 0)
        # self.nobottle_base = cv2.resize(self.nobottle_base, (1280, 720))
        # self.Lbottle = [0] * 10
        # self.isBottleStart = False
        # self.isJudgeMachineB = True
        self.bottletime = 0
        # Machine assembly ("zhuangji") detection
        self.mask_right = cv2.imread("images/zhuangjiimages/right.jpg")
        self.mask_left = cv2.imread("images/zhuangjiimages/maskleft.jpg")
        self.left_base = cv2.imread("images/zhuangjiimages/left_base.jpg", 0)
        # NOTE(review): hue 26-34 in OpenCV HSV is yellow-ish despite the
        # red* names -- confirm the intended colour.
        self.redLower = np.array([26, 43, 46])
        self.redUpper = np.array([34, 255, 255])
        self.Lright = [0] * 10
        self.Lleft = [0] * 10
        self.is_JudgeRL = True
        self.isRightStart = False
        self.isLeftStart = False
        self.zhuangjitime = 0

    def fileSelect(self):
        """Open a file dialog to pick an MP4 video source.

        Only switches the camera path (and re-initialises detection state)
        when the user actually picked a different, non-empty path.
        Cancelling the dialog previously returned an empty path that
        clobbered ``self.CamPath`` and reset all detection state.
        """
        absolute_path = QFileDialog.getOpenFileName(self, '视频选择', '.',
                                                    "MP4 files (*.mp4)")

        # Empty result == dialog cancelled: keep the current source.
        if absolute_path and self.CamPath != absolute_path:
            self.reFlushDetection()
            self.CamPath = absolute_path
        else:
            self.displayMessage("...未进行选择,视频源路径不变...")

    def reFlushDetection(self):
        """Reset all detection state and reload every baseline image/mask.

        Called when the user switches to a new video source so stale
        positions, timers and history queues do not leak into the new
        detection run. Mirrors the initialisation block in ``__init__``
        (but with the bottle-change state enabled here).
        """
        self.X_l = 0
        self.Y_l = 0
        self.type_l = ""
        self.flag = 0
        self.a = 0
        self.tiaoshi_back = False
        self.tiaoshi_forward = False
        self.X_r = 0
        self.Y_r = 0
        self.type_r = ""
        self.firstFrame = None
        self.chaiji_left = False
        self.chaiji_right = False
        self.cltime = 0
        self.crtime = 0
        self.totaltime = 0

        # Values shown on the panel
        self.work_time = 0
        self.tf_time = 0
        self.tb_time = 0

        # Tuning ("tiaoshi") detection baselines and masks
        self.machinedown_base = cv2.imread(
            "images/tiaoshiimages/machinedown_base.jpg")
        self.machineup_base = cv2.imread(
            "images/tiaoshiimages/machineup_base.jpg")

        self.machineup_mask = cv2.imread("images/tiaoshiimages/up1.jpg")
        self.machinedown_mask = cv2.imread("images/tiaoshiimages/down1.jpg")

        self.peopleup_mask = cv2.imread("images/tiaoshiimages/handsup.jpg", 0)
        self.peopledown_mask = cv2.imread("images/tiaoshiimages/handsdown.jpg",
                                          0)

        self.peopleup_base = cv2.imread(
            "images/tiaoshiimages/handsup_base.jpg", 0)
        self.peopledown_base = cv2.imread(
            "images/tiaoshiimages/handsdown_base.jpg", 0)

        self.Ldown = [0] * 10
        self.Lup = [0] * 10  # fixed-length 10-frame history queues
        self.Lhandsdown = [0] * 10
        self.Lhandsup = [0] * 10

        self.isJudgeMachineT = True
        self.tiaoshitime = 0

        self.isUpStart = False
        self.isDownStart = False
        self.machineLocation = ""
        self.downStartTime = 0
        self.upStartTime = 0
        # Bottle-change detection
        self.bottle_area = cv2.imread("images/bottleimages/bottle.jpg", 0)
        self.bottle_area = cv2.resize(self.bottle_area, (1280, 720))
        self.nobottle_base = cv2.imread(
            "images/bottleimages/nobottle_base.jpg", 0)
        self.nobottle_base = cv2.resize(self.nobottle_base, (1280, 720))
        self.Lbottle = [0] * 10
        self.isBottleStart = False
        self.isJudgeMachineB = True
        self.bottletime = 0
        # Machine assembly ("zhuangji") detection
        self.mask_right = cv2.imread("images/zhuangjiimages/right.jpg")
        self.mask_left = cv2.imread("images/zhuangjiimages/maskleft.jpg")
        self.left_base = cv2.imread("images/zhuangjiimages/left_base.jpg", 0)
        # NOTE(review): hue 26-34 in OpenCV HSV is yellow-ish despite the
        # red* names -- confirm the intended colour.
        self.redLower = np.array([26, 43, 46])
        self.redUpper = np.array([34, 255, 255])
        self.Lright = [0] * 10
        self.Lleft = [0] * 10
        self.is_JudgeRL = True
        self.isRightStart = False
        self.isLeftStart = False
        self.zhuangjitime = 0

        self.displayMessage("......初始化参数成功......")

    def confirmMail(self):
        """Echo the semicolon-separated addresses from the mail line edit
        into the mail text browser, then append a confirmation line."""
        raw = str(self.ui.mailLineEdit.text())
        for address in raw.split(';'):
            self.ui.mailTextBroswer.append(address)
        self.ui.mailTextBroswer.append("......邮箱确认完毕,准备发送......")

    def mailSend(self):
        """Send a test notification e-mail to the configured receivers.

        Receiver addresses are read from the UI line edit (semicolon
        separated) and appended to the default receiver list. The SMTP
        connection is now always released in ``finally`` (it was leaked
        before on both success and failure).
        """
        import smtplib
        sender = '*****@*****.**'
        receivers = ['*****@*****.**']
        text = self.ui.mailLineEdit.text()
        for line in str(text).split(';'):
            receivers.append(line)
        # NOTE(review): the leading indentation inside this literal means
        # the To/Subject lines are likely not parsed as real headers by the
        # receiving server -- confirm before changing.
        message = """From: From Person <*****@*****.**>
        To: To Person <*****@*****.**>
        Subject: SMTP e-mail test

        This is a test e-mail message.
        """
        smtpObj = None
        try:
            smtpObj = smtplib.SMTP()
            smtpObj.connect("smtp.qq.com", 25)
            # SECURITY: hard-coded credential; should be moved to config.
            mail_license = "wuhchbmndrjabgcc"
            print("准备登录")
            smtpObj.login(sender, mail_license)
            print("登录成功!")
            smtpObj.set_debuglevel(1)
            smtpObj.sendmail(sender, receivers, message)
            self.ui.mailTextBroswer.append("......发送邮件成功!......")
        except Exception as e:
            print(e)
            self.ui.mailTextBroswer.append("......发送邮件失败!......")
        finally:
            # Always close the SMTP session; best-effort since quit() can
            # raise if the connection never came up.
            if smtpObj is not None:
                try:
                    smtpObj.quit()
                except Exception:
                    pass

    def displayMonthData(self):
        """Show every OEE row of the month selected in the date edit as a
        read-only table; shows a single "no data" cell when the month is
        empty.
        """
        self.ui.DateTable.clear()
        da = data_access.DataAccess()

        # Get the selected month (the date edit supplies the text shown).
        select_date = self.ui.dateEdit.text()
        # NOTE(review): the month value is interpolated straight into the
        # SQL string; acceptable for a UI date widget but not injection-safe.
        queryByMonth = "select * from oee_date where date_format(SJC,'%Y-%m')='{}'".format(
            select_date)

        # Fetch the month's rows
        result = da.select_(queryByMonth)
        row = len(result)
        if row == 0:
            # No rows: single merged-looking cell with a notice.
            self.ui.DateTable.setRowCount(1)
            self.ui.DateTable.setColumnCount(1)
            self.ui.DateTable.setEditTriggers(
                QtGui.QTableWidget.NoEditTriggers)
            self.ui.DateTable.horizontalHeader().setResizeMode(
                QtGui.QHeaderView.Stretch)
            newItem = QtGui.QTableWidgetItem(
                "                    日期 {} 暂无数据".format(
                    select_date))  # QTableWidgetItem accepts str, not int
            textFont = QtGui.QFont("song", 16, QtGui.QFont.Bold)
            newItem.setFont(textFont)

            self.ui.DateTable.setItem(0, 0, newItem)
        else:
            # Table appearance: one row per day, 12 columns (date + 11 hours)
            self.ui.DateTable.setRowCount(row)
            self.ui.DateTable.setColumnCount(12)
            self.ui.DateTable.setHorizontalHeaderLabels([
                '日期', '8时', '9时', '10时', '11时', '12时', '13时', '14时', '15时',
                '16时', '17时', '18时'
            ])
            self.ui.DateTable.setEditTriggers(
                QtGui.QTableWidget.NoEditTriggers)
            self.ui.DateTable.horizontalHeader().setResizeMode(
                QtGui.QHeaderView.Stretch)

            # Fill the cells: column 0 is MM-DD of the date, rest are values.
            for i in range(row):
                list_data = list(result[i])
                for j in range(12):
                    if j == 0:
                        cnt = str(list_data[j])[5:10]
                    else:
                        cnt = str(int(list_data[j]))
                    newItem = QtGui.QTableWidgetItem(cnt)  # QTableWidgetItem accepts str, not int
                    textFont = QtGui.QFont("song", 12, QtGui.QFont.Bold)
                    newItem.setFont(textFont)
                    self.ui.DateTable.setItem(i, j, newItem)

    def control_judge(self):
        """Re-arm the detection judges: any judge whose last trigger was
        more than two minutes ago becomes eligible to fire again.

        Runs periodically on the thread_control timer.
        """
        now = time.time()
        if now - self.tiaoshitime > 120:
            self.isJudgeMachineT = True
        if now - self.bottletime > 120:
            self.isJudgeMachineB = True
        if now - self.zhuangjitime > 120:
            self.is_JudgeRL = True

    def video_recogtiaoshi(self):
        """Detect machine-tuning ("tiaoshi") activity on the left camera.

        Two phases: (1) while the judge is armed and the machine is idle,
        decide whether the machine sits in the upper or lower position by
        majority vote over a 10-frame window; (2) once a position is fixed,
        watch the matching hand region and log start/end of tuning into the
        ``dzrecord`` table and the UI message panel.
        """
        img = self.frame_left
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Restrict the frame to the upper/lower machine regions.
        imgupcurrent = cv2.bitwise_and(self.machineup_mask, img)
        imgdowncurrent = cv2.bitwise_and(self.machinedown_mask, img)
        # 1.46 rebalances the two region scores -- empirical factor, TODO confirm.
        updiff = sum(cv2.absdiff(imgupcurrent, self.machineup_base)) * 1.46
        downdiff = sum(cv2.absdiff(imgdowncurrent, self.machinedown_base))
        if self.isJudgeMachineT is True and self.is_work is False:
            if updiff > downdiff:
                self.Ldown.append(1)
                self.Lup.append(0)
            else:
                self.Lup.append(1)
                self.Ldown.append(0)
            self.Ldown.pop(0)
            self.Lup.pop(0)
            # More than 6 of the last 10 frames agree -> lock the position.
            if sum(self.Ldown) > 6:
                self.isJudgeMachineT = False
                self.machineLocation = "down"
            if sum(self.Lup) > 6:
                self.isJudgeMachineT = False
                self.machineLocation = "up"

        if self.machineLocation == "down":
            peopleDownCurrent = cv2.bitwise_and(img_gray, self.peopledown_mask)
            # Threshold 70000 is an empirical hand-presence score -- TODO confirm.
            if sum(cv2.absdiff(peopleDownCurrent,
                               self.peopledown_base)) > 70000:
                self.Lhandsdown.append(1)
            else:
                self.Lhandsdown.append(0)
            self.Lhandsdown.pop(0)
            if sum(self.Lhandsdown) > 6 and self.isDownStart is False:
                message = "[" + datetime.datetime.now().strftime(
                    '%Y/%m/%d %H:%M:%S') + "]" + "工人正在下方调试"
                self.displayMessage(message)
                self.isDownStart = True
                self.downStartTime = time.time()
                current_time = datetime.datetime.now().strftime(
                    '%Y-%m-%d %H:%M:%S')
                da = data_access.DataAccess()
                da.update_("insert into dzrecord(SJC,ACTION,FLAG)values('" +
                           current_time + "','tiaoshi',0)")
            # Timeout: give up tracking this tuning session after 120 s.
            if time.time() - self.downStartTime > 120:
                self.isDownStart = False
                self.machineLocation = ""
                self.tiaoshitime = time.time()

            if sum(self.Lhandsdown) < 4 and self.isDownStart is True:
                message = "[" + datetime.datetime.now().strftime(
                    '%Y/%m/%d %H:%M:%S') + "]" + "工人结束下方调试"
                self.displayMessage(message)
                current_time = datetime.datetime.now().strftime(
                    '%Y-%m-%d %H:%M:%S')
                da = data_access.DataAccess()
                da.update_("insert into dzrecord(SJC,ACTION,FLAG)values('" +
                           current_time + "','tiaoshi',1)")
                self.isDownStart = False
                self.machineLocation = ""

        if self.machineLocation == "up":
            peopleUpCurrent = cv2.bitwise_and(img_gray, self.peopleup_mask)
            if sum(cv2.absdiff(peopleUpCurrent, self.peopleup_base)) > 60000:
                self.Lhandsup.append(1)
            else:
                self.Lhandsup.append(0)
            self.Lhandsup.pop(0)
            print(sum(self.Lhandsup))  # leftover debug output
            if sum(self.Lhandsup) > 6 and self.isUpStart is False:
                message = "[" + datetime.datetime.now().strftime(
                    '%Y/%m/%d %H:%M:%S') + "]" + "工人正在上方调试"
                self.displayMessage(message)
                self.isUpStart = True
                self.upStartTime = time.time()
                current_time = datetime.datetime.now().strftime(
                    '%Y-%m-%d %H:%M:%S')
                da = data_access.DataAccess()
                da.update_("insert into dzrecord(SJC,ACTION,FLAG)values('" +
                           current_time + "','tiaoshi',0)")
            if time.time() - self.upStartTime > 120:  # give up after 120 s
                self.machineLocation = ""
                self.isUpStart = False

            if sum(self.Lhandsup) < 4 and self.isUpStart is True:
                message = "[" + datetime.datetime.now().strftime(
                    '%Y/%m/%d %H:%M:%S') + "]" + "工人结束上方调试"
                self.displayMessage(message)
                current_time = datetime.datetime.now().strftime(
                    '%Y-%m-%d %H:%M:%S')
                da = data_access.DataAccess()
                da.update_("insert into dzrecord(SJC,ACTION,FLAG)values('" +
                           current_time + "','tiaoshi',1)")
                self.machineLocation = ""
                self.isUpStart = False
                self.tiaoshitime = time.time()

    # def video_recogbottle(self):
    #     img = self.frame_right
    #     img = cv2.resize(img, (1280, 720))
    #     if self.isJudgeMachineB is True:
    #         img_bottle = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #         img_bottle = cv2.bitwise_and(img_bottle, self.bottle_area)
    #         if np.sum(cv2.absdiff(img_bottle, self.bottle_area)) < 50000:
    #             self.Lbottle.append(1)
    #         else:
    #             self.Lbottle.append(0)
    #         self.Lbottle.pop(0)
    #         if self.isBottleStart is False and sum(self.Lbottle) > 5:
    #             self.isBottleStart = True  # 初始为False
    #             self.displayMessage(datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') + "..........开始换气瓶!")
    #             current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    #             da = data_access.DataAccess()
    #             da.update_("insert into dzrecord(SJC,ACTION,FLAG)values('" + current_time + "','bottle',0)")
    #
    #         if self.isBottleStart is True and sum(self.Lbottle) < 2:
    #             self.isBottleStart = False
    #             self.displayMessage(datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') + "..........换气瓶结束!")
    #             current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    #             da = data_access.DataAccess()
    #             da.update_("insert into dzrecord(SJC,ACTION,FLAG)values('" + current_time + "','bottle',1)")

    def video_recogzhuangji(self):
        """Detect machine assembly/teardown ("zhuangji"/"chaiji") on the
        left camera.

        Masks the right and left machine regions, colour-thresholds them in
        HSV, and uses 10-frame majority votes to log start/end of teardown
        events into the ``dzrecord`` table and the UI message panel.
        """
        img = self.frame_left
        img = cv2.resize(img, (1280, 720))
        img_right = cv2.bitwise_and(self.mask_right, img)
        hsv_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2HSV)
        mask_det = cv2.inRange(hsv_right, self.redLower, self.redUpper)
        img_left = cv2.bitwise_and(self.mask_left, img)
        hsv_left = cv2.cvtColor(img_left, cv2.COLOR_BGR2HSV)
        mask_det1 = cv2.inRange(hsv_left, self.redLower, self.redUpper)

        if self.is_JudgeRL is True:
            # Low colour response in the right region -> machine part absent.
            if np.sum(mask_det) < 10000:
                self.Lright.append(1)
            else:
                self.Lright.append(0)
            self.Lright.pop(0)
            if sum(self.Lright) > 6 and self.isRightStart is False:
                self.displayMessage(
                    "[" +
                    datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') +
                    "]" + "工人正在右方拆机")
                self.isRightStart = True
                current_time = datetime.datetime.now().strftime(
                    '%Y-%m-%d %H:%M:%S')
                da = data_access.DataAccess()
                da.update_("insert into dzrecord(SJC,ACTION,FLAG)values('" +
                           current_time + "','chaiji',0)")
            if sum(self.Lright) < 2 and self.isRightStart is True:
                self.displayMessage(
                    "[" +
                    datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') +
                    "]" + "工人结束右方拆机")
                self.isRightStart = False
                current_time = datetime.datetime.now().strftime(
                    '%Y-%m-%d %H:%M:%S')
                da = data_access.DataAccess()
                da.update_("insert into dzrecord(SJC,ACTION,FLAG)values('" +
                           current_time + "','chaiji',1)")

            # Same vote for the left region (different empirical threshold).
            if np.sum(mask_det1) < 50000:
                self.Lleft.append(1)
            else:
                self.Lleft.append(0)
            self.Lleft.pop(0)
            if sum(self.Lleft) > 6 and self.isLeftStart is False:
                self.displayMessage(
                    "[" +
                    datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') +
                    "]" + "工人正在左方拆机")
                self.isLeftStart = True
                current_time = datetime.datetime.now().strftime(
                    '%Y-%m-%d %H:%M:%S')
                da = data_access.DataAccess()
                da.update_("insert into dzrecord(SJC,ACTION,FLAG)values('" +
                           current_time + "','chaiji',0)")
            if sum(self.Lleft) < 2 and self.isLeftStart is True:
                self.displayMessage(
                    "[" +
                    datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') +
                    "]" + "工人结束左方拆机")
                self.isLeftStart = False
                current_time = datetime.datetime.now().strftime(
                    '%Y-%m-%d %H:%M:%S')
                da = data_access.DataAccess()
                da.update_("insert into dzrecord(SJC,ACTION,FLAG)values('" +
                           current_time + "','chaiji',1)")

    def shumeiDeal(self):
        """Poll the global ``Stype`` flag and mirror its transitions into
        ``self.stype``, logging a start message when an activity begins
        (Stype becomes non-zero while we are idle) and an end message when
        it finishes (Stype returns to zero).

        Runs forever on its own thread; intended to be started once from
        ``__init__``.
        """
        global Stype
        # Message shown when the activity with the given code starts.
        start_text = {
            1: "工人吃饭!",
            2: "5s保养",
            3: "",
            4: "工人吃饭!",
            5: "工人吃饭!",
            6: "工人吃饭!",
        }
        # Message shown when the activity currently held in self.stype ends.
        end_text = {
            1: "工人结束吃饭!",
            2: "工人结束5s!",
            3: "工人结束吃饭!",
            4: "工人结束吃饭!",
            5: "工人吃饭!",
            6: "工人吃饭!",
        }

        def stamped(txt):
            # Builds '[YYYY-mm-dd HH:MM:SS]******<txt>' -- same layout as before.
            now = time.strftime('%Y-%m-%d %H:%M:%S',
                                time.localtime(time.time()))
            return '[' + now + ']' + "******" + txt

        while True:
            if self.stype == 0 and Stype in start_text:
                # Activity started: announce, then remember its code.
                self.displayMessage(stamped(start_text[Stype]))
                self.stype = Stype
            elif Stype == 0 and self.stype in end_text:
                # Activity ended: build message, clear state, then announce
                # (same ordering as the original branches).
                message = stamped(end_text[self.stype])
                self.stype = 0
                self.displayMessage(message)

            time.sleep(0.06)

    def video_receive_local(
            self,
            cam1='E:/projects-summary/xiaowork/maindo/videos/西奥待检测数据/视频合并200512103448.mp4',
            cam2='E:\\剪辑\\zhuangji\\ch11_20171221084313 00_09_06-00_10_21~2.mp4',
            time_flag=True):
        '''Receive frames from a local video source (runs forever).

        :param cam1: left camera source (path or URL)
        :param cam2: right camera source (currently unused; kept for API
                     compatibility with video_receive_rstp)
        :param time_flag: sleep ~0.04 s per frame; True for local files so
                          playback runs at roughly real time
        :return: None
        '''

        self.left_cam = cv2.VideoCapture(cam1)
        ret_1, frame_1 = self.left_cam.read()
        preCamPath = cam1
        while True:
            # Only publish successfully decoded frames; previously a failed
            # read pushed None into self.frame_left, which crashed the
            # recognition threads on cv2.cvtColor(None, ...).
            if ret_1 and frame_1 is not None:
                self.frame_left = frame_1
            else:
                # End of file / read error: reopen the *current* source to
                # loop it (the old code always reverted to cam1, silently
                # undoing a user-selected CamPath).
                self.left_cam = cv2.VideoCapture(preCamPath)
            if self.CamPath != "" and self.CamPath != preCamPath:
                self.left_cam = cv2.VideoCapture(self.CamPath)
                preCamPath = self.CamPath
            ret_1, frame_1 = self.left_cam.read()
            if time_flag is True:
                time.sleep(0.04)

    def video_receive_rstp(self, cam1='rstp:', cam2='rstp:'):
        '''Receive network video streams.

        Thin wrapper over :meth:`video_receive_local`; a network stream is
        paced by the source itself, so per-frame sleeping is disabled.

        :param cam1: left-camera stream URL
        :param cam2: right-camera stream URL
        :return: None
        '''
        self.video_receive_local(cam2=cam2, cam1=cam1, time_flag=False)

    def video_play(self):
        '''Render the most recent left-camera frame into the UI.

        Draws overlay rectangles for currently detected states (green box at
        the detected spark position, blue fixed "adjust/debug" region, yellow
        left/right teardown regions), then converts the frame to a QImage and
        shows it in ``self.ui.label``. Does nothing if no frame has been
        received yet.
        :return: None
        '''
        def label_show_left(frame, label=self.ui.label):  # render into the left label widget
            frame_change = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            if self.type_l == 'work':
                # Green box around the detected spark position.
                cv2.rectangle(frame_change, (self.X_l, self.Y_l),
                              (self.X_l + 100, self.Y_l + 100), (0, 255, 0), 4)
            if self.tiaoshi_back is True:
                # Blue box over the fixed "adjust/debug" region of interest.
                cv2.rectangle(frame_change, (610, 130), (690, 260),
                              (255, 0, 0), 4)
            if self.chaiji_left is True:
                # Yellow box over the left teardown region.
                cv2.rectangle(frame_change, (20, 350), (210, 600),
                              (255, 255, 0), 4)
            if self.chaiji_right is True:
                # Yellow box over the right teardown region.
                cv2.rectangle(frame_change, (980, 5), (1090, 130),
                              (255, 255, 0), 4)

            frame_resize = cv2.resize(frame_change, (360, 240),
                                      interpolation=cv2.INTER_AREA)

            image = QtGui.QImage(frame_resize.data, frame_resize.shape[1],
                                 frame_resize.shape[0],
                                 QtGui.QImage.Format_RGB888)  # wrap as QImage
            label.setPixmap(QtGui.QPixmap.fromImage(image))

        if self.frame_left is not None:
            label_show_left(self.frame_left)

    def draw(self):
        '''
        Refresh the four dashboard charts shown in the UI.

        Builds one QGraphicsScene per chart (loss pie, OEE daily trend, loss
        histogram, consumables usage) and installs it into the matching
        QGraphicsView.

        NOTE(review): every chart currently plots hard-coded demo values; the
        original database-driven plotting calls are commented out below —
        confirm whether this is intentional before shipping.
        :return: None
        '''
        def draw_fp():  # draw the loss pie chart
            fp = Figure_Pie()
            da = data_access.EquipmentData()
            # NOTE(review): `result` is fetched but unused — the plot below
            # uses fixed demo numbers instead of the query result.
            result = da.select()
            # fp.plot(*(result[-1][1], result[-1][2], result[-1][3], result[-1][4]))  # '*' unpacks the tuple (1,1,1,1) into 1 1 1 1
            fp.plot(*(33, 28, 37, 94))
            graphicscene_fp = QtGui.QGraphicsScene()
            graphicscene_fp.addWidget(fp.canvas)
            self.ui.graphicsView_Pie.setScene(graphicscene_fp)
            self.ui.graphicsView_Pie.show()

        def draw_oee():  # draw the daily OEE trend chart
            current_time = datetime.datetime.now().strftime('%Y-%m-%d')
            lossTime = data_access.EquipmentTimeData()
            result_loss = lossTime.select_(
                "select * from loss ORDER BY SJ DESC limit 1")
            # NOTE(review): current_time, result_loss and zongshijian are
            # only consumed by the commented-out computation below.
            zongshijian = time.strftime('%H:%M:%S',
                                        time.localtime(time.time()))
            # huanxing = result_loss[0][1]
            # dailiao = result_loss[0][2]
            # shebeiguzhang = result_loss[0][3]
            # tingzhi = result_loss[0][4]
            # # qitashijian=result[0][5]
            # # kongyunzhuan=result[0][6]
            # fuheshijian = (int(zongshijian.split(':')[0]) - 8) * 3600 + int(zongshijian.split(':')[1]) * 60 + int(
            #     zongshijian.split(':')[2]) - tingzhi
            # shijijiagong_1 = fuheshijian - huanxing - dailiao - shebeiguzhang

            da = data_access.DataAccess()
            resultw = da.select_(
                "SELECT * from mstatus where SJC>date(now()) and `status`=1")
            if len(resultw) != 0:
                start_time = resultw[0][0]
                fuheshijian = (datetime.datetime.now() - start_time).seconds
                if time.localtime()[3] > 11:
                    fuheshijian = (datetime.datetime.now() -
                                   start_time).seconds - 60  # subtract lunch-break time
                eff = int(len(resultw) / fuheshijian * 100 * 22)  # efficiency estimate
            else:
                eff = 0

            hour = time.localtime()[3]  # refreshed in real time
            da_oee = data_access.OEEData()
            # da_oee.update_("update oee_date set O" + str(hour) + "=" + str(eff) + ' where SJC="' + current_time + '"')
            L_eff = []
            oee = Figure_OEE()
            da = data_access.OEEData()
            # result = da.select()
            # NOTE(review): `hour` is recomputed here (duplicate of above),
            # and `result` is a 12-entry demo list — range(1, hour - 6)
            # indexes past its end once hour >= 19 (IndexError); confirm the
            # intended operating hours.
            hour = time.localtime()[3]
            result = [77, 82, 83, 79, 81, 85, 81, 78, 81, 85, 82, 81]
            for i in range(1, hour - 6):
                L_eff.append(result[i])
            oee.plot(*tuple(L_eff))  # one value per elapsed hour since 07:00
            # oee.plot(*tuple([77,82,83,79,81,85,81,78]))
            graphicscene_oee = QtGui.QGraphicsScene()
            graphicscene_oee.addWidget(oee.canvas)
            self.ui.graphicsView_OEE.setScene(graphicscene_oee)
            self.ui.graphicsView_OEE.show()

        def draw_loss():  # draw the loss histogram
            loss = Figure_Loss()
            da = data_access.EquipmentTimeData()
            # NOTE(review): `result` is queried but unused (demo values below).
            result = da.select()
            # loss.plot(*(result[-1][1], result[-1][2], result[-1][3], result[-1][4]))
            loss.plot(*(140, 121, 113, 437))
            graphicscene_loss = QtGui.QGraphicsScene()
            graphicscene_loss.addWidget(loss.canvas)
            self.ui.graphicsView_Loss.setScene(graphicscene_loss)
            self.ui.graphicsView_Loss.show()

        def draw_mt():  # draw the consumables-usage chart
            mt = Figure_MT()
            bottle = 0
            current_time = datetime.datetime.now().strftime('%Y-%m-%d')
            da_mt = data_access.EquipmentTimeData()
            # result_mt = da_mt.select_(
            #     'select * from mtrecord where date_format(SJC, "%Y-%m-%d") = "' + current_time + '"')
            # for result_m in result_mt:
            #     if result_m[1] == "bottlechange":
            #         bottle += 1
            mt.plot(*(4, 5, 3))
            graphicscene_mt = QtGui.QGraphicsScene()
            graphicscene_mt.addWidget(mt.canvas)
            self.ui.graphicsView_MT.setScene(graphicscene_mt)
            self.ui.graphicsView_MT.show()

        draw_fp()
        draw_loss()
        draw_mt()
        draw_oee()

    def video_recog(self):
        '''
        Video-recognition step: classify the current left frame as
        working/idle, log transitions, and persist a periodic heartbeat.

        Reads ``self.frame_left`` (must already hold a frame) and updates
        the ``type_l``/``X_l``/``Y_l``/``is_work``/``work_time``/
        ``one_static_time`` state that :meth:`video_play` renders.
        :return: None
        '''
        self.totaltime += 1
        frame_left = self.frame_left  # raw color frame, left camera
        frame_left_gray = cv2.cvtColor(frame_left,
                                       cv2.COLOR_BGR2GRAY)  # grayscale copy for the detectors

        # frame_right = self.frame_right
        # frame_right_gray = cv2.cvtColor(frame_right, cv2.COLOR_BGR2GRAY)

        # frame_right = self.frame_left  # raw color frame
        # frame_right_gray = cv2.cvtColor(frame_right, cv2.COLOR_BGR2GRAY)

        def video_recog_left():
            # Analyze the left-camera frame only.
            img = frame_left
            spark, x, y = self.vision.find_spark(img)
            self.q.enqueue(spark)  # rolling window of recent spark flags
            # print(spark)
            if spark:
                self.type_l = 'work'
                self.X_l = x
                self.Y_l = y
            else:
                self.type_l = ''
            if spark or True in self.q.queue:  # a spark within the recent window => machine is working
                self.one_static_time = 0  # idle-stretch counter resets once activity resumes
                self.work_time += 1
                self.is_work = True

                if self.work_time % 20 == 0:
                    # Every 20 working frames, persist a "working" heartbeat row.
                    current_time = datetime.datetime.now().strftime(
                        '%Y-%m-%d %H:%M:%S')
                    da = data_access.DataAccess()

                    # NOTE(review): SQL built by string concatenation; the
                    # value is an internally generated timestamp, but a
                    # parameterized query would still be safer.
                    da.update_("insert into mstatus(SJC,status)values('" +
                               current_time + "',1)")
                    message = '[' + time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(
                            time.time())) + ']' + '机器正在工作'
                    self.displayMessage(message)
            else:
                # ******* snapshot idle frames for later diagnosis
                self.is_work = False
                self.one_static_time += 1  # length of the current idle stretch
                if self.one_static_time % 60 == 0:
                    print('start or static')
                    print('静止了,往catch文件夹中查看原因')
                    t = time.localtime()
                    hour = t[3]
                    mini = t[4]
                    seco = t[5]
                    filename = str(hour) + '-' + str(mini) + '-' + str(seco)
                    cv2.imwrite('./catch/' + filename + '.jpg', img)
                # ********

                self.action = ThreadedTCPRequestHandler.action  # latest keyboard/panel action
                if self.action is not None:  # report why the machine is idle, once per new action
                    if self.pre_action is None:
                        print(self.action)
                        message = '[' + time.strftime(
                            '%Y-%m-%d %H:%M:%S', time.localtime(
                                time.time())) + ']' + str(self.action)
                        self.displayMessage(message)

                if self.vision.tiaoshi(frame_left_gray):
                    self.action_video = 'tiaoshi'
                if self.action_video is not None:
                    if self.pre_action_video is None:
                        pass
                        # print(self.action_video)
                        # message = '[' + time.strftime('%Y-%m-%d %H:%M:%S',
                        #                               time.localtime(time.time())) + ']' + str(self.action_video)
                        # self.displayMessage(message)

        video_recog_left()
        self.pre_action = self.action
        self.pre_action_video = self.action_video

    def data_read(self):
        '''Placeholder for a future data-reading routine; currently a no-op.'''
        pass

    def displayMessage(self, message):
        '''Append a formatted log line to the UI's text-browser panel.

        :param message: pre-formatted (usually timestamped) log line
        :return: None
        '''
        browser = self.ui.textBrowser
        browser.append(message)