Code example #1
def setup_app(app):
    print('Connecting to a DLI PowerSwitch at lpc.digital-loggers.com')
    global controller
    controller = MainController()
    manager = DeviceManager(controller)
    manager.start()
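    # Expose the connection status and the fetched device list as module-level globals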
    global pSuccess
    if not controller.pSuccess:
        pSuccess = 'not established'
    global pList
    pList = controller.get_list()
Code example #2
def main():

    application = MainController()
    application.geometry("700x500")
    application.title("Finit State Machine")

    frame = Frame(application, bg='#0555ff')
    frame.pack(fill='both', expand=True)  # assumed: pack the frame so it actually appears in the window
    application.mainloop()
Code example #3
    def __init__(self):
        self.root = Tk()
        self.root.resizable(width=False, height=False)

        self.root.geometry("300x500")
        self.root.title("What is Title")
        self.root.configure(background="black")
        self.mc = MainController.MainController()
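        # Build all application frames up front; they are handed to the controller below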
        frameList = {
            'sign': SignUpFrame.SignUpFrame(self, self.root),
            'main': MainFrame.MainFrame(self, self.root),
            'selling': SellingFrame.SellingFrame(self, self.root),
            'setting': SettingFrame.SettingFrame(self, self.root),
            'login': LoginFrame.LoginFrame(self, self.root),
            'room': RoomFrame.RoomFrame(self, self.root),
            'watch': WatchlistFrame.WatchlistFrame(self, self.root)
        }

        self.mc.createHandler(frameList)
        #        self.mc.eventHandler['room'].clock.sem.acquire()
        # Set the Frame for LoginFrame
        self.mc.eventHandler.changeFrame(self.mc.frameList['login'].loginFrame)
        self.frameList = frameList
        self.root.mainloop()
        self.quit()
Code example #4
    def __init__(self, port, clientList):
        threading.Thread.__init__(self)
        self.port = port
        self.clientList = clientList
        self.userList = []    # assumed empty; must exist before MainController is constructed
        self.threadList = []  # initialize before it is passed to MainController
        self.mc = MainController.MainController(self.userList, self.threadList)
        self.db = DBConnection.DBConnection()
Code example #5
File: main.py Project: VanxeHus/ForFriends
def handle(sck, addr):
    dataBuf = ""
    startTime = time.time()
    while True:
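        # Accumulate received bytes until a full header and body are available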
        dataBuf += sck.recv(1024)
        nowTime = time.time()
        # Process once the full header has been received
        if len(dataBuf) >= sCon.HEADER_SIZE:
            # Read the header
            startTime = time.time()
            header = struct.unpack('>I4s', dataBuf[:sCon.HEADER_SIZE])
            #print header
            # Process once the full body has been received
            if len(dataBuf) >= sCon.HEADER_SIZE + header[0]:
                body = dataBuf[sCon.HEADER_SIZE:sCon.HEADER_SIZE + header[0]]
                # Split the body
                # Extract each parameter from the body
                bDic = body.split()
                print "bDic:", bDic
                bMap = {}
                for value in bDic:
                    tDic = value.split(":")
                    if len(tDic) > 1:
                        bMap[tDic[0]] = tDic[1]
                print "bMap:", bMap
                # Packet dictionary
                data = {"Header": header, "Body": bMap}
                MainController.GetInstance().Handle(sck, addr, data)
                # Discard the packet that has just been processed
                dataBuf = dataBuf[sCon.HEADER_SIZE + header[0]:]
            else:
                #raise NameError, "body's len is:%s not enough" % (len(dataBuf) - sCon.HEADER_SIZE)
                continue
        else:
            if nowTime - startTime > sCon.ALIVE:
                sck.close()
                removeSocket(addr)
                return
            else:
                #raise NameError, "data'len is:%s not enough" % len(dataBuf)
                continue
Code example #6
from Controller.MainController import *
if __name__ == "__main__":
    controller = MainController()

    controller.run()

Code example #7
def detect(save_img):
    out, source, weights, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source.isnumeric() or source.startswith(
        ('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')

    # Initialize
    set_logging()
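    # Seed the counters with the totals most recently stored through MainController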
    cars = MainController.getLatestVehicleAmount('cars', 'carId')
    motors = MainController.getLatestVehicleAmount('motorcycles',
                                                   'motorcycleId')
    trucks = MainController.getLatestVehicleAmount('trucks', 'truckId')

    totalCarAmount = cars + motors + trucks
    totalCars = cars
    totalTrucks = trucks
    totalMotors = motors
    displayTotalAmount = totalCarAmount
    displayCarAmount = totalCars
    displayTruckAmount = totalTrucks
    displayMotorAmount = totalMotors
    oldCombinedAmount = 0
    combinedAmount = 0
    tempAmount = 0

    # Video = False, Webcam = True
    control = False

    elapsed = 0
    device = select_device(opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
    start = time.time()
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
    if half:
        model.half()  # to FP16

    # Second-stage classifier
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(
            torch.load('weights/resnet101.pt',
                       map_location=device)['model'])  # load weights
        modelc.to(device).eval()

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)

    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    # colors = [[np.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

    # Run inference

    t0 = time.time()
    ct = CentroidTracker()
    listDet = ['car', 'motorcycle', 'truck']

    totalDownCar = 0
    totalDownMotor = 0
    totalDownTruck = 0

    totalUpCar = 0
    totalUpMotor = 0
    totalUpTruck = 0

    trackableObjects = {}

    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
    for path, img, im0s, vid_cap in dataset:
        elapsed = time.time() - start
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS
        pred = non_max_suppression(pred,
                                   opt.conf_thres,
                                   opt.iou_thres,
                                   classes=opt.classes,
                                   agnostic=opt.agnostic_nms)
        t2 = time_synchronized()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        rects = []
        labelObj = []
        arrCentroid = []
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
                # cv2.resize(im0, (2560, 1440))
            else:
                p, s, im0 = path, '', im0s
                # cv2.resize(im0, (2560, 1440))

            height, width, channels = im0.shape
            cv2.line(im0, (0, int(height / 1.5)),
                     (int(width), int(height / 1.5)), (255, 0, 0),
                     thickness=3)

            if not control:
                cv2.putText(im0,
                            'Totale koeretoejer: ' + str(displayTotalAmount),
                            (int(width * 0.02), int(height * 0.5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 255, 255), 2)
                cv2.putText(im0, 'Bil: ' + str(displayCarAmount),
                            (int(width * 0.02), int(height * 0.55)),
                            cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255), 2)
                cv2.putText(im0, 'Motorcykel: ' + str(displayMotorAmount),
                            (int(width * 0.02), int(height * 0.60)),
                            cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255), 2)
                cv2.putText(im0, 'Lastbil: ' + str(displayTruckAmount),
                            (int(width * 0.02), int(height * 0.65)),
                            cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255), 2)
            else:
                cv2.putText(im0,
                            'Totale koeretoejer: ' + str(displayTotalAmount),
                            (int(width * 0.02), int(height * 0.5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)

            # cv2.line(im0, (int(width / 1.8), int(height / 1.5)), (int(width), int(height / 1.5)), (255, 127, 0), thickness=3)

            save_path = str(Path(out) / Path(p).name)
            txt_path = str(Path(out) / Path(p).stem) + (
                '_%g' % dataset.frame if dataset.mode == 'video' else '')
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1,
                                          0]]  # normalization gain whwh
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                          im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    label = '%s %.2f' % (names[int(cls)], conf)
                    # print(xyxy)
                    x = xyxy
                    tl = round(0.002 * (im0.shape[0] + im0.shape[1]) / 2) + 1  # line/font thickness
                    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
                    label1 = label.split(' ')
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) /
                                gn).view(-1).tolist()  # normalized xywh
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * 5 + '\n') %
                                    (cls, *xywh))  # label format

                    if label1[0] in listDet:
                        cv2.rectangle(im0,
                                      c1,
                                      c2, (0, 0, 0),
                                      thickness=tl,
                                      lineType=cv2.LINE_AA)
                        box = (int(x[0]), int(x[1]), int(x[2]), int(x[3]))
                        rects.append(box)
                        labelObj.append(label1[0])
                        tf = max(tl - 1, 1)
                        t_size = cv2.getTextSize(label,
                                                 0,
                                                 fontScale=tl / 3,
                                                 thickness=tf)[0]
                        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                        cv2.rectangle(im0, c1, c2, (0, 100, 0), -1,
                                      cv2.LINE_AA)
                        cv2.putText(im0,
                                    label, (c1[0], c1[1] - 2),
                                    0,
                                    tl / 3, [225, 255, 255],
                                    thickness=tf,
                                    lineType=cv2.LINE_AA)

                detCentroid = generateCentroid(rects)
                objects = ct.update(rects)

                for (objectID, centroid) in objects.items():
                    arrCentroid.append(centroid[1])
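                # Track each object's vertical motion against the counting line and tally it
                # once per object, split by direction (up/down) and class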
                for (objectID, centroid) in objects.items():
                    # print(idxDict)
                    to = trackableObjects.get(objectID, None)
                    if to is None:
                        to = TrackableObject(objectID, centroid)
                    else:
                        y = [c[1] for c in to.centroids]
                        direction = centroid[1] - np.mean(y)
                        to.centroids.append(centroid)
                        if not to.counted:

                            # moving up: count only between height/1.7 and height/1.5, otherwise
                            # a distant car can be counted twice when its bbox reappears
                            if direction < 0 and height / 1.7 < centroid[1] < height / 1.5:
                                idx = detCentroid.tolist().index(
                                    centroid.tolist())
                                if (labelObj[idx] == 'car'):
                                    totalUpCar += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'motorcycle'):
                                    totalUpMotor += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'truck'):
                                    totalUpTruck += 1
                                    to.counted = True

                            elif direction > 0 and centroid[1] > height / 1.5:  # moving down
                                idx = detCentroid.tolist().index(
                                    centroid.tolist())
                                if (labelObj[idx] == 'car'):
                                    totalDownCar += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'motorcycle'):
                                    totalDownMotor += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'truck'):
                                    totalDownTruck += 1
                                    to.counted = True

                    trackableObjects[objectID] = to

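                # Recompute the running totals; when a class count changes, update the
                # on-screen counters and persist the new value to the database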
                oldCarAmount = totalCarAmount
                oldTotalCars = totalCars
                oldTotalTrucks = totalTrucks
                oldTotalMotors = totalMotors

                combinedAmount = totalDownCar + totalDownTruck + totalDownMotor + \
                                 totalUpCar + totalUpMotor + totalUpTruck

                totalCars = totalDownCar + totalUpCar
                totalTrucks = totalDownTruck + totalUpTruck
                totalMotors = totalDownMotor + totalUpMotor

                if not oldCombinedAmount == combinedAmount:
                    tempAmount = totalCarAmount + combinedAmount
                    oldCombinedAmount = combinedAmount

                if oldCarAmount < tempAmount:
                    totalCarAmount = tempAmount

                if not oldCarAmount == totalCarAmount:
                    displayTotalAmount += 1

                    if not oldTotalCars == totalCars:
                        dbInsOrUpdCar(totalCars)
                        displayCarAmount += 1

                    if not oldTotalTrucks == totalTrucks:
                        dbInsOrUpdTruck(totalTrucks)
                        displayTruckAmount += 1

                    if not oldTotalMotors == totalMotors:
                        dbInsOrUpdMotorcycle(totalMotors)
                        displayMotorAmount += 1

                if not control:
                    cv2.putText(im0, 'Frakoerende: ',
                                (int(width * 0.6), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 255, 255), 2)
                    cv2.putText(im0, 'Bil: ' + str(totalUpCar),
                                (int(width * 0.6), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalUpMotor),
                                (int(width * 0.6), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Lastbil: ' + str(totalUpTruck),
                                (int(width * 0.6), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)

                    cv2.putText(im0, 'Modkoerende: ',
                                (int(width * 0.02), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 255, 255), 2)
                    cv2.putText(im0, 'Bil: ' + str(totalDownCar),
                                (int(width * 0.02), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalDownMotor),
                                (int(width * 0.02), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Lastbil: ' + str(totalDownTruck),
                                (int(width * 0.02), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                else:
                    cv2.putText(im0, 'Frakoerende: ',
                                (int(width * 0.6), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 4, (50, 255, 255), 3)
                    cv2.putText(im0, 'Bil: ' + str(totalUpCar),
                                (int(width * 0.6), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalUpMotor),
                                (int(width * 0.6), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Lastbil: ' + str(totalUpTruck),
                                (int(width * 0.6), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)

                    cv2.putText(im0, 'Modkoerende: ',
                                (int(width * 0.02), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 4, (50, 255, 255), 3)
                    cv2.putText(im0, 'Bil: ' + str(totalDownCar),
                                (int(width * 0.02), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalDownMotor),
                                (int(width * 0.02), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Lastbil: ' + str(totalDownTruck),
                                (int(width * 0.02), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results
            if view_img:
                cv2.namedWindow('Main', cv2.WINDOW_NORMAL)
                cv2.resizeWindow('Main', 1920, 1080)
                cv2.imshow("Main", im0)

                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)

                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer

                        fourcc = 'mp4v'  # output video codec
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(
                            save_path, cv2.VideoWriter_fourcc(*fourcc), fps,
                            (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('Results saved to %s' % Path(out))

    print('Done. (%.3fs)' % (time.time() - t0))
Code example #9
File: main.py Project: jellyDean/pound-selfie
class App(wx.App):
    def OnInit(self):
        self.frame = MainController()
        self.frame.show()

        return True
Code example #10
from Utility.Info.InitializeArisuRecord import *
import sys
from PyQt5.QtWidgets import QApplication
from Controller.MainController import *

if __name__ == '__main__':
    try:
        # Execute App
        app = QApplication(sys.argv)
        ExecuteLogger.printLog(str(sys.argv))  # todo: remove this
        view = MainView()
        c = MainController(view)
        c.start()
        if len(sys.argv) > 1:
            for idx in range(1, len(sys.argv)):
                c.openRecordFile(sys.argv[idx])  # todo: today's record might get opened again
        app.exec_()
    except Exception as e:
        ExecuteLogger.printLog(str(e) + 'TopLevel Quit')
        ErrorLogger.reportError('Unexpected TopLevel Quit', e)
    finally:
        ExecuteLogger.printLog('@@Finish program@@\n')