Example #1
def runDetectionsOnCam(url, camName):
    global config

    newFrame = None
    detector = detect.detector(camName, config.get('ssd', 'protocol'),
                               config.get('ssd', 'model'))
    ##detector = detect_motion.detect_motion()
    cam = webcam.threadCamReader(url)
    cam.start()
    sender = imagezmq.ImageSender(connect_to='tcp://*:555' + camName[-1:],
                                  block=False)
    readFrameID = None
    while True:
        time.sleep(1 / 10000)
        frame, frameID = cam.read()
        # Skip if we read the same frame again or don't have a frame at all
        if frame is None or frameID is None or readFrameID == frameID:
            #print(camName + ' ' + str(frameID))
            continue

        readFrameID = frameID
        newFrame = detector.detect(frame, config.get(camName, 'fps'))

        if newFrame is not None:
            sender.send_image(camName, newFrame)
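
A minimal receiver sketch for the stream above, assuming the sender's non-standard block=False maps to imagezmq's PUB/SUB transport (REQ_REP=False) and that the camera name ends in '1', giving port 5551; in that mode the sender binds and the hub connects, and no reply is sent.

import cv2
import imagezmq

# Hypothetical receiver; port and transport mode are assumptions.
image_hub = imagezmq.ImageHub(open_port='tcp://localhost:5551', REQ_REP=False)
while True:
    cam_name, frame = image_hub.recv_image()
    cv2.imshow(cam_name, frame)
    if cv2.waitKey(1) == 27:  # exit on ESC
        break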
Example #2
def realtime():
    #initialize preview
    cv2.namedWindow("preview")
    dtr = detector()
    vc = cv2.VideoCapture(0)
    model = load_model()
    max_X = 20  # max movement of x possible
    max_Y = 20  # max movement of y possible
    min_jump_X = 600  # not used now
    min_jump_Y = 400  # not used now
    prev_X, prev_Y = locate_cursor()  # initial position
    if vc.isOpened():  #get the first frame
        rval, frame = vc.read()
    else:
        rval = False
    while rval:
        frame = cv2.flip(frame, 1)
        eyes = dtr.detect(frame)
        for (x, y, w, h) in eyes:
            img = cv2.cvtColor(frame[y:y + h, x:x + w], cv2.COLOR_RGB2GRAY)
            img = cv2.resize(img, (50, 50), interpolation=cv2.INTER_AREA)
            frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                                  2)
            #cv2.circle(frame,(int(x + w/2),int(y+h/2)),int(h*0.7),(255,0,0),2)
            img = img.reshape((1, ) + img.shape + (1, )) / 255.
            y_pred = model.predict(img)  #predict the position
            x = int(y_pred[0][0])
            y = int(y_pred[0][1])
            if x - prev_X > max_X:
                x = prev_X + max_X
            elif x - prev_X < -max_X:
                x = prev_X - max_X
            if y - prev_Y > max_Y:
                y = prev_Y + max_Y
            elif y - prev_Y < -max_Y:
                y = prev_Y - max_Y
            move(x, y)
            prev_X = x
            prev_Y = y
            break
        frame = cv2.resize(frame, (200, 160), interpolation=cv2.INTER_AREA)
        cv2.imshow('preview', frame)
        rval, frame = vc.read()
        key = cv2.waitKey(20)
        if key == 27:  # exit on ESC
            break
    cv2.destroyWindow("preview")
    vc.release()  # release the capture device instead of just dropping the reference
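
The if/elif chains above clamp cursor movement to at most max_X / max_Y pixels per frame. The same logic, factored into a small helper; clamp_step is an illustrative name, not from the source.

def clamp_step(new, prev, max_step):
    # Keep `new` within [prev - max_step, prev + max_step].
    return max(prev - max_step, min(prev + max_step, new))

# Equivalent to the chains above:
# x = clamp_step(int(y_pred[0][0]), prev_X, max_X)
# y = clamp_step(int(y_pred[0][1]), prev_Y, max_Y)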
Example #3
def generate_data():
    cursor = []
    i = 20600
    cv2.namedWindow("preview")
    dtr = detector()
    vc = cv2.VideoCapture(0)

    if vc.isOpened():  # get the first frame
        rval, frame = vc.read()
    else:
        rval = False

    while rval:
        frame = cv2.flip(frame, 1)
        eyes = dtr.detect(frame)
        for (x, y, w, h) in eyes:
            frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                                  2)
            eye = frame[y:y + h, x:x + w]
            eye = cv2.cvtColor(eye, cv2.COLOR_RGB2GRAY)
            eye = cv2.resize(eye, (50, 50))
            cv2.imwrite('.\\Data\\eye_images\\eye_{}.jpg'.format(i), eye)
            cursor.append(get_cursor_position())
            i += 1
            print(i)
            break

        cv2.imshow('preview', frame)
        rval, frame = vc.read()
        key = cv2.waitKey(20)
        if key == 27:  # exit on ESC
            break
    cv2.destroyWindow("preview")
    vc.release()  # release the capture device instead of just dropping the reference
    return cursor
Example #4
def find_obj(core):
    '''
    Rotate right until an object is detected by the object detector.
    '''
    obj_detected = False
    while not obj_detected:
        send_act(core, 'TURN_RIGHT')
        time.sleep(.4)
        send_act(core, 'STOP')
        time.sleep(.01)  # stop briefly to stabilize the camera image

        #Obj detected?
        img = take_image()
        if img is not None:
            #find if object is detected
            obj_detected, obj_img = detector(img)

    return obj_img
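
Note that find_obj spins forever if no object ever enters view. A hedged variant with a rotation budget; max_turns is an assumed parameter, not in the source.

def find_obj_bounded(core, max_turns=50):
    '''
    Like find_obj, but gives up after max_turns rotations.
    '''
    for _ in range(max_turns):
        send_act(core, 'TURN_RIGHT')
        time.sleep(.4)
        send_act(core, 'STOP')
        time.sleep(.01)  # stop briefly to stabilize the camera image

        img = take_image()
        if img is not None:
            obj_detected, obj_img = detector(img)
            if obj_detected:
                return obj_img
    return None  # nothing found within the turn budget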
Example #5
    def __init__(self):
        # Deploy darknet53 model on the corresponding device
        yolov3 = darknet("cfg/yolov3-1.cfg", 1)
        yolov3.load_weight("yolov3-1.weights")

        # Deploy stacked hourglass model
        stackedhourglass = StackedHourglass(16)
        stackedhourglass.load_state_dict(torch.load("stacked_hourglass.pkl"))

        cuda = torch.cuda.is_available()
        if cuda:
            yolov3.cuda()
            stackedhourglass.cuda()

        yolov3.eval()
        stackedhourglass.eval()  # both models are inference-only here

        self.detector = detector(yolov3)
        self.estimator = Estimator(stackedhourglass)
Example #6
def collect_images(core, num_images):
    '''
    Rotates and saves images until num_images have been captured.
    '''
    i = 0
    while i < num_images:  # the original for/while combination never terminated
        send_act(core, 'TURN_RIGHT')
        time.sleep(.4)
        send_act(core, 'STOP')
        time.sleep(.01)  # stop briefly to stabilize the camera image

        img = take_image()
        if img is not None:
            obj_detected, obj_img = detector(img)
            save_image('images/image' + str(i) + '.jpg', img)
            if obj_detected:
                save_image('images/detected' + str(i) + '.jpg', obj_img)
            i += 1
            time.sleep(.01)
Example #7
    def __init__(self):
        # Deploy darknet53 model on the corresponding device
        yolov3 = darknet("cfg/yolov3.cfg", 80)
        yolov3.load_weight("yolov3.weights")
        yolov3.eval()

        # Deploy stacked hourglass model
        stackedhourglass = demo.__dict__['hg'](num_stacks=2,
                                               num_blocks=1,
                                               num_classes=16)
        stackedhourglass = torch.nn.DataParallel(stackedhourglass)
        stackedhourglass.eval()
        checkpoint = torch.load('demo/hg_s2_b1/model_best.pth.tar')
        stackedhourglass.load_state_dict(checkpoint['state_dict'])

        cuda = torch.cuda.is_available()
        if cuda:
            yolov3.cuda()
            stackedhourglass.cuda()

        self.detector = detector(yolov3)
        self.estimator = stackedhourglass
Example #8
    )
    st.write(
        'Welcome to the 2MCAM web application that can accurately identify human beings that violate social distancing norms in public spaces.'
    )

elif option == 'Social distancing detector':
    st.title('Social Distancing Detection')
    st.write(
        'The practice of social distancing signifies maintaining a distance of 6 feet or more when in public places, or simply staying at home and away from others as much as possible, to help prevent the spread of COVID-19.'
    )
    st.write(
        'A green/red bounding box is drawn over the individuals in the frame to approve or flag the distance between them. '
    )
    if st.button("Start"):

        detector()

elif option == 'Learn more!':
    st.title('Why 2MCAM?')
    st.image('data/img1.jpg')
    st.write(
        '2MCAM is a user-friendly ML web application and is designed to detect possible violations of social distancing norms.'
    )
    st.write(
        'Violation of physical distancing norms (1.5 - 2 meters) is looked down upon in the era of the COVID-19 pandemic. This is why we have a solution for you: 2MCAM, the web application that can instantaneously detect and flag such violations.'
    )
    st.image('data/2feet.jpeg')
    st.write(
        'Our application has a lightweight frontend and a heavily tested backend. Discover how the application works by navigating to the social distancing detector section.'
    )
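
The snippet opens midway through an if/elif chain over option. A minimal sketch of the selector it implies; the page labels come from the branches above, everything else (including the 'Home' label) is assumed.

import streamlit as st

# Hypothetical sidebar selector feeding the elif chain above.
option = st.sidebar.selectbox(
    'Navigate',
    ('Home', 'Social distancing detector', 'Learn more!'))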
Example #9
    def run(self):
        idx_frame = 0
        # Load yolov3_tiny_se detect
        weights = 'best4.pt'
        cfg = 'yolov3-tiny-1cls-se.cfg'
        img_size = 416
        device = torch_utils.select_device(device='0')
        # print(device)

        # Initialize model
        model = Darknet(cfg, img_size)

        # Load weights
        attempt_download(weights)
        if weights.endswith('.pt'):  # pytorch weights format
            model.load_state_dict(
                torch.load(weights, map_location=device)['model'])
        else:
            load_darknet_weights(model, weights)

        # Preload the model in eval mode
        model.to(device).eval()
        fps_list = []
        sum_id = 0  # initialize so draw_id works even if the first frames have no detections
        while self.vdo.grab():
            idx_frame += 1
            if idx_frame % self.args.frame_interval:
                continue

            _, ori_im = self.vdo.read()  # get frame
            if ori_im is None:
                continue  # skip frames that failed to decode
            im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)
            #print(im.shape)
            start = time.time()
            # run detection and collect the results
            bbox_xywh, cls_conf, cls_ids, bbox_xyxy1 = detector(
                im, device, model)
            #print(bbox_xywh, cls_conf, cls_ids)

            stop = time.time()

            if bbox_xywh is not None:
                # select car class
                mask = cls_ids == 0

                bbox_xywh = bbox_xywh[mask]
                bbox_xywh[:, 3:] *= 1.2  # dilate the bbox in case it is too small
                cls_conf = cls_conf[mask]

                # run tracking on the detections
                outputs, sum_id, track_list = self.deepsort.update(
                    bbox_xywh, cls_conf, im)

                # visualize YOLOv3 and DeepSort results
                if len(outputs) > 0:
                    bbox_xyxy = outputs[:, :4]
                    identities = outputs[:, -1]
                    # print(bbox_xyxy,identities)
                    ori_im = draw_boxes(ori_im, bbox_xyxy, track_list,
                                        identities)  # DeepSort
                    ori_im = draw_boxes111(ori_im, bbox_xyxy1,
                                           cls_conf)  # YOLOv3
                    # ori_im = draw_track(ori_im, bbox_xyxy, track_list) # track
            if bbox_xywh is None:
                sum_car = 'Traffic flow(frame): ' + '0'
                ori_im = cv2.putText(ori_im, sum_car, (10, 50),
                                     cv2.FONT_HERSHEY_PLAIN, 2.5,
                                     [255, 255, 255], 2)
            ori_im = draw_id(ori_im, sum_id)  # sum_id
            end = time.time()
            fps = 1 / (end - start + 0.001)
            fps_list.append(fps)
            # print("yolov3_tiny-time: {:.03f}s, fps: {:.03f}".format(stop - start, 1 / (stop - start)))
            print("total-time: {:.03f}s, fps: {:.03f}".format(
                end - start, fps))

            if self.args.display and ori_im is not None:
                cv2.imshow("test", ori_im)
                cv2.waitKey(1)

            if self.args.save_path:
                self.writer.write(ori_im)
        avg_fps = np.mean(fps_list)
        print("avg_fps: {:.03f}".format(avg_fps))
device = torch_utils.select_device(device='0')
print(device)

# Initialize model
model = Darknet(cfg, img_size)

# Load weights
attempt_download(weights)
if weights.endswith('.pt'):  # pytorch format
    model.load_state_dict(torch.load(weights, map_location=device)['model'])
else:  # darknet format
    load_darknet_weights(model, weights)

# Eval mode
model.to(device).eval()
while True:
    ret, frame = cap.read()
    if ret:
        print(frame.shape)
        start = time.time()
        bbox_xywh, cls_conf, cls_ids = detector(frame, device, model)
        end = time.time()
        print(bbox_xywh, cls_conf, cls_ids)
        print(end - start, 1 / (end - start + 0.00000001))

    else:
        break

cap.release()
cv2.destroyAllWindows()
Example #11
    def run(self):
        idx_frame = 0
        # Load yolov3_tiny_se detect
        weights = 'best4.pt'
        cfg = 'yolov3-tiny-1cls-se.cfg'
        img_size = 416
        device = torch_utils.select_device(device='0')
        #print(device)

        # Initialize model
        model = Darknet(cfg, img_size)

        # Load weights
        attempt_download(weights)
        if weights.endswith('.pt'):  # pytorch format
            model.load_state_dict(
                torch.load(weights, map_location=device)['model'])
        else:  # darknet format
            load_darknet_weights(model, weights)

        # Eval mode
        model.to(device).eval()
        fps_list = []
        while self.vdo.grab():
            idx_frame += 1
            if idx_frame % self.args.frame_interval:
                continue

            _, ori_im = self.vdo.retrieve()
            im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)
            #print(im.shape)
            start = time.time()
            # do detection
            bbox_xywh, cls_conf, cls_ids, bbox_xyxy1 = detector(
                im, device, model)
            #print(bbox_xywh, cls_conf, cls_ids)
            stop = time.time()

            if bbox_xywh is not None:
                # select car class
                mask = cls_ids == 0

                bbox_xywh = bbox_xywh[mask]
                #bbox_xywh[:,3:] *= 1.2 # bbox dilation just in case bbox too small
                cls_conf = cls_conf[mask]

                # do tracking
                outputs = self.deepsort.update(bbox_xywh, cls_conf, im)

                # draw boxes for visualization
                if len(outputs) > 0:
                    bbox_xyxy = outputs[:, :4]
                    identities = outputs[:, -1]
                    #print(bbox_xyxy,identities)
                    ori_im = draw_boxes(ori_im, bbox_xyxy, identities)
                    ori_im = draw_boxes111(ori_im, bbox_xyxy1, cls_conf)

            end = time.time()
            fps = 1 / (end - start + 0.001)
            fps_list.append(fps)
            #print("yolov3_tiny-time: {:.03f}s, fps: {:.03f}".format(stop - start, 1 / (stop - start)))
            print("total-time: {:.03f}s, fps: {:.03f}".format(
                end - start, fps))

            if self.args.display:
                cv2.imshow("test", ori_im)
                cv2.waitKey(1)

            if self.args.save_path:
                self.writer.write(ori_im)
        avg_fps = np.mean(fps_list)
        print("avg_fps: {:.03f}".format(avg_fps))
Example #12
def recognition():
    detector()