def __handle_done(self):
        """
        to write the final box
        save ids and cordinate of the box in the self.output
        :return:
        """
        print(self.coordinates)
        open_cv.line(self.image,
                     self.coordinates[2],
                     self.coordinates[3],
                     self.color,
                     1)
        open_cv.line(self.image,
                     self.coordinates[3],
                     self.coordinates[0],
                     self.color,
                     1)

        self.click_count = 0
        coordinates = np.array(self.coordinates)


        self.output.write("-\n          id: " + str(self.ids) + "\n          coordinates: [" +
                          "[" + str(self.coordinates[0][0]) + "," + str(self.coordinates[0][1]) + "]," +
                          "[" + str(self.coordinates[1][0]) + "," + str(self.coordinates[1][1]) + "]," +
                          "[" + str(self.coordinates[2][0]) + "," + str(self.coordinates[2][1]) + "]," +
                          "[" + str(self.coordinates[3][0]) + "," + str(self.coordinates[3][1]) + "]]\n")

        draw_contours(self.image, coordinates, str(self.ids + 1), COLOR_WHITE)

        for i in range(0, 4):
            self.coordinates.pop()

        self.ids += 1
    def __handle_done(self):
        """
        Finish the box in progress: draw its last two edges, append the
        id and the four corner points to the output file, label the
        contour on the image, and reset the click state.

        :return: None
        """
        open_cv.line(self.image, self.coordinates[2], self.coordinates[3],
                     self.color, 1)
        open_cv.line(self.image, self.coordinates[3], self.coordinates[0],
                     self.color, 1)

        self.click_count = 0

        coordinates = np.array(self.coordinates)
        # BUG FIX: removed leftover debug print(coordinates).

        # Serialize as "- id: N / coordinates: [[x,y],...]"; the exact
        # spacing is consumed downstream, so the bytes written are
        # identical to the original concatenation chain.
        corner_text = ",".join(
            "[" + str(pt[0]) + "," + str(pt[1]) + "]"
            for pt in self.coordinates[:4])
        self.output.write("-\n          id: " + str(self.ids) +
                          "\n          coordinates: [" + corner_text + "]\n")

        draw_contours(self.image, coordinates, str(self.ids + 1), COLOR_WHITE)

        # Drop this box's four points, preserving the list identity.
        del self.coordinates[-4:]

        self.ids += 1
# Exemplo n.º 3 (0) — example-site separator left over from scraping; not Python code.
    def process_algo_per_frame(self, frame, capture, coordinates_data, times,
                               statuses):
        """
        Run one detection pass over *frame*.

        Debounces per-spot status changes, draws each spot's contour on
        a copy of the frame, mirrors the statuses into the water-level
        slots, and persists the water level when the confirmed count
        changed since the previous frame.

        :return: (annotated frame, updated water-level slots)
        """
        smoothed = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
        gray = open_cv.cvtColor(smoothed, open_cv.COLOR_BGR2GRAY)
        annotated = frame.copy()
        logging.debug("new_frame: %s", annotated)

        elapsed = capture.get(open_cv.CAP_PROP_POS_MSEC) / 1000.0

        for idx, spot in enumerate(coordinates_data):
            detected = self.__apply(gray, idx, spot)
            pending = times[idx] is not None

            # Pending change reverted: cancel the debounce timer.
            if pending and self.same_status(statuses, idx, detected):
                times[idx] = None
                continue

            # Pending change held long enough: commit it.
            if pending and self.status_changed(statuses, idx, detected):
                if elapsed - times[idx] >= MotionDetector.DETECT_DELAY:
                    statuses[idx] = detected
                    if detected:
                        if self.previousStatusesCount == 0:
                            self.previousStatusesCount += 1
                        self.currentStatusesCount += 1
                    times[idx] = None
                continue

            # Fresh change: start the debounce timer.
            if not pending and self.status_changed(statuses, idx, detected):
                times[idx] = elapsed

        for idx, spot in enumerate(coordinates_data):
            outline = self._coordinates(spot)

            shade = COLOR_GREEN if statuses[idx] else COLOR_BLUE
            draw_contours(annotated, outline, str(spot["id"] + 1),
                          COLOR_WHITE, shade)

        # A True status maps to water level False and vice versa.
        for idx, spot in enumerate(coordinates_data):
            slot = self.waterLevelSlots.get_water_level_slots()[idx]
            slot.set_water_level(not statuses[idx])

        if self.previousStatusesCount != self.currentStatusesCount:
            self.previousStatusesCount = self.currentStatusesCount
            self.persistWaterLevelData(
                self.waterLevelSlots.get_current_water_level())
        else:
            self.previousStatusesCount = 0

        return annotated, self.waterLevelSlots
# Exemplo n.º 4 (0) — example-site separator left over from scraping; not Python code.
    def detectionTrigger(self, frame, capture, coordinates_data, times,
                         statuses, spotNo):
        """
        Run one detection pass over *frame* and report whether the spot
        at index *spotNo* is occupied.

        Draws the per-spot contours in a preview window; pressing 'q'
        releases *capture* and closes all OpenCV windows.

        :param frame: BGR frame to analyse.
        :param capture: the VideoCapture the frame came from.
        :param coordinates_data: list of spot definitions.
        :param times: per-spot pending-change timestamps (mutated).
        :param statuses: per-spot occupancy flags (mutated).
        :param spotNo: index of the spot to report on.
        :return: the (possibly updated) status of spot *spotNo*.
        """
        # BUG FIX: the whole body was wrapped in a dead "if True:"
        # conditional; removed it and de-indented.
        blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
        grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
        new_frame = frame.copy()
        logging.debug("new_frame: %s", new_frame)

        position_in_seconds = capture.get(
            open_cv.CAP_PROP_POS_MSEC) / 1000.0

        for index, c in enumerate(coordinates_data):
            status = self.__apply(grayed, index, c)

            # Pending change reverted: cancel the debounce timer.
            if times[index] is not None and self.same_status(
                    statuses, index, status):
                times[index] = None
                continue

            # Pending change held for DETECT_DELAY: commit it.
            if times[index] is not None and self.status_changed(
                    statuses, index, status):
                if position_in_seconds - times[
                        index] >= MotionDetector.DETECT_DELAY:
                    statuses[index] = status
                    times[index] = None
                continue

            # Fresh change: start the debounce timer.
            if times[index] is None and self.status_changed(
                    statuses, index, status):
                times[index] = position_in_seconds

        for index, p in enumerate(coordinates_data):
            coordinates = self._coordinates(p)

            color = COLOR_GREEN if statuses[index] else COLOR_BLUE
            draw_contours(new_frame, coordinates, str(p["id"] + 1),
                          COLOR_WHITE, color)

        open_cv.imshow(str(self.video), new_frame)
        k = open_cv.waitKey(1)
        if k == ord("q"):
            capture.release()
            open_cv.destroyAllWindows()

        # Small delay to throttle the preview loop.
        time.sleep(0.011)
        # Report whether the requested spot is filled/occupied.
        return statuses[spotNo]
# Exemplo n.º 5 (0) — example-site separator left over from scraping; not Python code.
    def process_algo_per_frame(self, frame, capture, coordinates_data, times,
                               statuses):
        """
        Analyse one frame: debounce per-spot status changes, draw the
        spot contours on a copy of the frame, and mirror the result
        into the carpark slot occupancy flags.

        :return: (annotated frame, carpark object)
        """
        softened = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
        gray = open_cv.cvtColor(softened, open_cv.COLOR_BGR2GRAY)
        canvas = frame.copy()
        logging.debug("new_frame: %s", canvas)

        now_seconds = capture.get(open_cv.CAP_PROP_POS_MSEC) / 1000.0

        for idx, spot in enumerate(coordinates_data):
            observed = self.__apply(gray, idx, spot)
            has_timer = times[idx] is not None

            # Pending change reverted: cancel the timer.
            if has_timer and self.same_status(statuses, idx, observed):
                times[idx] = None
                continue

            # Pending change persisted long enough: commit it.
            if has_timer and self.status_changed(statuses, idx, observed):
                if now_seconds - times[idx] >= MotionDetector.DETECT_DELAY:
                    statuses[idx] = observed
                    times[idx] = None
                continue

            # Fresh change: start the debounce timer.
            if not has_timer and self.status_changed(statuses, idx, observed):
                times[idx] = now_seconds

        for idx, spot in enumerate(coordinates_data):
            outline = self._coordinates(spot)

            shade = COLOR_GREEN if statuses[idx] else COLOR_BLUE
            draw_contours(canvas, outline, str(spot["id"] + 1),
                          COLOR_WHITE, shade)

            slot = self.carPark.get_carpark_slots()[idx]
            if slot is not None:
                # Green (status True) means the spot is free, so the
                # slot is occupied exactly when the shade is not green.
                slot.set_occupancy(shade != COLOR_GREEN)
        return canvas, self.carPark
    def unique_box(self):
        """
        Store the box just drawn, keeping the saved boxes unique.

        Called once a box's four corner clicks are complete.  If the
        new box's centre falls strictly inside a previously saved box,
        that box is overwritten in place and its id reused; otherwise
        the box is stored under a fresh id.

        :return: None
        """
        box_pts = np.array(self.coordinates)
        # Close the quadrilateral: edge 2->3 and edge 3->0.
        open_cv.line(self.image, self.coordinates[2], self.coordinates[3],
                     self.color, 1)
        open_cv.line(self.image, self.coordinates[3], self.coordinates[0],
                     self.color, 1)

        # Centroid of the four corners.
        cx = (self.coordinates[0][0] + self.coordinates[1][0] +
              self.coordinates[2][0] + self.coordinates[3][0]) / 4
        cy = (self.coordinates[0][1] + self.coordinates[1][1] +
              self.coordinates[2][1] + self.coordinates[3][1]) / 4

        for key in self.saveCordinate:
            saved = self.saveCordinate[key]
            xs = [saved[0][0], saved[1][0], saved[2][0], saved[3][0]]
            ys = [saved[0][1], saved[1][1], saved[2][1], saved[3][1]]
            if min(xs) < cx < max(xs) and min(ys) < cy < max(ys):
                # Overlap: reuse the old id and replace its corners.
                self.saveCordinate[key] = self.coordinates.copy()
                draw_contours(self.image, box_pts, str(key), COLOR_WHITE)
                # Cancel the id bump applied unconditionally below.
                self.ids -= 1
                self.click_count = 0
                break
        else:
            # No overlap: register the box under the next id.
            draw_contours(self.image, box_pts, str(self.ids + 1), COLOR_WHITE)
            self.saveCordinate[self.ids + 1] = self.coordinates.copy()
            logging.warning(self.saveCordinate)

        # Drop this box's four clicks, preserving the list identity.
        del self.coordinates[-4:]

        self.ids += 1
        self.click_count = 0
# Exemplo n.º 7 (0) — example-site separator left over from scraping; not Python code.
    def detect_motion(self, street_name, frame):
        """
        Detect free parking spaces in a single *frame*, save an
        annotated snapshot to ``recognition/images/<street_name>.jpg``
        and return the number of free spaces.

        :param street_name: basename for the saved snapshot.
        :param frame: BGR image to analyse.
        :return: count of spots whose detected status is True.
        """
        coordinates_data = self.coordinates_data
        for p in coordinates_data:
            coordinates = self._coordinates(p)
            rect = open_cv.boundingRect(coordinates)

            # Shift the contour into the bounding rect's local frame.
            new_coordinates = coordinates.copy()
            new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
            new_coordinates[:, 1] = coordinates[:, 1] - rect[1]

            # NOTE(review): these lists grow on every call; if this
            # method runs per-frame the masks accumulate — confirm it
            # is invoked once per detector instance.
            self.contours.append(coordinates)
            self.bounds.append(rect)

            mask = open_cv.drawContours(np.zeros((rect[3], rect[2]),
                                                 dtype=np.uint8),
                                        [new_coordinates],
                                        contourIdx=-1,
                                        color=255,
                                        thickness=-1,
                                        lineType=open_cv.LINE_8)
            self.mask.append(mask == 255)

        statuses = [False] * len(coordinates_data)

        blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
        grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
        new_frame = frame.copy()

        for index, p in enumerate(coordinates_data):
            coordinates = self._coordinates(p)
            statuses[index] = self.__apply(grayed, index, p)
            color = COLOR_GREEN if statuses[index] else COLOR_BLUE
            draw_contours(new_frame, coordinates, str(p["id"] + 1),
                          COLOR_WHITE, color)

        open_cv.imwrite("recognition/images/" + street_name + ".jpg",
                        new_frame)
        # FIX: replaced dead "space_amount = 0" initialiser and the
        # non-idiomatic len(list(filter(lambda x: x == True, ...)))
        # with a direct count of truthy statuses (same result: the
        # statuses are plain bools).
        return sum(1 for s in statuses if s)
# Exemplo n.º 8 (0) — example-site separator left over from scraping; not Python code.
    def __handle_done(self):
        """
        Finish the current box: draw its closing edges, write the image
        coordinates to the output file, then interactively prompt the
        operator for the matching GPS point and record that as well.
        """
        # Close the quadrilateral (edges 2->3 and 3->0).
        open_cv.line(self.image, self.coordinates[2], self.coordinates[3],
                     self.color, 1)
        open_cv.line(self.image, self.coordinates[3], self.coordinates[0],
                     self.color, 1)

        self.click_count = 0
        self.log.info("Writing Coordinates number %d", self.ids)
        coordinates = np.array(self.coordinates)
        # YAML-ish record of the four corner points; the exact dashes
        # and indentation must match whatever parses this file.
        self.output.write("- _id : " + str(self.ids) +
                          "\n    ImageCoordinates : \n" + "        --" +
                          str(self.coordinates[0][0]) + "\n         -" +
                          str(self.coordinates[0][1]) + "\n" + "        --" +
                          str(self.coordinates[1][0]) + "\n         -" +
                          str(self.coordinates[1][1]) + "\n" + "        --" +
                          str(self.coordinates[2][0]) + "\n         -" +
                          str(self.coordinates[2][1]) + "\n" + "        --" +
                          str(self.coordinates[3][0]) + "\n         -" +
                          str(self.coordinates[3][1]) + "\n")
        draw_contours(self.image, coordinates, str(self.ids + 1), COLOR_WHITE)
        self.log.info("Writing GPS coordinates, please follow gently")
        # Blocking console prompts; float(input()) raises ValueError on
        # non-numeric input (no validation here).
        print(15 * "=")
        print("Enter Longitutde Point")
        print(15 * "=")
        longitutde = float(input())
        print(15 * "=")
        print("enter lattitude Point")
        print(15 * "=")
        lattitude = float(input())
        self.gpsCoordinates.append((longitutde, lattitude))
        # NOTE(review): indexing gpsCoordinates with self.ids assumes
        # ids started at 0 and exactly one GPS pair per box — confirm.
        self.output.write("    Gps coordinates : \n" + "        -" +
                          str(self.gpsCoordinates[self.ids][0]) + "\n" +
                          "        -" + str(self.gpsCoordinates[self.ids][1]) +
                          "\n")
        self.output.write("    Status : \n" + "        - L")
        # Drop this box's four clicks, keeping the same list object.
        for i in range(0, 4):
            self.coordinates.pop()
        self.log.info("Moving On .. ")
        self.ids += 1
    def detect_motion(self):
        """
        Main detection loop: read the configured video, debounce
        per-spot status changes, render annotated frames, and push an
        occupancy record to the Firebase ``db`` handle on every frame.

        Runs until the video ends or 'q' is pressed in the window.
        """
        capture = open_cv.VideoCapture(self.video)
        capture.set(3, 400)  # property 3 = CAP_PROP_FRAME_WIDTH
        capture.set(4, 600)  # property 4 = CAP_PROP_FRAME_HEIGHT
        capture.set(open_cv.CAP_PROP_POS_FRAMES, self.start_frame)
        coordinates_data = self.coordinates_data
        logging.debug("coordinates data: %s", coordinates_data)

        # Precompute a boolean mask per spot, expressed in the spot's
        # own bounding-rect coordinate frame.
        for p in coordinates_data:
            coordinates = self._coordinates(p)
            logging.debug("coordinates: %s", coordinates)

            rect = open_cv.boundingRect(coordinates)
            logging.debug("rect: %s", rect)

            # Translate contour points so the rect corner is (0, 0).
            new_coordinates = coordinates.copy()
            new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
            new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
            logging.debug("new_coordinates: %s", new_coordinates)

            self.contours.append(coordinates)
            self.bounds.append(rect)

            mask = open_cv.drawContours(np.zeros((rect[3], rect[2]),
                                                 dtype=np.uint8),
                                        [new_coordinates],
                                        contourIdx=-1,
                                        color=255,
                                        thickness=-1,
                                        lineType=open_cv.LINE_8)

            mask = mask == 255
            self.mask.append(mask)
            logging.debug("mask: %s", self.mask)

        statuses = [False] * len(coordinates_data)
        times = [None] * len(coordinates_data)

        while capture.isOpened():
            result, frame = capture.read()
            if frame is None:
                break

            if not result:
                raise CaptureReadError(
                    "Error reading video capture on frame %s" % str(frame))

            blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
            grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
            new_frame = frame.copy()
            logging.debug("new_frame: %s", new_frame)
            # open_cv.imshow('gray',grayed)
            position_in_seconds = capture.get(
                open_cv.CAP_PROP_POS_MSEC) / 1000.0

            # Debounce: a status change must persist DETECT_DELAY
            # seconds before being committed.
            for index, c in enumerate(coordinates_data):
                status = self.__apply(grayed, index, c)

                if times[index] is not None and self.same_status(
                        statuses, index, status):
                    times[index] = None
                    continue

                if times[index] is not None and self.status_changed(
                        statuses, index, status):
                    if position_in_seconds - times[
                            index] >= MotionDetector.DETECT_DELAY:
                        statuses[index] = status
                        times[index] = None
                    continue

                if times[index] is None and self.status_changed(
                        statuses, index, status):
                    times[index] = position_in_seconds

            for index, p in enumerate(coordinates_data):
                coordinates = self._coordinates(p)
                # lastcolor = "True"
                color = COLOR_GREEN if statuses[index] else COLOR_BLUE
                draw_contours(new_frame, coordinates, str(p["id"] + 1),
                              COLOR_WHITE, color)
                # with open('koordinat.txt', 'w') as f:
                #     f.write(str(coordinates_data))
                # NOTE(review): `status` below is whatever value the
                # previous loop left behind (its last spot), not this
                # spot's own status — this comparison looks unintended.
                lasthitung = 0
                hitung = int(index) + 1
                if statuses[index] != status:
                    if statuses[index] == False:
                        hitung = hitung - 1
                    # else:
                    #     hitung = hitung+1
                    #open_cv.putText(new_frame,"          %d" %hitung, (30, 150),
                    #open_cv.FONT_HERSHEY_SIMPLEX,0.7, (0, 0, 255), 2)
                lasthitung = hitung

            # lastJumlah = 0
            jumlah = int(index) + 1
            #open_cv.putText(new_frame,"jumlah: %d" %jumlah, (30, 95),
            #open_cv.FONT_HERSHEY_SIMPLEX,0.7, (0, 0, 255), 2)
            # hitung = 0

            #teks
            #open_cv.putText(new_frame,"terpakai: ", (30, 150),
            #open_cv.FONT_HERSHEY_SIMPLEX,0.7, (0, 0, 255), 2)
            # NOTE(review): `empty` is computed but never used, and the
            # dict pushed to Firebase is hard-coded — confirm whether
            # {"empty": empty, ...} was intended.
            empty = jumlah - lasthitung
            data = {"empty": 0, "occupied": 1}
            db.child("Keputih").child("Parkir").child("1").update(data)
            # video_str = "https://craggiest-guppy-2675.dataplicity.io/?action=stream?dummy=param.mjpg"
            open_cv.imshow("video_parkir", new_frame)

            k = open_cv.waitKey(1)

            if k == ord("q"):
                break

        capture.release()
        open_cv.destroyAllWindows()
# Exemplo n.º 10 (0) — example-site separator left over from scraping; not Python code.
    def detect_motion(self):
        """
        Detection loop over the configured video: debounce per-spot
        status changes, track in/out timestamps in
        ``self.my_parking_lot`` and display annotated frames until the
        video ends or 'q' is pressed.
        """
        capture = open_cv.VideoCapture(self.video)
        capture.set(open_cv.CAP_PROP_POS_FRAMES, self.start_frame)

        coordinates_data = self.coordinates_data
        logging.debug("coordinates data: %s", coordinates_data)
        #print("coordinates data: %s", coordinates_data)

        # Build one boolean mask per spot in its bounding-rect frame.
        for p in coordinates_data:
            coordinates = self._coordinates(p)
            logging.debug("coordinates: %s", coordinates)
            #print("coordinates: %s", coordinates)

            rect = open_cv.boundingRect(coordinates)
            #print("rect: %s", rect)

            # Translate contour points so the rect corner is (0, 0).
            new_coordinates = coordinates.copy()
            new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
            new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
            logging.debug("new_coordinates: %s", new_coordinates)
            #print("new_coordinates: %s", new_coordinates)

            self.contours.append(coordinates)
            self.bounds.append(rect)

            mask = open_cv.drawContours(np.zeros((rect[3], rect[2]),
                                                 dtype=np.uint8),
                                        [new_coordinates],
                                        contourIdx=-1,
                                        color=255,
                                        thickness=-1,
                                        lineType=open_cv.LINE_8)

            mask = mask == 255
            self.mask.append(mask)
            logging.debug("mask: %s", self.mask)
            #print("mask: %s", self.mask)

        statuses = [False] * len(coordinates_data)
        times = [None] * len(coordinates_data)

        while capture.isOpened():
            result, frame = capture.read()
            if frame is None:
                break

            if not result:
                raise CaptureReadError(
                    "Error reading video capture on frame %s" % str(frame))

            blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
            grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
            new_frame = frame.copy()
            logging.debug("new_frame: %s", new_frame)
            #print("new_frame: %s", new_frame)

            position_in_seconds = capture.get(
                open_cv.CAP_PROP_POS_MSEC) / 1000.0
            #print(capture.get(open_cv.CAP_PROP_POS_MSEC))

            for index, c in enumerate(coordinates_data):
                #print(index,c)
                status = self.__apply(grayed, index, c)
                #print(status)

                # Pending change reverted: cancel the debounce timer.
                if times[index] is not None and self.same_status(
                        statuses, index, status):
                    #print("wen is it coming here")
                    times[index] = None
                    continue

                if times[index] is not None and self.status_changed(
                        statuses, index, status):
                    #print('====> idx: '+str(index)+' I am vacant now, at -- '+ str(datetime.now()))
                    # NOTE(review): this loop stamps in_time on EVERY
                    # currently-vacant slot, not only slot `index`, and
                    # it runs before the DETECT_DELAY debounce commits
                    # the change — confirm this is intended.
                    for ind, slot_info in self.my_parking_lot.items():
                        if self.my_parking_lot[ind]['is_vacant'] == True:
                            self.my_parking_lot[ind]['in_time'] = str(
                                datetime.now())
                            print(self.my_parking_lot[ind]['in_time'])
                            self.my_parking_lot[ind]['is_vacant'] = False
                    if position_in_seconds - times[
                            index] >= MotionDetector.DETECT_DELAY:
                        statuses[index] = status
                        times[index] = None
                    continue

                if times[index] is None and self.status_changed(
                        statuses, index, status):
                    times[index] = position_in_seconds
                    # NOTE(review): likewise updates every occupied
                    # slot rather than only slot `index` — confirm.
                    for ind, slot_info in self.my_parking_lot.items():
                        if self.my_parking_lot[ind]['is_vacant'] == False:
                            self.my_parking_lot[ind]['out_time'] = str(
                                datetime.now())
                            self.my_parking_lot[ind]['is_vacant'] = True
                    #print('====> idx: '+str(index)+' I have a car here now, at -- '+ str(datetime.now(

            for index, p in enumerate(coordinates_data):
                coordinates = self._coordinates(p)

                color = COLOR_GREEN if statuses[index] else COLOR_BLUE
                #if statuses[index]:
                #print(datetime.now())
                draw_contours(new_frame, coordinates, str(p["id"] + 1),
                              COLOR_WHITE, color)

            open_cv.imshow(str(self.video), new_frame)
            k = open_cv.waitKey(1)
            #print(self.my_parking_lot)
            if k == ord("q"):
                break
        capture.release()
        open_cv.destroyAllWindows()
# Exemplo n.º 11 (0) — example-site separator left over from scraping; not Python code.
    def detect_motion(self):
        """
        Poll a still-image URL for frames and run parking-spot motion
        detection on each one.

        The image URL is read from ``config.txt`` (key ``img_url``).
        On every confirmed status change the spot index and a flag
        (1 = spot freed, 2 = spot occupied) are written to
        ``file.txt`` and an ALPR worker thread is started.  Press 'q'
        in the preview window to stop.
        """
        # Read "key : value" pairs from config.txt; only img_url is
        # used.  FIX: file handles are now closed via `with`.
        settings = {}
        with open("config.txt", "r") as cfg:
            for _ in range(0, 2):
                line = cfg.readline()
                settings[line.split(' : ')[0]] = \
                    line.split(' : ')[-1].split('\n')[0]
        print(settings)
        url = settings['img_url']

        # Frame counter stands in for a wall-clock timestamp.
        position_in_seconds = 0
        coordinates_data = self.coordinates_data
        logging.debug("coordinates data: %s", coordinates_data)

        # Build one boolean mask per spot in its bounding-rect frame.
        for p in coordinates_data:
            coordinates = self._coordinates(p)
            logging.debug("coordinates: %s", coordinates)

            rect = open_cv.boundingRect(coordinates)
            logging.debug("rect: %s", rect)

            # Translate contour points so the rect corner is (0, 0).
            new_coordinates = coordinates.copy()
            new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
            new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
            logging.debug("new_coordinates: %s", new_coordinates)

            self.contours.append(coordinates)
            self.bounds.append(rect)

            mask = open_cv.drawContours(
                np.zeros((rect[3], rect[2]), dtype=np.uint8),
                [new_coordinates],
                contourIdx=-1,
                color=255,
                thickness=-1,
                lineType=open_cv.LINE_8)

            self.mask.append(mask == 255)
            logging.debug("mask: %s", self.mask)

        statuses = [False] * len(coordinates_data)
        times = [None] * len(coordinates_data)
        # FIX: initialise so the final join() cannot raise
        # UnboundLocalError when no status change ever fires.
        alpr_thread = None

        while True:
            # Fetch and decode the current frame from the camera URL.
            img_resp = requests.get(url)
            img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)
            img = open_cv.imdecode(img_arr, -1)
            # BUG FIX: the original resize line ended with a stray
            # backslash, fusing it with "frame = img" into a syntax
            # error; the resized image was never used anyway, so the
            # dead scale/resize computation was removed.
            frame = img

            blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
            grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
            new_frame = frame.copy()
            logging.debug("new_frame: %s", new_frame)
            position_in_seconds += 1

            for index, c in enumerate(coordinates_data):
                status = self.__apply(grayed, index, c)

                # Pending change reverted: cancel the debounce timer.
                if times[index] is not None and self.same_status(statuses, index, status):
                    times[index] = None
                    continue

                # Pending change held for DETECT_DELAY: commit it,
                # record it for the ALPR worker, and launch the worker.
                if times[index] is not None and self.status_changed(statuses, index, status):
                    if position_in_seconds - times[index] >= MotionDetector.DETECT_DELAY:
                        statuses[index] = status
                        print(str(index + 1) + "  :" + str(status))
                        # flag 1 = spot freed, flag 2 = spot occupied.
                        flag = 2 if status else 1
                        with open("file.txt", "w+") as out:
                            out.write('%d ' % index)
                            out.write('%d' % flag)

                        alpr_thread = threading.Thread(
                            target=self.alpr_call, args=[])
                        alpr_thread.start()
                        times[index] = None
                    continue

                # Fresh change: start the debounce timer.
                if times[index] is None and self.status_changed(statuses, index, status):
                    times[index] = position_in_seconds

            for index, p in enumerate(coordinates_data):
                coordinates = self._coordinates(p)

                # Yellow = spot free, pink = occupied.
                if statuses[index]:
                    color = COLOR_YELLOW
                else:
                    color = COLOR_PINK

                draw_contours(new_frame, coordinates, str(p["id"] + 1), COLOR_WHITE, color)

            open_cv.imshow("img", new_frame)
            k = open_cv.waitKey(1)
            if k == ord("q"):
                break
        open_cv.destroyAllWindows()
        if alpr_thread is not None:
            alpr_thread.join()
# Exemplo n.º 12 (0) — example-site separator left over from scraping; not Python code.
    def detect_motion(self):
        """Run parking-slot occupancy detection on ``self.video``.

        For every configured slot polygon a boolean mask is built once, then
        each frame is blurred/grayed and classified per slot via
        ``self.__apply``.  A status change must persist for
        ``MotionDetector.DETECT_DELAY`` seconds before it is committed.
        Every frame, one row per slot (1 = empty, 0 = occupied) is inserted
        into the ``parkinginfo`` table.  Runs until the video ends or 'q'
        is pressed.

        :raises CaptureReadError: if a frame cannot be read from the capture.
        """
        dataLen = []
        capture = open_cv.VideoCapture(self.video)
        capture.set(open_cv.CAP_PROP_POS_FRAMES, self.start_frame)

        coordinates_data = self.coordinates_data
        logging.debug("coordinates data: %s", coordinates_data)

        # Per-slot pre-computation: contour, bounding rect and a boolean
        # mask of the polygon expressed relative to its bounding rect.
        for p in coordinates_data:
            coordinates = self._coordinates(p)
            logging.debug("coordinates: %s", coordinates)

            rect = open_cv.boundingRect(coordinates)
            logging.debug("rect: %s", rect)

            new_coordinates = coordinates.copy()
            new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
            new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
            logging.debug("new_coordinates: %s", new_coordinates)

            self.contours.append(coordinates)
            self.bounds.append(rect)

            mask = open_cv.drawContours(np.zeros((rect[3], rect[2]),
                                                 dtype=np.uint8),
                                        [new_coordinates],
                                        contourIdx=-1,
                                        color=255,
                                        thickness=-1,
                                        lineType=open_cv.LINE_8)

            # Boolean mask: True inside the slot polygon.
            mask = mask == 255
            self.mask.append(mask)
            logging.debug("mask: %s", self.mask)

        statuses = [False] * len(coordinates_data)
        times = [None] * len(coordinates_data)

        # One cursor and one prepared statement for the whole run.  The
        # original opened a brand-new cursor per slot per frame and never
        # closed them (resource leak), and cursor.close() below raised
        # NameError when no frame was ever processed.
        cursor = connection.cursor()
        SQLCommand = (
            "INSERT INTO [dbo].[parkinginfo] ([psid] ,[idvalue],[dates],[imagepath])VALUES (?,?,?,?)"
        )

        while capture.isOpened():
            result, frame = capture.read()
            if frame is None:
                break

            if not result:
                raise CaptureReadError(
                    "Error reading video capture on frame %s" % str(frame))

            blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
            grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)

            new_frame = frame.copy()
            logging.debug("new_frame: %s", new_frame)

            position_in_seconds = capture.get(
                open_cv.CAP_PROP_POS_MSEC) / 1000.0

            # Debounce: a status flip only sticks after it has persisted
            # for DETECT_DELAY seconds.
            for index, c in enumerate(coordinates_data):
                status = self.__apply(grayed, index, c)

                if times[index] is not None and self.same_status(
                        statuses, index, status):
                    times[index] = None
                    continue

                if times[index] is not None and self.status_changed(
                        statuses, index, status):
                    if position_in_seconds - times[
                            index] >= MotionDetector.DETECT_DELAY:
                        statuses[index] = status
                        times[index] = None
                    continue

                if times[index] is None and self.status_changed(
                        statuses, index, status):
                    times[index] = position_in_seconds

            for index, p in enumerate(coordinates_data):
                coordinates = self._coordinates(p)

                # Green = empty slot, blue = occupied.
                if statuses[index]:
                    color = COLOR_GREEN
                else:
                    color = COLOR_BLUE

                idvalue = 1 if color == COLOR_GREEN else 0  # 1 = empty

                draw_contours(new_frame, coordinates, str(p["id"] + 1),
                              COLOR_WHITE, color)

                # Insert one row per slot per frame.
                dVal = datetime.datetime.now()
                psid = p["id"] + 1
                dataLen.append(idvalue)
                print('psid:', psid, '\tvalue:', idvalue, '\ttime', dVal)

                Values = [psid, idvalue, dVal, 0]
                cursor.execute(SQLCommand, Values)
                # Commit any pending transaction to the database.
                connection.commit()

            open_cv.imshow(str(self.video), new_frame)
            k = open_cv.waitKey(60)
            if k == ord("q"):
                break
        capture.release()
        open_cv.destroyAllWindows()
        # Close DB resources.
        cursor.close()
        connection.close()
        print(len(dataLen))
# Exemplo n.º 13
# 0
    def detect_motion(self):
        """Detect slot occupancy in ``self.video`` and display it live.

        Builds a boolean polygon mask per slot, classifies every slot per
        frame via ``self.__apply`` with a ``DETECT_DELAY`` debounce, draws
        the slot contours plus Occupied/Free counters, and shows the
        (resized) result until 'q' is pressed or the video ends.

        :raises CaptureReadError: if a frame cannot be read from the capture.
        """
        capture = open_cv.VideoCapture(self.video)
        capture.set(open_cv.CAP_PROP_POS_FRAMES, self.start_frame)

        coordinates_data = self.coordinates_data
        logging.debug("coordinates data: %s", coordinates_data)

        # Per-slot pre-computation: contour, bounding rect and a boolean
        # mask of the polygon expressed relative to its bounding rect.
        for p in coordinates_data:
            coordinates = self._coordinates(p)
            logging.debug("coordinates: %s", coordinates)

            rect = open_cv.boundingRect(coordinates)
            logging.debug("rect: %s", rect)

            new_coordinates = coordinates.copy()
            new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
            new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
            logging.debug("new_coordinates: %s", new_coordinates)

            self.contours.append(coordinates)
            self.bounds.append(rect)

            mask = open_cv.drawContours(np.zeros((rect[3], rect[2]),
                                                 dtype=np.uint8),
                                        [new_coordinates],
                                        contourIdx=-1,
                                        color=255,
                                        thickness=-1,
                                        lineType=open_cv.LINE_8)

            # Boolean mask: True inside the slot polygon.
            mask = mask == 255
            self.mask.append(mask)
            logging.debug("mask: %s", self.mask)

        statuses = [False] * len(coordinates_data)
        times = [None] * len(coordinates_data)

        # Frame height is a property of the capture, not of an individual
        # frame: fetch it once instead of once per slot per frame (the
        # original used the bare property id 4 == CAP_PROP_FRAME_HEIGHT).
        height = int(capture.get(open_cv.CAP_PROP_FRAME_HEIGHT))

        while capture.isOpened():
            result, frame = capture.read()
            if frame is None:
                break

            if not result:
                raise CaptureReadError(
                    "Error reading video capture on frame %s" % str(frame))

            blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
            grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
            new_frame = frame.copy()
            logging.debug("new_frame: %s", new_frame)

            position_in_seconds = capture.get(
                open_cv.CAP_PROP_POS_MSEC) / 1000.0

            # Debounce: a status flip only sticks after it has persisted
            # for DETECT_DELAY seconds.
            for index, c in enumerate(coordinates_data):
                status = self.__apply(grayed, index, c)

                if times[index] is not None and self.same_status(
                        statuses, index, status):
                    times[index] = None
                    continue

                if times[index] is not None and self.status_changed(
                        statuses, index, status):
                    if position_in_seconds - times[
                            index] >= MotionDetector.DETECT_DELAY:
                        statuses[index] = status
                        times[index] = None
                    continue

                if times[index] is None and self.status_changed(
                        statuses, index, status):
                    times[index] = position_in_seconds

            for index, p in enumerate(coordinates_data):
                coordinates = self._coordinates(p)

                # Green = free slot (status True), blue = occupied.
                color = COLOR_GREEN if statuses[index] else COLOR_BLUE
                draw_contours(new_frame, coordinates, str(p["id"] + 1),
                              COLOR_WHITE, color)

            # Draw the counters once per frame; the original redrew the
            # same text once per slot, to identical visual effect.
            occupied = str(statuses.count(False))
            free = str(statuses.count(True))

            open_cv.putText(new_frame, f' Occupied {occupied}',
                            (0, height - 10), open_cv.FONT_HERSHEY_SIMPLEX,
                            1, (0, 0, 255), 2, open_cv.LINE_4)
            open_cv.putText(new_frame, f' Free {free}', (300, height - 10),
                            open_cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
                            2, open_cv.LINE_4)

            new_frame = open_cv.resize(new_frame, (1366, 768))

            open_cv.imshow(str(self.video), new_frame)
            k = open_cv.waitKey(1)
            if k == ord("q"):
                break
        capture.release()
        open_cv.destroyAllWindows()
# Exemplo n.º 14
# 0
    def detect_motion(self):
        """Detect parking-slot occupancy in ``self.video`` and display it.

        Builds a boolean polygon mask per slot, classifies every slot per
        frame via ``self.__apply`` with a ``DETECT_DELAY`` debounce, then
        annotates the frame with slot contours, Occupied/Vacant counters,
        the list of vacant slot indices and a per-slot waiting time.
        Runs until the video ends or 'q' is pressed.

        :raises CaptureReadError: if a frame cannot be read from the capture.
        """
        capture = open_cv.VideoCapture(self.video)
        capture.set(open_cv.CAP_PROP_POS_FRAMES, self.start_frame)

        coordinates_data = self.coordinates_data
        logging.debug("coordinates data: %s", coordinates_data)

        # Per-slot pre-computation: contour, bounding rect and a boolean
        # mask of the polygon expressed relative to its bounding rect.
        for p in coordinates_data:
            coordinates = self._coordinates(p)
            logging.debug("coordinates: %s", coordinates)

            rect = open_cv.boundingRect(coordinates)
            logging.debug("rect: %s", rect)

            new_coordinates = coordinates.copy()
            new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
            new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
            logging.debug("new_coordinates: %s", new_coordinates)

            self.contours.append(coordinates)
            self.bounds.append(rect)

            mask = open_cv.drawContours(
                np.zeros((rect[3], rect[2]), dtype=np.uint8),
                [new_coordinates],
                contourIdx=-1,
                color=255,
                thickness=-1,
                lineType=open_cv.LINE_8)

            # Boolean mask: True inside the slot polygon.
            mask = mask == 255
            self.mask.append(mask)
            logging.debug("mask: %s", self.mask)

        statuses = [False] * len(coordinates_data)
        times = [None] * len(coordinates_data)
        # time_log = [0 for i in range(len(statuses))]

        while capture.isOpened():
            result, frame = capture.read()
            if frame is None:
                break

            if not result:
                raise CaptureReadError("Error reading video capture on frame %s" % str(frame))

            blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
            grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
            new_frame = frame.copy()
            logging.debug("new_frame: %s", new_frame)

            position_in_seconds = capture.get(open_cv.CAP_PROP_POS_MSEC) / 1000.0

            # Debounce: a status flip only sticks after it has persisted
            # for DETECT_DELAY seconds.
            for index, c in enumerate(coordinates_data):
                status = self.__apply(grayed, index, c)

                if times[index] is not None and self.same_status(statuses, index, status):
                    times[index] = None
                    continue

                if times[index] is not None and self.status_changed(statuses, index, status):
                    if position_in_seconds - times[index] >= MotionDetector.DETECT_DELAY:
                        statuses[index] = status
                        times[index] = None
                    continue

                if times[index] is None and self.status_changed(statuses, index, status):
                    times[index] = position_in_seconds

            for index, p in enumerate(coordinates_data):
                coordinates = self._coordinates(p)

                # Green = vacant (status True), blue = occupied.
                color = COLOR_GREEN if statuses[index] else COLOR_BLUE
                # draw_contours presumably returns the label centre point
                # stored per slot id -- TODO confirm its return value.
                self.centers[p['id']] = draw_contours(new_frame, coordinates, str(p["id"] + 1), COLOR_WHITE, color)

            stats = Counter(statuses)
            vacant = stats[True]
            occupied = stats[False]
            font = open_cv.FONT_HERSHEY_SIMPLEX
            open_cv.putText(new_frame, 'Occupied: ' + str(occupied), (200, 35), font, 1, (0, 0, 255), 2,
                            open_cv.LINE_AA)
            open_cv.putText(new_frame, 'Vacant: ' + str(vacant), (10, 35), font, 1, (0, 255, 0), 2, open_cv.LINE_AA)
            indices = [str(i) for i, x in enumerate(statuses) if x == True]

            open_cv.putText(new_frame, 'vacant locations: ' + " ".join(indices), (21, 418), font, 1, (255, 0, 0), 2, open_cv.LINE_AA)
            # NOTE(review): iterating range(len(self.centers)) assumes
            # self.centers and self.time_log are both indexable by 0..n-1;
            # verify against the class initializer.
            for i in range(len(self.centers)):
                open_cv.putText(new_frame, self.waiting_time(self.time_log[i]), self.centers[i], font, 1, (255, 0, 0), 1,
                                open_cv.LINE_AA)
            open_cv.imshow(str(self.video), new_frame)

            k = open_cv.waitKey(1)
            if k == ord("q"):
                break
        capture.release()
        open_cv.destroyAllWindows()
# Exemplo n.º 15
# 0
    def process_algo_per_frame(self, frame, capture, coordinates_data, times, statuses):
        """Run zone detection on one frame and accumulate pedestrian status.

        Classifies every zone on this frame (debounced by ``DETECT_DELAY``),
        draws the zone contours on a copy of the frame, then translates the
        set of active zones into a pedestrian-status sample.  Every 13
        collected samples the modal value is persisted via
        ``persistPedestrianStatusData``.

        :param frame: raw BGR frame to analyse.
        :param capture: open VideoCapture, used only for the POS_MSEC clock.
        :param coordinates_data: zone polygon definitions.
        :param times: per-zone debounce timestamps (mutated in place).
        :param statuses: per-zone boolean states (mutated in place).
        :return: tuple of (annotated frame, self.area).
        """
        zones = []

        blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
        grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
        new_frame = frame.copy()
        logging.debug("new_frame: %s", new_frame)

        position_in_seconds = capture.get(open_cv.CAP_PROP_POS_MSEC) / 1000.0

        # Debounce: a zone's status flip only sticks after it has persisted
        # for DETECT_DELAY seconds.
        for index, c in enumerate(coordinates_data):
            status = self.__apply(grayed, index, c)

            if times[index] is not None and self.same_status(statuses, index, status):
                times[index] = None
                continue

            if times[index] is not None and self.status_changed(statuses, index, status):
                if position_in_seconds - times[index] >= MotionDetector.DETECT_DELAY:
                    statuses[index] = status
                    if status:
                        # NOTE(review): previousStatusesCount is bumped from
                        # 0 so the inequality below fires on the very first
                        # activation -- confirm this is the intended rule.
                        if self.previousStatusesCount == 0:
                            self.previousStatusesCount = self.previousStatusesCount + 1
                        self.currentStatusesCount = self.currentStatusesCount + 1
                    times[index] = None
                continue

            if times[index] is None and self.status_changed(statuses, index, status):
                times[index] = position_in_seconds

        for index, p in enumerate(coordinates_data):
            coordinates = self._coordinates(p)

            # Green = active zone (status True), blue = inactive.
            color = COLOR_GREEN if statuses[index] else COLOR_BLUE
            draw_contours(new_frame, coordinates, str(p["id"] + 1), COLOR_WHITE, color)

        # Collect 1-based indices of the currently active zones.
        for index, p in enumerate(coordinates_data):
            if statuses[index]:
                zones.append(index + 1)

        if self.previousStatusesCount != self.currentStatusesCount:
            self.previousStatusesCount = self.currentStatusesCount
            self.count = self.count + 1;
            self.list.append(str(self.area.get_pedestrian_status(zones)))
            # Persist the modal status once 13 samples are collected, then
            # restart the sampling window.
            if self.count == 13:
                self.persistPedestrianStatusData(mode(self.list))
                self.count = 0
                self.list = []
                '''
                for item in set(self.list):
                    print(item +" - item "+str(self.list.count(item))+" times")
                print("end---end----end")
                '''
        else:
            # No change this frame: reset so the next activation is treated
            # as a fresh "first activation" -- TODO confirm intent.
            self.previousStatusesCount = 0

        return new_frame, self.area
# Exemplo n.º 16
# 0
    def detect_motion(self, video, hasVideoChanged):
        """Process *video* and return annotated frames plus slot states.

        Work is only done when *hasVideoChanged* is truthy; otherwise the
        method returns None (unchanged from the original contract).  For
        each slot a boolean polygon mask is built, every frame is
        classified with a ``DETECT_DELAY`` debounce, contours are drawn,
        and the matching water-level slot object is updated (green contour
        -> water level False).

        :param video: path/URL accepted by ``open_cv.VideoCapture``.
        :param hasVideoChanged: process the video only when truthy.
        :return: ``(frames, self.waterLevelSlots)`` where *frames* maps
            frame index -> annotated frame, or None when *hasVideoChanged*
            is falsy.
        :raises CaptureReadError: if a frame cannot be read.
        """
        frames = {}

        # Truthiness check instead of `== True`.
        if hasVideoChanged:
            capture = open_cv.VideoCapture(video)
            coordinates_data = self.coordinates_data
            logging.debug("coordinates data: %s", coordinates_data)

            # Per-slot pre-computation: contour, bounding rect and a
            # boolean mask of the polygon relative to its bounding rect.
            for p in coordinates_data:
                coordinates = self._coordinates(p)
                logging.debug("coordinates: %s", coordinates)

                rect = open_cv.boundingRect(coordinates)
                logging.debug("rect: %s", rect)

                new_coordinates = coordinates.copy()
                new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
                new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
                logging.debug("new_coordinates: %s", new_coordinates)

                self.contours.append(coordinates)
                self.bounds.append(rect)

                # uint8 matches the sibling implementations; the mask is a
                # binary 0/255 image so 8 bits suffice (original used
                # uint16 for no benefit).
                mask = open_cv.drawContours(np.zeros((rect[3], rect[2]),
                                                     dtype=np.uint8),
                                            [new_coordinates],
                                            contourIdx=-1,
                                            color=255,
                                            thickness=-1,
                                            lineType=open_cv.LINE_8)

                mask = mask == 255
                self.mask.append(mask)
                logging.debug("mask: %s", self.mask)

            statuses = [False] * len(coordinates_data)
            times = [None] * len(coordinates_data)

            count = 0
            while capture.isOpened():
                result, frame = capture.read()
                if frame is None:
                    break

                if not result:
                    raise CaptureReadError(
                        "Error reading video capture on frame %s" % str(frame))

                blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
                grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
                new_frame = frame.copy()
                logging.debug("new_frame: %s", new_frame)

                position_in_seconds = capture.get(
                    open_cv.CAP_PROP_POS_MSEC) / 1000.0

                # Debounce: a status flip only sticks after it has
                # persisted for DETECT_DELAY seconds.
                for index, c in enumerate(coordinates_data):
                    status = self.__apply(grayed, index, c)

                    if times[index] is not None and self.same_status(
                            statuses, index, status):
                        times[index] = None
                        continue

                    if times[index] is not None and self.status_changed(
                            statuses, index, status):
                        if position_in_seconds - times[
                                index] >= MotionDetector.DETECT_DELAY:
                            statuses[index] = status
                            times[index] = None
                        continue

                    if times[index] is None and self.status_changed(
                            statuses, index, status):
                        times[index] = position_in_seconds

                for index, p in enumerate(coordinates_data):
                    coordinates = self._coordinates(p)

                    color = COLOR_GREEN if statuses[index] else COLOR_BLUE
                    draw_contours(new_frame, coordinates, str(p["id"] + 1),
                                  COLOR_WHITE, color)

                    slot = self.waterLevelSlots.get_water_level_slots()[index]
                    if slot is not None:
                        # Green contour means the zone is clear, so the
                        # water level flag is False; otherwise True.
                        slot.set_water_level(color != COLOR_GREEN)

                frames[count] = new_frame
                count = count + 1

            capture.release()
            return frames, self.waterLevelSlots
# Exemplo n.º 17
# 0
    def detect_motion(self):
        """Detect slot occupancy and publish state changes via self.updater.

        Resets every slot to "False" through ``self.updater``, builds a
        boolean polygon mask per slot, then classifies each slot per frame
        (with the ``DETECT_DELAY`` debounce), pushing every committed
        status change through ``self.updater``.  When the video ends,
        ``self.delete`` is called.

        :raises CaptureReadError: if a frame cannot be read from the capture.
        """
        capture = cv2.VideoCapture(self.video)
        #capture.set(cv2.CAP_PROP_POS_FRAMES, self.start_frame)

        coordinates_data = self.coordinates_data
        print(len(coordinates_data))
        # Statuses are exchanged as strings ("False"/"True"), matching the
        # str(...) conversion used when a change is committed below.
        for i in range(len(coordinates_data)):
            self.updater("False", i)
        logging.debug("coordinates data: %s", coordinates_data)

        # Per-slot pre-computation: contour, bounding rect and a boolean
        # mask of the polygon expressed relative to its bounding rect.
        for p in coordinates_data:
            coordinates = self._coordinates(p)
            logging.debug("coordinates: %s", coordinates)

            rect = cv2.boundingRect(coordinates)
            logging.debug("rect: %s", rect)

            new_coordinates = coordinates.copy()
            new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
            new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
            logging.debug("new_coordinates: %s", new_coordinates)

            self.contours.append(coordinates)
            self.bounds.append(rect)

            mask = cv2.drawContours(np.zeros((rect[3], rect[2]),
                                             dtype=np.uint8),
                                    [new_coordinates],
                                    contourIdx=-1,
                                    color=255,
                                    thickness=-1,
                                    lineType=cv2.LINE_8)

            # Boolean mask: True inside the slot polygon.
            mask = mask == 255
            self.mask.append(mask)

            logging.debug("mask: %s", self.mask)

        statuses = [False] * len(coordinates_data)
        times = [None] * len(coordinates_data)

        while capture.isOpened():
            result, frame = capture.read()
            if frame is None:
                break

            if not result:
                raise CaptureReadError(
                    "Error reading video capture on frame %s" % str(frame))

            blurred = cv2.GaussianBlur(frame.copy(), (5, 5), 3)
            grayed = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)

            new_frame = frame.copy()
            logging.debug("new_frame: %s", new_frame)

            position_in_seconds = capture.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
            flag = 0  # NOTE(review): never read afterwards.
            # Debounce: a status flip only sticks after it has persisted
            # for DETECT_DELAY seconds.
            for index, c in enumerate(coordinates_data):
                status = self.__apply(grayed, index, c)

                if times[index] is not None and self.same_status(
                        statuses, index, status):
                    times[index] = None
                    continue

                if times[index] is not None and self.status_changed(
                        statuses, index, status):
                    if position_in_seconds - times[
                            index] >= MotionDetector.DETECT_DELAY:
                        statuses[index] = status
                        # Publish the committed change for this slot.
                        self.updater(str(statuses[index]), index)
                        times[index] = None
                    continue

                if times[index] is None and self.status_changed(
                        statuses, index, status):
                    times[index] = position_in_seconds

            for index, p in enumerate(coordinates_data):
                coordinates = self._coordinates(p)

                # Green = free (status True), blue = occupied.
                color = COLOR_GREEN if statuses[index] else COLOR_BLUE
                draw_contours(new_frame, coordinates, str(p["id"] + 1),
                              COLOR_WHITE, color)

            cv2.imshow(str(self.video), new_frame)
            k = cv2.waitKey(1)
            if k == ord("q"):
                break
        capture.release()
        cv2.destroyAllWindows()
        # NOTE(review): `index` is only bound if the loops above executed
        # at least once; with no slots or no frames this raises NameError.
        self.delete(statuses, index)
# Exemplo n.º 18
# 0
    def detect_motion(self):
        """Detect parking occupancy, log detected vehicles, stream statuses.

        Per frame this does three things: runs the detector network
        (``tfnet``) and appends car/truck detections to CarData.txt,
        classifies each configured slot with the usual ``DETECT_DELAY``
        debounce, and sends the per-slot occupancy string ('0' = free,
        '1' = occupied) over a TCP socket to localhost:9876.

        :raises CaptureReadError: if a frame cannot be read from the capture.
        """
        parking_index = list()  # NOTE(review): never used afterwards.
        capture = open_cv.VideoCapture(self.video)
        print(self.video)
        # capture.set(open_cv.CAP_PROP_POS_FRAMES, self.start_frame)
        coordinates_data = self.coordinates_data
        logging.debug("coordinates data: %s", coordinates_data)

        # Per-slot pre-computation: contour, bounding rect and a boolean
        # mask of the polygon expressed relative to its bounding rect.
        for p in coordinates_data:
            coordinates = self._coordinates(p)
            logging.debug("coordinates: %s", coordinates)

            rect = open_cv.boundingRect(coordinates)
            logging.debug("rect: %s", rect)

            new_coordinates = coordinates.copy()
            new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
            new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
            logging.debug("new_coordinates: %s", new_coordinates)

            self.contours.append(coordinates)
            self.bounds.append(rect)

            mask = open_cv.drawContours(np.zeros((rect[3], rect[2]),
                                                 dtype=np.uint8),
                                        [new_coordinates],
                                        contourIdx=-1,
                                        color=255,
                                        thickness=-1,
                                        lineType=open_cv.LINE_8)

            # Boolean mask: True inside the slot polygon.
            mask = mask == 255
            self.mask.append(mask)
            logging.debug("mask: %s", self.mask)

        statuses = [False] * len(coordinates_data)
        times = [None] * len(coordinates_data)

        # NOTE(review): this re-opens a hard-coded file, discarding the
        # capture built from self.video above -- confirm which source is
        # intended.
        capture = cv2.VideoCapture('sample1.mp4')
        colors = [tuple(255 * np.random.rand(3)) for i in range(5)]
        print("colors : ", colors)
        cnt = 0
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(('localhost', 9876))
        while capture.isOpened():

            # NOTE(review): cnt is forced to 1 every iteration, so the
            # `if cnt == 1` gate below is always taken.
            cnt = 1
            stime = time.time()  # NOTE(review): never read afterwards.
            result, frame = capture.read()
            if result:
                results = tfnet.return_predict(frame)
                # NOTE(review): this loop rebinds `result` to a detection
                # dict, so the `if not result:` check below no longer sees
                # the capture.read() flag when detections exist.
                for color, result in zip(colors, results):
                    label = result['label']
                    if cnt == 1:
                        if label == "car" or label == "truck":
                            tl = (result['topleft']['x'],
                                  result['topleft']['y'])
                            br = (result['bottomright']['x'],
                                  result['bottomright']['y'])
                            # Half-extents of the detection box (not its
                            # centre point) -- TODO confirm intent.
                            x = (result['bottomright']['x'] -
                                 result['topleft']['x']) / 2
                            y = (result['bottomright']['y'] -
                                 result['topleft']['y']) / 2
                            x_y = (x, y)
                            now = datetime.now()
                            time_str = "종류 : " + str(label) + "\n시간 : " + str(
                                now.hour) + "시 " + str(
                                    now.minute) + "분 " + str(
                                        now.second) + "초\n" + "객체좌표 : " + str(
                                            x_y) + "\n\n\n"
                            # Append the detection record (a `with` block
                            # would be safer; left as-is here).
                            f = open("CarData.txt", 'a', encoding='utf-8')
                            f.write(time_str)
                            f.close()
            if frame is None:
                break
            if not result:
                raise CaptureReadError(
                    "Error reading video capture on frame %s" % str(frame))

            blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
            grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
            new_frame = frame.copy()
            logging.debug("new_frame: %s", new_frame)

            position_in_seconds = capture.get(
                open_cv.CAP_PROP_POS_MSEC) / 1000.0

            # Debounce: a status flip only sticks after it has persisted
            # for DETECT_DELAY seconds.
            for index, c in enumerate(coordinates_data):
                status = self.__apply(grayed, index, c)

                if times[index] is not None and self.same_status(
                        statuses, index, status):
                    times[index] = None
                    continue

                if times[index] is not None and self.status_changed(
                        statuses, index, status):
                    if position_in_seconds - times[
                            index] >= MotionDetector.DETECT_DELAY:
                        statuses[index] = status
                        times[index] = None
                    continue

                if times[index] is None and self.status_changed(
                        statuses, index, status):
                    times[index] = position_in_seconds
            global rbuff
            # Build the per-slot occupancy string: '0' = free, '1' = taken.
            rbuff = ""
            for index, p in enumerate(coordinates_data):
                coordinates = self._coordinates(p)
                if statuses[index]:
                    color = COLOR_GREEN
                    rbuff = rbuff + "0"
                else:
                    color = COLOR_RED
                    rbuff = rbuff + "1"
                # NOTE(review): sends the partially-built string once per
                # slot; sending once after the loop would transmit a single
                # complete status string per frame -- confirm the protocol.
                sock.send(rbuff.encode(encoding='utf-8'))
                draw_contours(new_frame, coordinates, str(p["id"] + 1),
                              COLOR_WHITE, color)

            open_cv.imshow("hello guys", new_frame)

            print(rbuff)
            k = open_cv.waitKey(20)
            if k == ord("q"):

                break

        capture.release()
        open_cv.destroyAllWindows()