예제 #1
0
    def __call__(self, context):
        """Foreground-extraction stage of the pipeline.

        Applies the background subtractor to the current frame, thresholds
        and filters the resulting mask, optionally saves it to disk, and
        stores the detected objects plus the mask back into ``context``.
        """
        frame = context['frame'].copy()
        frame_number = context['frame_number']

        # Foreground mask from the background subtractor (learning rate 0.001).
        fg_mask = self.bg_subtractor.apply(frame, None, 0.001)
        # Keep only strong foreground responses: white foreground on black.
        fg_mask[fg_mask < 240] = 0
        # Morphological filtering removes noise and fills holes so the
        # foreground regions are more consistent.
        fg_mask = self.filter_mask(fg_mask, frame_number)

        if self.save_image:
            mask_path = self.image_dir + "/mask_%04d.png" % frame_number
            utils.save_frame(fg_mask, mask_path, flip=False)

        context['objects'] = self.detect_vehicles(fg_mask, context)
        context['fg_mask'] = fg_mask
        return context
예제 #2
0
def main():
    """Train a MOG2 background subtractor, then dump frames and their masks.

    For every frame of the video source: save the raw frame, compute the
    foreground mask via background subtraction, and save the mask alongside
    it under ./out/.
    """
    log = logging.getLogger("main")

    # Creating a MOG2 background subtractor with a 500-frame history
    # and shadow detection enabled.
    bg_subtractor = cv2.createBackgroundSubtractorMOG2(
        history=500, detectShadows=True)

    # Set up the image source.  cv2.VideoCapture could be used instead,
    # but skvideo is used here.
    cap = skvideo.io.vreader(VIDEO_SOURCE)

    # Consume 500 frames to train the background subtractor.
    train_bg_subtractor(bg_subtractor, cap, num=500)

    frame_number = -1
    for frame in cap:
        if not frame.any():
            log.error("Frame capture failed, stopping...")
            break

        frame_number += 1

        utils.save_frame(frame, "./out/frame_%04d.png" % frame_number)

        # Foreground mask (learning rate 0.001).
        fg_mask = bg_subtractor.apply(frame, None, 0.001)

        # BUG FIX: the raw frame was previously saved under the mask
        # filename; save the computed foreground mask instead.
        utils.save_frame(fg_mask, "./out/fg_mask_%04d.png" % frame_number)
예제 #3
0
    def __call__(self, context):
        """Draw counters, vehicle paths and bounding boxes, then save the frame."""
        frame_number = context['frame_number']
        exit_masks = context['exit_masks']
        pathes = context['pathes']
        vehicle_count = context['vehicle_count']

        canvas = context['frame'].copy()
        canvas = self.draw_ui(canvas, vehicle_count, exit_masks)
        canvas = self.draw_pathes(canvas, pathes)
        canvas = self.draw_boxes(canvas, pathes, exit_masks)

        out_path = self.image_dir + "/processed_%04d.png" % frame_number
        utils.save_frame(canvas, out_path)

        return context
예제 #4
0
    def __call__(self, context):
        """Compute a filtered foreground mask and attach detections to context."""
        frame = context['frame'].copy()
        number = context['frame_number']

        foreground = self.bg_subtractor.apply(frame, None, 0.001)
        foreground[foreground < 240] = 0  # binarize: keep strong foreground only
        foreground = self.filter_mask(foreground, number)

        if self.save_image:
            utils.save_frame(foreground,
                             self.image_dir + "/mask_%04d.png" % number,
                             flip=False)

        context['objects'] = self.detect_vehicles(foreground, context)
        context['fg_mask'] = foreground

        return context
예제 #5
0
    def __call__(self, context):
        """Build the foreground mask for the current frame and detect vehicles.

        Stores the detections under ``context['objects']`` (a list of matches:
        contour bounding boxes plus centroid coordinates) and the filtered
        mask under ``context['fg_mask']``.
        """
        frame = context['frame'].copy()
        frame_number = context['frame_number']
        fg_mask = self.bg_subtractor.apply(frame, None, 0.001)

        # Thresholding: keep only strong foreground responses.
        # BUG FIX: the original chained assignment
        # ``fg_mask = fg_mask[fg_mask < 240] = 0`` rebound fg_mask to the
        # integer 0, destroying the mask; assign in place instead.
        fg_mask[fg_mask < 240] = 0
        # frame_number may be unused by filter_mask — kept for interface parity.
        fg_mask = self.filter_mask(fg_mask, frame_number)

        if self.save_image:
            # NOTE(review): this filename has no "/" separator or image
            # extension — presumably image_dir ends with "/"; confirm.
            utils.save_frame(fg_mask,self.image_dir+"mask_{frame_number}".format(frame_number=frame_number),flip=False)

        # 'objects' holds the matches: each entry is a contour's bounding
        # box coordinates plus its centroid.
        context['objects'] = self.detect_vehicles(fg_mask,context)
        context['fg_mask'] = fg_mask  # the mask after filtering

        return context
예제 #6
0
    def __call__(self, context):
        """Produce a filtered foreground mask and record detections in context."""
        frame_number = context['frame_number']
        frame = context['frame'].copy()

        mask = self.bg_subtractor.apply(frame, None, 0.001)
        # Drop weak responses so the mask is strictly white-on-black.
        mask[mask < 240] = 0
        mask = self.filter_mask(mask, frame_number)

        if self.save_image:
            out_path = self.image_dir + "/mask_%04d.png" % frame_number
            utils.save_frame(mask, out_path, flip=False)

        # Contour information is stored as 'objects' in the context dict.
        context['objects'] = self.detect_vehicles(mask, context)
        context['fg_mask'] = mask

        return context
    def __call__(self, context):
        """Render counters, paths, boxes and speed overlays onto the frame.

        The annotated frame is optionally written to disk and always
        appended to the output video stream.
        """
        frame_number = context['frame_number']
        pathes = context['pathes']
        pathes_speed = context['pathes_speed']
        exit_masks = context['exit_masks']
        vehicle_count = context['vehicle_count']
        pathes_speed_avg_list = context['pathes_speed_avg_list']

        canvas = context['frame'].copy()
        canvas = self.draw_ui(canvas, vehicle_count, pathes_speed_avg_list,
                              exit_masks)
        canvas = self.draw_pathes(canvas, pathes)
        canvas = self.draw_boxes(canvas, pathes, exit_masks)
        canvas = self.draw_pathes_speed(canvas, pathes, pathes_speed,
                                        exit_masks)

        if self.save_image:
            utils.save_frame(
                canvas, self.image_dir + "/processed_%04d.png" % frame_number)
        self.video_out.write(canvas)
        return context
def main():
    """Train a MOG2 background subtractor, then dump frames and masks.

    For each video frame: save the raw frame, compute the foreground
    mask via background subtraction, and save the mask next to it under
    ./out/.
    """
    # Event logger for the main function; use log.debug / log.info for
    # status output so events are recorded for later cross-checking.
    log = logging.getLogger("Main")

    # MOG (Mixture of Gaussians) background subtractor with a 500-frame
    # cache and shadow detection:
    #   history       - number of frames affecting the background model
    #   detectShadows - trades off speed; enable only when required
    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)

    # Image source: skvideo.io.vread loads the video as an ndarray
    # (cv2 could be used instead; frame size and color options can be
    # tweaked via vread's parameters).
    cap = skvideo.io.vread(
        VIDEO_SOURCE
    )

    # Consume 500 frames to train the background subtractor.
    train_bg_subtractor(bg_subtractor, cap, num=500)

    frame_number = -1

    for frame in cap:
        if not frame.any():
            log.error("Frame Capture Failed, Stopping...")
            break
        frame_number += 1
        # Save each raw frame with the naming convention below.
        utils.save_frame(frame, "./out/frame_%04d.png" % frame_number)

        # Foreground mask via background subtraction (learning rate 0.001).
        fg_mask = bg_subtractor.apply(frame, None, 0.001)

        # BUG FIX: the raw frame was previously saved under the mask
        # filename; save the computed foreground mask instead.
        utils.save_frame(fg_mask, "./out/fg_mask_%04d.png" % frame_number)
예제 #9
0
    def __call__(self, context):
        """Overlay per-type/per-lane statistics, paths and boxes, then save."""
        frame_number = context['frame_number']
        pathes = context['pathes']
        exit_masks = context['exit_masks']

        # Gather the counters and lane totals in draw_ui's positional order.
        counts = (context['vehicle_count'], context['car_count'],
                  context['van_count'], context['truck_count'])
        lanes = (context['lane1'], context['lane2'],
                 context['lane3'], context['lane4'])

        canvas = context['frame'].copy()
        canvas = self.draw_ui(canvas, *counts, *lanes, exit_masks)
        canvas = self.draw_pathes(canvas, pathes)
        canvas = self.draw_boxes(canvas, pathes, exit_masks)

        utils.save_frame(canvas,
                         self.image_dir + "/processed_%04d.png" % frame_number)

        return context
예제 #10
0
def test(args):
    """Interactive debugging entry point for point-cloud clustering.

    Streams frames from ``args.pcap_file``, crops each frame to a fixed
    bounding box, runs the multi-target tracker (``mtt``) on the XY
    projection, and scatter-plots the resulting labels.  The user is
    prompted for the next frame number to inspect.

    NOTE(review): everything after the unconditional ``return`` below is
    unreachable legacy code kept for reference.
    """
    parser = pointcloud_parser.Parser('rs', args.pcap_file)

    # Earlier experiments comparing DBSCAN eps values and DBSCAN vs KMeans,
    # kept commented out for reference:
    # frame = next(parser.generator())
    # frame = filter_bound(frame, [-10., 6.], [-4., 5.], [-3., 3])
    # frame = frame[:, [0, 1]]
    # x, y = frame[:, 0], frame[:, 1]

    # def dbscan_for_4_eps(epss):
    # for i, eps in enumerate(epss):
    # l_ = DBSCAN(eps=eps).fit(frame).labels_
    # plt.subplot(2, 2, i+1)
    # plt.xlabel(f"eps = {eps}")
    # plt.scatter(x, y, s=4, c=l_)

    # def compare_dbscan_kmeans():
    # l1 = DBSCAN().fit(frame).labels_
    # l2 = KMeans().fit(frame).labels_
    # plt.subplot(121)
    # plt.xlabel("DBSCAN")
    # plt.scatter(x, y, s=4, c=l1)

    # plt.subplot(122)
    # plt.xlabel("KMeans")
    # plt.scatter(x, y, s=4, c=l2)

    # # dbscan_for_4_eps([0.05, 0.1, 0.5, 1])
    # compare_dbscan_kmeans()
    # plt.show()
    # return

    frame_target = 1
    for frame_cnt, frame in enumerate(parser.generator()):
        # Crop to the region of interest; bounds are x, y, z ranges.
        frame = filter_bound(frame, [-10., 6.], [-4., 5.], [-3., 3])
        x, y = frame[:, 0], frame[:, 1]

        # Track on the XY projection only.
        labels = mtt.run(frame[:, [0, 1]])

        # Fast-forward until the requested frame is reached.
        if frame_cnt < frame_target:
            continue
        frame_target = int(
            input(f'which next frame you want to go(current: {frame_cnt})?'))

        plt.xlabel(f"frame_cnt = {frame_cnt}")
        plt.scatter(x, y, s=4, c=labels)

        # Optional overlay of per-track history, kept commented out:
        # for i, track in enumerate(mtt.tracks):
        # x_ = [hist.x_ for hist in track.hists]
        # y_ = [hist.y_ for hist in track.hists]
        # if x_ != [] and y_ != []:
        # plt.scatter(x_, y_, marker='x')
        # plt.text(x_[-1], y_[-1], str(id(track))[-4:])

        plt.show()

    # NOTE(review): unconditional return — the code below never runs.
    return

    if args.use_serialized_frame:
        for frame in load_frame():
            analyze_frame(frame, args)
            if args.mark_contour_data:
                r = input('Continue?')
            else:
                r = 'y'
            if r == 'y':
                continue
            else:
                break
        return

    with open(args.pcap_file, 'rb') as f:
        pcap = dpkt.pcap.Reader(f)
        i = 0
        for timestamp, buf in pcap:
            # Packet lengths 1248/1290 presumably distinguish two lidar
            # packet formats — TODO confirm against pointcloud_parser.
            if len(buf) == 1248:
                frame = pointcloud_parser.parse(buf, 0)
            elif len(buf) == 1290:
                frame = pointcloud_parser.parse(buf[42:], 1)

            # NOTE(review): if the very first packet matches neither length,
            # `frame` is unbound here and this raises NameError (dead code,
            # but worth fixing if ever re-enabled).
            if frame is None:
                continue

            if args.reset_frames:
                save_frame(frame.astype('f4'), i)
                i += 1
                continue
            analyze_frame(frame, args)
            if args.mark_contour_data:
                r = input('Continue?')
            else:
                r = 'y'
            if r == 'y':
                i += 1
                continue
            else:
                i += 1
                break
def main():
    """End-to-end traffic analysis loop.

    Overlays an exit-zone mask on a reference image, trains a MOG2
    background subtractor, then for every second video frame: computes a
    distance-weighted road-occupancy percentage, detects vehicles with a
    neural network (``tfnet``), links detections into paths, counts
    vehicles crossing the exit zone by type (with timestamps), estimates
    average speed from centroid displacement, draws the visualization,
    and appends per-frame statistics to ``output.txt`` and
    ``simulation.txt`` as JSON.
    """
    # creating exit mask from points, where we will be counting our vehicles
    global exit_mask
    global vehicle_count
    global car_count
    global truck_count
    global bike_count
    global sum_of_exit_mask

    img = cv2.imread(IMAGE_SOURCE)

    # Tint the exit zone with EXIT_COLOR on the reference image.
    _img = np.zeros(img.shape, img.dtype)
    _img[:, :] = EXIT_COLOR
    mask = cv2.bitwise_and(_img, _img, mask=exit_mask)
    cv2.addWeighted(mask, 1, img, 1, 0, img)
    show_me(img, text="Added weigth to mask", show_output=SHOW_OUTPUT)

    # MOG2 background subtractor: 500-frame history, shadow detection on.
    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)

    capRun = skvideo.io.vreader(VIDEO_SOURCE)

    # A parallel cv2 capture is kept only to read frame timestamps
    # (CAP_PROP_POS_MSEC) later on.
    vidObj = cv2.VideoCapture(VIDEO_SOURCE)
    old_time = datetime.datetime.now()

    # skipping 500 frames to train bg subtractor
    train_bg_subtractor(bg_subtractor, capRun, num=500)

    _frame_number = -1
    frame_number = -1
    pathes = []
    for frame in capRun:
        if not frame.any():
            print("Frame capture failed, stopping...")
            break

        # Advance the cv2 capture in lockstep so timestamps stay aligned.
        do_not_need1, do_not_need2 = vidObj.read()

        # Process only every second frame.
        _frame_number += 1
        if _frame_number % 2 != 0:
            continue
        frame_number += 1

        show_me(frame,
                text="Frame " + str(frame_number),
                show_output=SHOW_OUTPUT)
        fg_mask = bg_subtractor.apply(frame, None, 0.001)
        show_me(fg_mask,
                text="After Background Subtraction",
                show_output=SHOW_OUTPUT)
        # fg_mask[fg_mask < 175] = 0
        # show_me(fg_mask, text="Frame after thresholding",
        #         show_output=SHOW_OUTPUT)

        # # Perform morphology
        # se = np.ones((7, 7), dtype='uint8')
        # image_close = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, se)

        # show_me(image_close, text="Mask",
        #         show_output=SHOW_OUTPUT)

        # Threshold and filter the foreground mask.
        fg_mask[fg_mask < 175] = 0
        show_me(fg_mask,
                text="Frame after thresholding",
                show_output=SHOW_OUTPUT)
        fg_mask = filter_mask(fg_mask)
        show_me(fg_mask, text="Frame after filtering", show_output=SHOW_OUTPUT)

        # Restrict the mask to the exit zone for the occupancy estimate.
        fg_mask_area = cv2.bitwise_and(fg_mask, fg_mask, mask=exit_mask)
        show_me(fg_mask_area,
                text="Frame after BitWise And",
                show_output=SHOW_OUTPUT)

        # Sum foreground pixels row by row, weighted by distance(row),
        # to estimate occupancy while correcting for perspective.
        sum_of_fg_mask = 0
        row = 0
        for outer in fg_mask_area:
            # print(outer)
            # print(type(outer))
            #sum_of_fg_mask += list(outer).count(255) * distance(row)
            sum_of_fg_mask += np.count_nonzero(outer == 255) * distance(row)
            row += 1

        percentageActual = (sum_of_fg_mask / sum_of_exit_mask) * 100
        #print("Percentage calculated with distance considered: " + str(percentageActual))

        # percentage = cv2.countNonZero(
        #     fg_mask_area) / (cv2.countNonZero(exit_mask)) * 100
        # print("Percentage calculated without distance considered: " + str(percentage))

        # print(len(_img.shape))
        temp = cv2.merge((fg_mask_area, fg_mask_area, fg_mask_area))
        mask = cv2.bitwise_and(_img, _img, mask=exit_mask)
        cv2.addWeighted(mask, 1, temp, 1, 0, temp)
        show_me(frame, text="Frame after Percentage", show_output=SHOW_OUTPUT)

        # objects Detected
        matches = []

        # Pass the image into the NN
        result = tfnet.return_predict(frame)
        #print("Count1:" + str(len(result)))

        # Keep only detections whose box size falls within the configured
        # contour bounds; store (label, bbox, centroid) per match.
        for detected in result:
            l = detected["label"]
            x = detected["topleft"]["x"]
            y = detected["topleft"]["y"]
            w = detected["bottomright"]["x"] - detected["topleft"]["x"]
            h = detected["bottomright"]["y"] - detected["topleft"]["y"]
            # print(l,x,y,w,h)
            contour_valid = (w >= min_contour_width) and (
                h >= min_contour_height) and (w <= max_contour_width) and (
                    h <= max_contour_height)
            if not contour_valid:
                continue
            centroid = utils.get_centroid(x, y, w, h)
            matches.append((l, (x, y, w, h), centroid))
        # Link current detections to existing paths by nearest predicted
        # position (linear extrapolation from the last two points).
        if not pathes:
            # print("Creating Pathes")
            for match in matches:
                pathes.append([match])
        else:
            new_pathes = []
            for path in pathes:
                # print("Initial path is: ",path)
                _min = 999999
                _match = None
                for p in matches:
                    if (len(path) == 1):
                        d = utils.distance(p[1], path[-1][1])
                    else:
                        # eg: [2,4,6] -> 2*4 - 2 = 6
                        xn = 2 * path[-1][1][0] - path[-2][1][0]
                        yn = 2 * path[-1][1][1] - path[-2][1][1]
                        d = utils.distance(p[1], (xn, yn),
                                           x_weight=x_weight,
                                           y_weight=y_weight)
                    if d < _min:
                        _min = d
                        _match = p
                if _match and _min <= max_dst:
                    # print("Found point: ",_match)
                    matches.remove(_match)  # Remove form current points
                    path.append(_match)  # Add to path
                    # print("Path is: ",path)
                    # Have a list of new paths incase a point did not move
                    new_pathes.append(path)
                # do not drop path if current frame has no matches
                if _match is None:
                    new_pathes.append(path)
            pathes = new_pathes
            # Unmatched detections start new paths.
            if len(matches):
                for p in matches:
                    # print(p)
                    # do not add points that already should be counted
                    # if check_exit(p[2]):
                    #     continue
                    pathes.append([p])
        # save only last N points in every path in pathes
        for i, _ in enumerate(pathes):
            pathes[i] = pathes[i][path_size * -1:]
        # print(pathes)
        # Count vehicles entering exit zone
        new_pathes = []
        for i, path in enumerate(pathes):
            d = path[-2:]
            if (
                    # need at least two points to count
                    len(d) >= 2 and
                    # prev point not in exit zone
                    check_exit(d[0][2]) and
                    # current point in exit zone
                    not check_exit(d[1][2]) and
                    # path len is bigger then min
                    path_size <= len(path)):
                vehicle_count += 1
                vehicle = 'car'
                if (path[-1][0] == 'car'):
                    car_count += 1
                if (path[-1][0] == 'truck'):
                    vehicle = 'truck'
                    truck_count += 1
                # if(path[-1][0] == 'motorbike'):
                #     bike_count += 1
                # Adding timestamp to data
                msec = vidObj.get(cv2.CAP_PROP_POS_MSEC)
                time = old_time + datetime.timedelta(milliseconds=msec)
                # Adding direction to data
                simulation['list'].append({
                    'time': time,
                    'type': vehicle,
                    'direction': 'in'
                })
                # print(data)
                new_pathes.append(path)
            else:
                # prevent linking with path that already in exit zone
                # add = True
                # for p in path:
                #     if check_exit(p[2]):
                #         add = False
                #         break
                # if add:
                #     new_pathes.append(path)
                new_pathes.append(path)
        pathes = new_pathes
        #################################################
        # Speed
        #################################################
        # Average centroid displacement per path, scaled to km/h by
        # speedForOnePixelPerFrame.
        # print(pathes)
        sumPixelDifference = 0
        for path in pathes:
            if len(path) > 1:
                sumPixelDifference += utils.distance(path[-1][2], path[-2][2])
                # print(sumPixelDifference)
        # print("-------------------")
        #print(sumPixelDifference / len(pathes))
        # NOTE(review): raises ZeroDivisionError when `pathes` is empty —
        # presumably never happens after the first detection; confirm.
        avgSpeed = sumPixelDifference / len(pathes) * speedForOnePixelPerFrame
        #print("Count2: " + str(len(pathes)))
        #################################################
        # VISUALIZATION
        #################################################
        # TOP BAR
        cv2.rectangle(frame, (0, 0), (frame.shape[1], 50), (0, 0, 0),
                      cv2.FILLED)
        cv2.putText(frame, (
            "Vehicles: {total} - Cars: {cars} - Trucks: {trucks} - Percentage: {percentage} - Avg Speed: {avgSpeed}km/hr"
            .format(total=vehicle_count,
                    cars=car_count,
                    trucks=truck_count,
                    percentage=str("{0:.2f}".format(percentageActual)),
                    avgSpeed=str("{0:.2f}".format(avgSpeed)))), (30, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 1)
        # MASK1
        # print(exit_mask)
        _frame = np.zeros(frame.shape, frame.dtype)
        # show_me(_img, text = "Numpy array initialized to zeros",show_output = self.show_output)
        _frame[:, :] = EXIT_COLOR
        # show_me(_img, text = "Set it to green",show_output = self.show_output)
        mask = cv2.bitwise_and(_frame, _frame, mask=exit_mask)
        # show_me(mask, text = "Set Mask color",show_output = SHOW_OUTPUT)
        cv2.addWeighted(mask, 1, frame, 1, 0, frame)
        show_me(frame, text="Added weigth to mask", show_output=SHOW_OUTPUT)
        # BOXES
        # PATHS
        # print(pathes)
        for i, path in enumerate(pathes):
            # print(path)
            centroid = np.array(path)[:, 2].tolist()
            contour = path[-1][1]
            # print(contour)
            x, y, w, h = contour
            cv2.rectangle(frame, (x, y), (x + w - 1, y + h - 1),
                          BOUNDING_BOX_COLOUR, 1)
            for point in centroid:
                cv2.circle(frame, point, 2, CAR_COLOURS[0], -1)
                cv2.polylines(frame, [np.int32(centroid)], False,
                              CAR_COLOURS[0], 1)
        show_me(frame, text="Created Paths", show_output=SHOW_OUTPUT)
        print("Frame number: " + str(frame_number) + "  ||  " +
              "Vehicle Count: " + str(vehicle_count))
        utils.save_frame(frame, "OUTPUT/processed_%04d.png" % frame_number)
        # Per-frame statistics for the JSON dump.
        data["list"].append({
            "frameNo":
            frame_number,
            "Vehicles":
            vehicle_count,
            "Cars":
            car_count,
            "Trucks":
            truck_count,
            "Percentage":
            str("{0:.2f}".format(percentageActual)),
            "Speed":
            str("{0:.2f}".format(avgSpeed))
        })
        # Rewrite the JSON outputs every processed frame.
        with open('output.txt', 'w') as jsonFile:
            json.dump(data, jsonFile, default=myconverter)
        with open('simulation.txt', 'w') as jsonFile:
            json.dump(simulation, jsonFile, default=myconverter)