Example #1
0
    def detect_vehicles(self, fg_mask, context):
        '''
        1. We get the bounding rectangles of each foreground object using contour information.
        2. These rectangles are filtered using the minimum width and height.
        3. The valid contours along with their centers are returned.
        '''
        matches = []

        #finding only external contours
        #using Teh-Chin chain approximation algorithm (faster)
        im, contours, hierarchy = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_TC89_L1)

        # filtering the contours by width and height
        for (i, contour) in enumerate(contours):
            (x, y, w, h) = cv2.boundingRect(contour)
            contour_valid = (w >= self.min_contour_width) and (
                h >= self.min_contour_height)

            if not contour_valid:
                continue

            #if valid contour, get the center of the bounding box
            centroid = utils.get_centroid(x, y, w, h)
            matches.append(((x, y, w, h), centroid))
        return matches
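Most of these detect_vehicles examples rely on a small utils.get_centroid(x, y, w, h) helper that is not shown. A minimal sketch, assuming it simply returns the center of the bounding box as integer pixel coordinates:

def get_centroid(x, y, w, h):
    # Center of the (x, y, w, h) bounding box; integer division keeps pixel coordinates.
    return (x + w // 2, y + h // 2)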
Example #2
0
    def detect_vehicles(self, fg_mask, context):

        matches = []

        # finding external contours
        contours, hierarchy = cv2.findContours(
            fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)

        print "############calculate object features################"    

        for (i, contour) in enumerate(contours):
            (x, y, w, h) = cv2.boundingRect(contour)
            contour_valid = (w >= self.min_contour_width) and (
                h >= self.min_contour_height)

            if not contour_valid:
                continue
            cout = obj_class.cal_features(x, y, w, h, fg_mask)    
            centroid = utils.get_centroid(x, y, w, h)
            cv2.putText(context['frame'], str((cout,centroid)), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,cv2.LINE_AA)
            #obj_class.features(context['objects'],  context['fg_mask'])

            matches.append(((x, y, w, h), centroid))
        #Matches.append(matches)
        return matches
Example #3
0
    def load_items(self, items: list):
        helper_characteristics = {}
        for item in items:
            for characteristic in item.characteristics:
                if (characteristic not in helper_characteristics):
                    helper_characteristics[characteristic] = []
                helper_characteristics[characteristic].append(
                    item.characteristics[characteristic])
        characteristics = {}

        for characteristic in helper_characteristics:
            characteristics[characteristic] = Characteristic(
                name=characteristic,
                data=helper_characteristics[characteristic],
                centroide=get_centroid(helper_characteristics[characteristic]),
            )
        characteristics = OrderedDict(characteristics)
        z = []
        helper_items = []
        counter_charac = 0
        for characteristic in characteristics:
            data = characteristics[characteristic].data
            i = 0
            for item in data:
                if (counter_charac) == 0:
                    helper_items.append([])
                helper_items[i].append(item)
                i += 1
            z.append(characteristics[characteristic].centroide)
            counter_charac += 1
        self.z = z
        self.items = (helper_items)
        self.characteristics = characteristics
        return self
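In this example get_centroid is applied to the list of values collected for each characteristic rather than to a bounding box. A minimal sketch, assuming the centroid here is simply the arithmetic mean of the data:

def get_centroid(data):
    # Arithmetic mean of a list of numeric values; assumed signature for this example.
    return sum(data) / len(data) if data else None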
Example #4
0
    def build(self):

        if self.cells is None:
            self.find_cells()

        ocr_data = ocr(self.image)

        table_data = []
        for row in self.cells:
            table_data.append([])
            for _ in row:
                table_data[-1].append([])

        for i, data in ocr_data.iterrows():
            centroid = get_centroid(data['left'], data['left'] + data['width'],
                                    data['top'], data['top'] + data['height'])

            cell = self.find_cell_for_point(centroid)
            if cell is not None:
                table_data[cell[0]][cell[1]].append(data['text'])

        for i, row in enumerate(table_data):
            for j, cell in enumerate(row):
                if len(cell) == 0:
                    table_data[i][j] = None
                else:
                    table_data[i][j] = ' '.join(cell)

        self.data = table_data
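Here get_centroid receives the left/right and top/bottom coordinates of an OCR box. A sketch under that assumption:

def get_centroid(x1, x2, y1, y2):
    # Midpoint of a box given by its left/right and top/bottom edges.
    return ((x1 + x2) / 2, (y1 + y2) / 2)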
Example #5
0
    def detect_vehicles(self, fg_mask, context):

        matches = []

        # finding external contours
        if self.major == '3':
            _, contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_TC89_L1)
        else:
            contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_TC89_L1)

        for (i, contour) in enumerate(contours):
            (x, y, w, h) = cv2.boundingRect(contour)
            contour_valid = (w >= self.min_contour_width) and (
                h >= self.min_contour_height)

            if not contour_valid:
                continue

            centroid = utils.get_centroid(x, y, w, h)

            matches.append(((x, y, w, h), centroid))

        return matches
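The self.major branch exists because cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x but only (contours, hierarchy) in OpenCV 2.4 and 4.x. self.major is presumably set once from the installed version, for example:

import cv2

# Assumed initialisation, e.g. in the detector's __init__:
major = cv2.__version__.split('.')[0]   # '2', '3', or '4'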
Example #6
0
    def build_cutter(self,
                     region_id,
                     avoid_id,
                     adjacent_id,
                     FACTOR,
                     op='valve',
                     smooth_iter=50):
        """
        Build cutter for the aorta and LA

        Args:
            label: original SimpleITK image
            region_id: id of aorta or LA to build cutter
            avoid_id: id of aorta or LA to avoid cutting into
            op: 'valve' or 'tissue', option for normal direction
        """
        cut_Im = vtk.vtkImageData()
        cut_Im.DeepCopy(self.label)
        #locate centroid of mitral plane or aortic plane
        pts = utils.locateRegionBoundary(cut_Im,
                                         adjacent_id,
                                         region_id,
                                         size=2.)
        ctr_valve = np.mean(pts, axis=0)

        from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
        vtkpts = vtk.vtkPoints()
        vtkpts.SetData(numpy_to_vtk(pts))
        #centroid of left atrium or aorta
        ctr = utils.get_centroid(cut_Im, region_id)
        #center and nrm of the cutting plane
        length = np.linalg.norm(ctr - ctr_valve)
        nrm_tissue = (ctr - ctr_valve) / length
        nrm_valve_plane = utils.fit_plane_normal(pts)
        #check normal direction
        if op == 'valve':
            #nrm = nrm_valve_plane
            #if np.dot(nrm_tissue, nrm_valve_plane)<0:
            #    nrm =  -1 *nrm
            nrm = nrm_tissue
        elif op == 'tissue':
            nrm = nrm_tissue
            #nrm = nrm_valve_plane
            #if np.dot(nrm_tissue, nrm_valve_plane)<0:
            #    nrm =  -1 *nrm
        else:
            raise ValueError("Incorrect option")
        ori = ctr_valve + FACTOR * nrm / np.linalg.norm(nrm)
        #dilate by a little bit
        cut_Im = utils.label_dilate_erode(
            utils.recolor_vtk_pixels_by_plane(cut_Im, ori, -1. * nrm, 10,
                                              avoid_id), region_id, 0, 8.)
        cut_Im = utils.label_dilate_erode(cut_Im, avoid_id, region_id, 2)

        # marching cube
        cutter = m_c.vtk_marching_cube(cut_Im, region_id, 20, 0.05)
        return cutter, (ctr_valve, nrm)
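utils.fit_plane_normal is not shown; a minimal least-squares sketch, assuming it fits a plane to the boundary points and returns a unit normal:

import numpy as np

def fit_plane_normal(pts):
    # Least-squares plane fit: the normal is the right singular vector with the
    # smallest singular value of the centered point cloud.
    pts = np.asarray(pts, dtype=float)
    centered = pts - pts.mean(axis=0)
    _, _, vh = np.linalg.svd(centered)
    return vh[-1]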
Example #7
0
    def update_model(self, value):
        for key in value:
            self.characteristics[key].data.append(value[key])
            self.characteristics[key].centroide = get_centroid(
                self.characteristics[key].data)
        characteristics = OrderedDict(self.characteristics)
        z = []
        for characteristic in characteristics:
            z.append(characteristics[characteristic].centroide)
        self.z = z
Example #8
0
def dunn_index(clusters):
    clusters = list(filter(lambda c: len(c) != 0, clusters))
    n = len(clusters)

    min_inter_dist = None
    max_intra_dist = None

    for i in range(n):
        for j in range(n):

            if i == j:
                continue

            cl_i = clusters[i]
            cl_j = clusters[j]

            c_i = get_centroid(cl_i)
            c_j = get_centroid(cl_j)

            if c_i is None or c_j is None:
                continue

            dist = euclidean_distance(c_i, c_j)

            if min_inter_dist is None:
                min_inter_dist = dist
            elif dist < min_inter_dist:
                min_inter_dist = dist

    for c in clusters:
        intra_dist = maximal_intra_distance(c)

        if max_intra_dist is None:
            max_intra_dist = intra_dist
        elif intra_dist > max_intra_dist:
            max_intra_dist = intra_dist

    return min_inter_dist / max_intra_dist
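The Dunn index is the ratio of the smallest centroid-to-centroid distance between clusters to the largest within-cluster diameter; larger values indicate better-separated, more compact clusters. The helpers it calls are not shown; minimal sketches, assuming points are equal-length numeric sequences:

import math
from itertools import combinations

def euclidean_distance(a, b):
    # Straight-line distance between two points of equal dimension.
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

def maximal_intra_distance(cluster):
    # Largest pairwise distance inside one cluster (its diameter).
    if len(cluster) < 2:
        return 0.0
    return max(euclidean_distance(p, q) for p, q in combinations(cluster, 2))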
Example #9
0
    def detect_vehicles(self, fg_mask, context):

        matches = []
        print("Detecting vehicles!")
        contours, hierarchy = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_TC89_L1)

        for (i, contour) in enumerate(contours):
            (x, y, w, h) = cv2.boundingRect(contour)
            contour_valid = (w >= self.min_contour_width) and (
                h >= self.min_contour_height)

            if not contour_valid:
                continue

            centroid = utils.get_centroid(x, y, w, h)

            matches.append(((x, y, w, h), centroid))

        return matches
Example #10
0
    def detect_vehicles(self, fg_mask, context):

        matches = []

        # finding external contours
        im2, contours, hierarchy = cv2.findContours(
            fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)

        for (i, contour) in enumerate(contours):
            (x, y, w, h) = cv2.boundingRect(contour)
            contour_valid = (w >= self.min_contour_width) and (
                h >= self.min_contour_height)

            if not contour_valid:
                continue

            centroid = utils.get_centroid(x, y, w, h)
            # important here for determining what the value of context will be
            matches.append(((x, y, w, h), centroid))

        return matches
Example #11
0
    def detect_vehicles(self, fg_mask, context):

        matches = []

        contours, hierarchy = cv2.findContours(
            fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)
        # used to save the number of contours
        for (i, contour) in enumerate(contours):
            (x, y, w, h) = cv2.boundingRect(contour)
            #green rectangle
            contour_valid = (w >= self.min_contour_width) and (
                h >= self.min_contour_height)

            if not contour_valid:
                continue

            centroid = utils.get_centroid(x, y, w, h)
            # used to return the centroid of the vehicle
            matches.append(((x, y, w, h), centroid))

        return matches
Example #12
0
    def detect_vehicles(self, fg_mask, context):
        # https://docs.opencv.org/3.4.2/d4/d73/tutorial_py_contours_begin.html
        matches = []

        # finding external contours
        im2, contours, hierarchy = cv2.findContours(
            fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)

        for (i, contour) in enumerate(contours):
            (x, y, w, h) = cv2.boundingRect(contour)
            contour_valid = (w >= self.min_contour_width) and (
                h >= self.min_contour_height)

            if not contour_valid:
                continue

            centroid = utils.get_centroid(x, y, w, h)

            matches.append(((x, y, w, h), centroid))

        return matches  # metadata for all detected contours in the foreground, found via background subtraction
Example #13
0
    def detect_vehicles(self, fg_mask, context):
        matches = []
        # finding external contours
        im2, contours, hierarchy = cv2.findContours(
            fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )

        for (i, contour) in enumerate(contours):
            (x, y, w, h) = cv2.boundingRect(contour)

            contour_valid = (w >= self.min_contour_width) and (
                h >= self.min_contour_height)

            if not contour_valid:
                continue

            centroid = utils.get_centroid(x, y, w, h)
            matches.append(((x, y, w, h), centroid))

        self.log.debug('#MATCHES FOUND: %s' % len(matches))
        return matches
Example #14
0
    def detect_vehicles(self, fg_mask, context):

        matches = []

        # finding external contours
        im2, contours, hierarchy = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_TC89_L1)

        for (i, contour) in enumerate(contours):
            # Qing: boundingRect does not draw the contour; it returns the minimal bounding rectangle of the contour
            # Qing: x, y - upper-left corner pixel position, w - width, h - height
            (x, y, w, h) = cv2.boundingRect(contour)
            contour_valid = (w >= self.min_contour_width) and (
                h >= self.min_contour_height)

            if not contour_valid:
                continue

            # Qing: Centroid is for checking if a vehicle enters the counting zone
            centroid = utils.get_centroid(x, y, w, h)

            matches.append(((x, y, w, h), centroid))

        return matches
Example #15
0
        results = tfnet.return_predict(frame)
        #for l in searchlabels:
        #    qtdp = len([i for i in results if i['label']==l and i['confidence'] > confidence])
        #    print(qtdp,l,'found')
        matches = []
        for result in results:
            tl = (result['topleft']['x'], result['topleft']['y'])
            br = (result['bottomright']['x'], result['bottomright']['y'])
            label = result['label']
            if label not in colors:
                colors[label] = 200 * np.random.rand(3)
            frame = cv2.rectangle(frame, tl, br, colors[label], 3)
            frame = cv2.putText(frame, label, tl, cv2.FONT_HERSHEY_COMPLEX, 1,
                                (0, 0, 0), 2)

            x, y = tl
            w, h = (br[0] - tl[0], br[1] - tl[1])
            centroid = utils.get_centroid(x, y, w, h)
            matches.append(((x, y, w, h), centroid))
            #print(matches)
        vc(matches)
        context['frame'] = frame
        context = vis(context)

        cv2.imshow('frame', frame)
        print('FPS {:.1f}'.format(1 / (time.time() - stime)))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        pass
Example #16
0
    def augment_map(kp, matches, image, image_prepared):
        # Calculates source and destination points
        src_pts = np.float32([
            image_prepared.keypoints[m.queryIdx]['pt'] for m in matches
        ]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp[m.trainIdx].pt
                              for m in matches]).reshape(-1, 1, 2)
        if AugmentedMaps.debug:
            print('Calculating Homography')
        #homography
        matrix, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        # Get width and height from image
        h, w, __ = np.shape(image)

        # Converts color namespace from BGR to RGB
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Verifies if the image map has any Point of Interest
        if len(image_prepared.interestPoints) > 0:
            # Gets the nearest Point of Interest from the center
            try:
                if AugmentedMaps.debug:
                    print('Calculating nearest interesting point')
                nearest_interestpoint = utils.get_nearest_interestpoint(
                    image_prepared, matrix, w, h)
            except:
                return image
            # Resize the image of the Point of Interest
            interestImage = cv2.cvtColor(nearest_interestpoint[3],
                                         cv2.COLOR_BGR2RGB)
            if image.shape[0] > image.shape[1]:
                interestPointImage = cv2.resize(
                    interestImage,
                    (int(0.30 * image.shape[1]), int(0.25 * image.shape[0])),
                    interpolation=cv2.INTER_CUBIC)
            elif image.shape[0] <= image.shape[1]:
                interestPointImage = cv2.resize(
                    interestImage,
                    (int(0.25 * image.shape[1]), int(0.30 * image.shape[0])),
                    interpolation=cv2.INTER_CUBIC)
            else:
                interestPointImage = cv2.resize(
                    interestImage,
                    (int(0.30 * image.shape[1]), int(0.30 * image.shape[0])),
                    interpolation=cv2.INTER_CUBIC)

            # Calculates the centroid of the Point of Interest image to be drawn
            interestPointCentroid = utils.get_centroid(
                (nearest_interestpoint[0][0][0],
                 nearest_interestpoint[0][1][0],
                 nearest_interestpoint[0][2][0],
                 nearest_interestpoint[0][3][0]))

            # Checks the location of the Point of Interest and calculates where its associated image will be drawn
            if interestPointCentroid[0] < w / 2:
                interesPointImageXi = w - interestPointImage.shape[1]
                interesPointImageYi = h - interestPointImage.shape[0]
                interesPointImageXf = w
                interesPointImageYf = h
                interestPointImageCorderX = interesPointImageXi
                interestPointImageCorderY = interesPointImageYi - 29
            else:
                interesPointImageXi = 0
                interesPointImageYi = h - interestPointImage.shape[0]
                interesPointImageXf = interestPointImage.shape[1]
                interesPointImageYf = h
                interestPointImageCorderX = interesPointImageXf
                interestPointImageCorderY = interesPointImageYi - 29

            if AugmentedMaps.debug:
                print('Calculating projection to draw pyramid')
            projection = utils.projection_matrix(AugmentedMaps.MTX, matrix)
            image = utils.render(image, projection, w / 2, h / 2)
            # Draw image of the Point of Interest in the map
            image[interesPointImageYi:interesPointImageYf,
                  interesPointImageXi:interesPointImageXf] = interestPointImage
            if AugmentedMaps.debug:
                print('Drawing interesting point image')
            # Draws a header for the Point of Interest image
            headerPts = utils.get_header_points(interesPointImageXi,
                                                interesPointImageYi,
                                                interesPointImageXf)

            image = cv2.fillPoly(image, [np.int32(headerPts)], (255, 255, 255))

            # Draws a line from the header to the center of the nearest Point of Interest
            cv2.line(
                image,
                (int(interestPointCentroid[0]), int(interestPointCentroid[1])),
                (int(interestPointImageCorderX),
                 int(interestPointImageCorderY)), (255, 255, 255), 2)

            # Calculates distance between the center and the Point of Interest
            scale = image_prepared.scale
            interestPointDistance = int(scale * nearest_interestpoint[1])

            interestPointText = nearest_interestpoint[2] + \
                " - " + str(interestPointDistance) + " m"

            # Draw name of the Point of Interest
            cv2.putText(
                image, interestPointText,
                (int(headerPts[1][0][0] + 5), int(headerPts[1][0][1] - 10)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.4, 0)

            # Draws the location of the nearest Point of Interest
            image = cv2.polylines(image, [np.int32(nearest_interestpoint[0])],
                                  True, 255, 3, cv2.LINE_AA)

        if AugmentedMaps.debug:
            print('Drawing compass')
        # Gets the points of the compass
        pts_compass = utils.get_compass_points(w, h)

        # Project corners into frame
        dst_compass = cv2.perspectiveTransform(pts_compass, matrix)

        wDiff = int(w / 2 + 30 - dst_compass[0][0][0])
        hDiff = int(h / 2 - dst_compass[0][0][1])

        dst_compass[0][0][0] = dst_compass[0][0][0] + wDiff
        dst_compass[0][0][1] = dst_compass[0][0][1] + hDiff
        dst_compass[1][0][0] = dst_compass[1][0][0] + wDiff
        dst_compass[1][0][1] = dst_compass[1][0][1] + hDiff
        dst_compass[2][0][0] = dst_compass[2][0][0] + wDiff
        dst_compass[2][0][1] = dst_compass[2][0][1] + hDiff
        dst_compass[3][0][0] = dst_compass[3][0][0] + wDiff
        dst_compass[3][0][1] = dst_compass[3][0][1] + hDiff

        # Connect the corners of the compass with lines
        image = cv2.polylines(image, [np.int32(dst_compass)], True, 0, 2,
                              cv2.LINE_AA)

        image = cv2.fillPoly(
            image,
            [np.int32([dst_compass[0], dst_compass[2], dst_compass[3]])],
            (150, 0, 0))

        image = cv2.fillPoly(
            image,
            [np.int32([dst_compass[0], dst_compass[1], dst_compass[2]])],
            (0, 0, 150))

        # Draws a circle at the center of the map
        image = utils.draw_center_map(image, w, h)

        return image
Example #17
0
def main():
    # creating exit mask from points, where we will be counting our vehicles
    global exit_mask
    global vehicle_count
    global car_count
    global truck_count
    global bike_count
    global sum_of_exit_mask

    img = cv2.imread(IMAGE_SOURCE)

    _img = np.zeros(img.shape, img.dtype)
    _img[:, :] = EXIT_COLOR
    mask = cv2.bitwise_and(_img, _img, mask=exit_mask)
    cv2.addWeighted(mask, 1, img, 1, 0, img)
    show_me(img, text="Added weigth to mask", show_output=SHOW_OUTPUT)

    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)

    capRun = skvideo.io.vreader(VIDEO_SOURCE)

    vidObj = cv2.VideoCapture(VIDEO_SOURCE)
    old_time = datetime.datetime.now()

    # skipping 500 frames to train bg subtractor
    train_bg_subtractor(bg_subtractor, capRun, num=500)

    _frame_number = -1
    frame_number = -1
    pathes = []
    for frame in capRun:
        if not frame.any():
            print("Frame capture failed, stopping...")
            break

        do_not_need1, do_not_need2 = vidObj.read()

        _frame_number += 1
        if _frame_number % 2 != 0:
            continue
        frame_number += 1

        show_me(frame,
                text="Frame " + str(frame_number),
                show_output=SHOW_OUTPUT)
        fg_mask = bg_subtractor.apply(frame, None, 0.001)
        show_me(fg_mask,
                text="After Background Subtraction",
                show_output=SHOW_OUTPUT)
        # fg_mask[fg_mask < 175] = 0
        # show_me(fg_mask, text="Frame after thresholding",
        #         show_output=SHOW_OUTPUT)

        # # Perform morphology
        # se = np.ones((7, 7), dtype='uint8')
        # image_close = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, se)

        # show_me(image_close, text="Mask",
        #         show_output=SHOW_OUTPUT)

        fg_mask[fg_mask < 175] = 0
        show_me(fg_mask,
                text="Frame after thresholding",
                show_output=SHOW_OUTPUT)
        fg_mask = filter_mask(fg_mask)
        show_me(fg_mask, text="Frame after filtering", show_output=SHOW_OUTPUT)

        fg_mask_area = cv2.bitwise_and(fg_mask, fg_mask, mask=exit_mask)
        show_me(fg_mask_area,
                text="Frame after BitWise And",
                show_output=SHOW_OUTPUT)

        sum_of_fg_mask = 0
        row = 0
        for outer in fg_mask_area:
            # print(outer)
            # print(type(outer))
            #sum_of_fg_mask += list(outer).count(255) * distance(row)
            sum_of_fg_mask += np.count_nonzero(outer == 255) * distance(row)
            row += 1

        percentageActual = (sum_of_fg_mask / sum_of_exit_mask) * 100
        #print("Percentage calculated with distance considered: " + str(percentageActual))

        # percentage = cv2.countNonZero(
        #     fg_mask_area) / (cv2.countNonZero(exit_mask)) * 100
        # print("Percentage calculated without distance considered: " + str(percentage))

        # print(len(_img.shape))
        temp = cv2.merge((fg_mask_area, fg_mask_area, fg_mask_area))
        mask = cv2.bitwise_and(_img, _img, mask=exit_mask)
        cv2.addWeighted(mask, 1, temp, 1, 0, temp)
        show_me(frame, text="Frame after Percentage", show_output=SHOW_OUTPUT)

        # objects Detected
        matches = []

        # Pass the image into the NN
        result = tfnet.return_predict(frame)
        #print("Count1:" + str(len(result)))

        for detected in result:
            l = detected["label"]
            x = detected["topleft"]["x"]
            y = detected["topleft"]["y"]
            w = detected["bottomright"]["x"] - detected["topleft"]["x"]
            h = detected["bottomright"]["y"] - detected["topleft"]["y"]
            # print(l,x,y,w,h)
            contour_valid = (w >= min_contour_width) and (
                h >= min_contour_height) and (w <= max_contour_width) and (
                    h <= max_contour_height)
            if not contour_valid:
                continue
            centroid = utils.get_centroid(x, y, w, h)
            matches.append((l, (x, y, w, h), centroid))
        if not pathes:
            # print("Creating Pathes")
            for match in matches:
                pathes.append([match])
        else:
            new_pathes = []
            for path in pathes:
                # print("Initial path is: ",path)
                _min = 999999
                _match = None
                for p in matches:
                    if (len(path) == 1):
                        d = utils.distance(p[1], path[-1][1])
                    else:
                        # eg: [2,4,6] -> 2*4 - 2 = 6
                        xn = 2 * path[-1][1][0] - path[-2][1][0]
                        yn = 2 * path[-1][1][1] - path[-2][1][1]
                        d = utils.distance(p[1], (xn, yn),
                                           x_weight=x_weight,
                                           y_weight=y_weight)
                    if d < _min:
                        _min = d
                        _match = p
                if _match and _min <= max_dst:
                    # print("Found point: ",_match)
                    matches.remove(_match)  # Remove from current points
                    path.append(_match)  # Add to path
                    # print("Path is: ",path)
                    # Keep a list of new paths in case a point did not move
                    new_pathes.append(path)
                # do not drop path if current frame has no matches
                if _match is None:
                    new_pathes.append(path)
            pathes = new_pathes
            if len(matches):
                for p in matches:
                    # print(p)
                    # do not add points that already should be counted
                    # if check_exit(p[2]):
                    #     continue
                    pathes.append([p])
        # save only last N points in every path in pathes
        for i, _ in enumerate(pathes):
            pathes[i] = pathes[i][path_size * -1:]
        # print(pathes)
        # Count vehicles entering exit zone
        new_pathes = []
        for i, path in enumerate(pathes):
            d = path[-2:]
            if (
                    # need at least two points to count
                    len(d) >= 2 and
                    # prev point not in exit zone
                    check_exit(d[0][2]) and
                    # current point in exit zone
                    not check_exit(d[1][2]) and
                    # path length is bigger than the minimum
                    path_size <= len(path)):
                vehicle_count += 1
                vehicle = 'car'
                if (path[-1][0] == 'car'):
                    car_count += 1
                if (path[-1][0] == 'truck'):
                    vehicle = 'truck'
                    truck_count += 1
                # if(path[-1][0] == 'motorbike'):
                #     bike_count += 1
                # Adding timestamp to data
                msec = vidObj.get(cv2.CAP_PROP_POS_MSEC)
                time = old_time + datetime.timedelta(milliseconds=msec)
                # Adding direction to data
                simulation['list'].append({
                    'time': time,
                    'type': vehicle,
                    'direction': 'in'
                })
                # print(data)
                new_pathes.append(path)
            else:
                # prevent linking with path that already in exit zone
                # add = True
                # for p in path:
                #     if check_exit(p[2]):
                #         add = False
                #         break
                # if add:
                #     new_pathes.append(path)
                new_pathes.append(path)
        pathes = new_pathes
        #################################################
        # Speed
        #################################################
        # print(pathes)
        sumPixelDifference = 0
        for path in pathes:
            if len(path) > 1:
                sumPixelDifference += utils.distance(path[-1][2], path[-2][2])
                # print(sumPixelDifference)
        # print("-------------------")
        #print(sumPixelDifference / len(pathes))
        avgSpeed = sumPixelDifference / len(pathes) * speedForOnePixelPerFrame
        #print("Count2: " + str(len(pathes)))
        #################################################
        # VISUALIZATION
        #################################################
        # TOP BAR
        cv2.rectangle(frame, (0, 0), (frame.shape[1], 50), (0, 0, 0),
                      cv2.FILLED)
        cv2.putText(frame, (
            "Vehicles: {total} - Cars: {cars} - Trucks: {trucks} - Percentage: {percentage} - Avg Speed: {avgSpeed}km/hr"
            .format(total=vehicle_count,
                    cars=car_count,
                    trucks=truck_count,
                    percentage=str("{0:.2f}".format(percentageActual)),
                    avgSpeed=str("{0:.2f}".format(avgSpeed)))), (30, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 1)
        # MASK1
        # print(exit_mask)
        _frame = np.zeros(frame.shape, frame.dtype)
        # show_me(_img, text = "Numpy array initialized to zeros",show_output = self.show_output)
        _frame[:, :] = EXIT_COLOR
        # show_me(_img, text = "Set it to green",show_output = self.show_output)
        mask = cv2.bitwise_and(_frame, _frame, mask=exit_mask)
        # show_me(mask, text = "Set Mask color",show_output = SHOW_OUTPUT)
        cv2.addWeighted(mask, 1, frame, 1, 0, frame)
        show_me(frame, text="Added weigth to mask", show_output=SHOW_OUTPUT)
        # BOXES
        # PATHS
        # print(pathes)
        for i, path in enumerate(pathes):
            # print(path)
            centroid = np.array(path)[:, 2].tolist()
            contour = path[-1][1]
            # print(contour)
            x, y, w, h = contour
            cv2.rectangle(frame, (x, y), (x + w - 1, y + h - 1),
                          BOUNDING_BOX_COLOUR, 1)
            for point in centroid:
                cv2.circle(frame, point, 2, CAR_COLOURS[0], -1)
                cv2.polylines(frame, [np.int32(centroid)], False,
                              CAR_COLOURS[0], 1)
        show_me(frame, text="Created Paths", show_output=SHOW_OUTPUT)
        print("Frame number: " + str(frame_number) + "  ||  " +
              "Vehicle Count: " + str(vehicle_count))
        utils.save_frame(frame, "OUTPUT/processed_%04d.png" % frame_number)
        data["list"].append({
            "frameNo":
            frame_number,
            "Vehicles":
            vehicle_count,
            "Cars":
            car_count,
            "Trucks":
            truck_count,
            "Percentage":
            str("{0:.2f}".format(percentageActual)),
            "Speed":
            str("{0:.2f}".format(avgSpeed))
        })
        with open('output.txt', 'w') as jsonFile:
            json.dump(data, jsonFile, default=myconverter)
        with open('simulation.txt', 'w') as jsonFile:
            json.dump(simulation, jsonFile, default=myconverter)
Example #18
0
def get_contour_feature(color_img, contours, edge_type):
    ''' Extract contour color, size, shape, color_gradient features

    Args:
        color_img: (ndarray) resized colored input img, sized [736, N, 3]
        contours: (list of ndarray), len = Num_of_cnts
        contours[0].shape = (Num_of_pixels, 1, 2)
    Returns:
        cnt_dic_list = [{
            'cnt': contours[i],
            'shape': cnt_pixel_distances[i],
            'color': cnt_avg_lab[i],
            'size': cnt_norm_size[i],
            'color_gradient': cnt_color_gradient[i]
        } for i in range(len(contours))]
    '''
    height, width, channel = color_img.shape

    # record the distance between pixels and the centroid
    # the number of sample distance depend on the dimension of the contour
    cnt_pixel_distances = []

    # record the color gradient of the contour
    cnt_color_gradient = []

    # several probable dimensions for the contour shape
    # If the number of contour pixels is between 4 and 8, we take 4 as its dimension.
    factor_360 = [4, 8, 20, 40, 90, 180, 360]

    most_cnt_len = len(contours[int(len(contours) * 0.8)])  # 248
    sample_number = min(factor_360,
                        key=lambda factor: abs(factor - most_cnt_len))  # 360

    for contour in tqdm(contours, desc=f'[{edge_type}] Feature Extraction'):
        pixel_features = []
        cM = get_centroid(contour)

        for pixel in contour:
            pixel = pixel[0]

            vector = pixel - cM
            horizon = (0, 1)
            distance = eucl_distance(pixel, cM)
            angle = angle_between(vector, horizon)

            pixel_features.append({
                'coordinate': pixel,
                'distance': distance,
                'angle': angle
            })

        max_distance = max([f['distance'] for f in pixel_features])
        for f in pixel_features:
            f['distance'] = f['distance'] / max_distance

        # find main rotate angle by fit ellipse
        ellipse = cv2.fitEllipse(
            contour)  # ((694.17, 662.93), (10.77, 22.17), 171.98)
        main_angle = ellipse[2]

        # rotate contour pixels to fit main angle and re-calculate pixels' angle.
        pixel_features = rotate_contour(pixel_features, main_angle)

        # ------------edit to here-------------

        pixel_distances, pixel_coordinates, color_gradient = \
            sample_by_angle(color_img, pixel_features, sample_number)

        cnt_pixel_distances.append(pixel_distances)
        cnt_color_gradient.append(color_gradient)

    max_size = len(max(contours, key=lambda x: len(x)))
    cnt_norm_size = [[len(contour) / max_size] for contour in contours]
    cnt_avg_lab = [FindCntAvgLAB(contour, color_img) for contour in contours]

    cnt_dic_list = [{
        'cnt': contours[i],
        'shape': cnt_pixel_distances[i],
        'color': cnt_avg_lab[i],
        'size': cnt_norm_size[i],
        'color_gradient': cnt_color_gradient[i]
    } for i in range(len(contours))]

    return cnt_dic_list
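For OpenCV contours, get_centroid is typically computed from the contour's spatial moments; a sketch under that assumption, returning an array so that pixel - cM works directly:

import cv2
import numpy as np

def get_centroid(contour):
    # Centroid from spatial moments; fall back to the mean of the points
    # when the contour area (m00) is zero.
    M = cv2.moments(contour)
    if M['m00'] != 0:
        return np.array([int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])])
    return contour[:, 0].mean(axis=0).astype(int)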
Example #19
0
def get_features(color_img, contours, drawer, do_draw, filter_by_gradient):
    ''' Extract contour color, size, shape, color_gradient features

    Args:
        color_img: (ndarray) resized colored input img, sized [736, N, 3]
        contours: (list of ndarray), len = Num_of_cnts
        contours[0].shape = (Num_of_pixels, 1, 2)
    Returns:
        cnt_dic_list = [{
            'cnt': contours[i],
            'shape': cnt_pixel_distances[i],
            'color': cnt_avg_lab[i],
            'size': cnt_norm_size[i],
            'color_gradient': cnt_color_gradient[i]
        } for i in range(len(contours))]
    '''
    
    accept_cnts = []
    cnt_pixel_distances = []
    cnt_color_gradient = []
    sample_number = 180
    all_grads = [get_cnt_color_gradient(c, color_img) for c in contours]
    all_grad_mean = sum(all_grads) / len(all_grads) if len(all_grads) else 0
    high_grad = [g for g in all_grads if g > 40]
    high_grad_mean = sum(high_grad) / len(high_grad) if len(high_grad) else 0

    for contour, grad in tqdm(zip(contours, all_grads), desc='[Get features]', total=len(contours)):
        
        if filter_by_gradient:
            if (all_grad_mean > 20 and grad < 20) or (high_grad_mean > 60 and grad < 40):
                continue
        cnt_color_gradient.append(grad)
        accept_cnts.append(contour)

        pixel_features = []
        cM = get_centroid(contour)

        for pixel in contour:
            pixel = pixel[0]

            vector = pixel - cM
            horizon = (0, 1)
            distance = eucl_distance(pixel, cM)
            angle = angle_between(vector, horizon)
            
            pixel_features.append({
                'coordinate': pixel,
                'distance': distance, 
                'angle': angle
            })

        max_distance = max([f['distance'] for f in pixel_features])
        for f in pixel_features:
            f['distance'] = f['distance'] / max_distance

        # find main rotate angle by fit ellipse
        ellipse = cv2.fitEllipse(contour)   # ((694.17, 662.93), (10.77, 22.17), 171.98)
        main_angle = ellipse[2]

        # rotate contour pixels to fit main angle and re-calculate pixels' angle.
        pixel_features = rotate_contour(pixel_features, main_angle)
        
        # shape feature
        pixel_distances = [f['distance'] for f in pixel_features]
        dist_sample_step = len(pixel_distances) / sample_number
        pixel_distances = [pixel_distances[math.floor(i*dist_sample_step)] for i in range(sample_number)]
        cnt_pixel_distances.append(pixel_distances)

    contours = accept_cnts
    if do_draw and filter_by_gradient:
        drawer.save(drawer.draw(contours), '2-0-1_FilterByGrad')
    
    cnt_size = list(map(cv2.contourArea, contours))
    # cnt_size, contours = remove_size_outlier(cnt_size, contours, drawer)
    max_size = max(cnt_size)
    cnt_norm_size = [[size / max_size] for size in cnt_size]

    # color feature
    cnt_avg_lab = [get_color_feature(contour, color_img) for contour in contours]

    
    cnt_dic_list = [{
        'cnt': contours[i],
        'shape': cnt_pixel_distances[i],
        'color': cnt_avg_lab[i],
        'size': cnt_norm_size[i],
        'color_gradient': cnt_color_gradient[i],
    } for i in range(len(contours))]

    return contours, cnt_dic_list
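angle_between is also left to the utilities; a sketch assuming it returns the angle in degrees between two 2-D vectors:

import math

def angle_between(v1, v2):
    # Angle in degrees from v2 to v1, wrapped into [0, 360).
    a = math.degrees(math.atan2(v1[1], v1[0]) - math.atan2(v2[1], v2[0]))
    return a % 360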
Example #20
0
def main():
    # creating exit mask from points, where we will be counting our vehicles
    global exit_mask
    global vehicle_count
    global car_count
    global truck_count
    global bike_count
    global sum_of_exit_mask

    img = cv2.imread(IMAGE_SOURCE)

    _img = np.zeros(img.shape, img.dtype)
    _img[:, :] = EXIT_COLOR
    mask = cv2.bitwise_and(_img, _img, mask=exit_mask)
    cv2.addWeighted(mask, 1, img, 1, 0, img)
    show_me(img, text="Added weigth to mask", show_output=SHOW_OUTPUT)

    print("1")
    capRun = skvideo.io.vreader(VIDEO_SOURCE)

    vidObj = cv2.VideoCapture(VIDEO_SOURCE)
    old_time = datetime.datetime.now()
    print("2")
    _frame_number = -1
    frame_number = -1
    pathes = []
    for frame in capRun:
        if not frame.any():
            print("Frame capture failed, stopping...")
            break
        _frame_number += 1
        if _frame_number % 2 != 0:
            continue
        frame_number += 1

        show_me(frame, text="Frame " + str(frame_number),
                show_output=SHOW_OUTPUT)
        print(frame_number)
        # objects Detected
        matches = []

        # Pass the image into the NN
        result = tfnet.return_predict(frame)
        #print("Count1:" + str(len(result)))

        for detected in result:
            l = detected["label"]
            x = detected["topleft"]["x"]
            y = detected["topleft"]["y"]
            w = detected["bottomright"]["x"] - detected["topleft"]["x"]
            h = detected["bottomright"]["y"] - detected["topleft"]["y"]
            # print(l,x,y,w,h)
            contour_valid = (w >= min_contour_width) and (h >= min_contour_height) and (
                w <= max_contour_width) and (h <= max_contour_height)
            if not contour_valid:
                continue
            centroid = utils.get_centroid(x, y, w, h)
            matches.append((l, (x, y, w, h), centroid))
        if not pathes:
            # print("Creating Pathes")
            for match in matches:
                pathes.append([match])
        else:
            new_pathes = []
            for path in pathes:
                # print("Initial path is: ",path)
                _min = 999999
                _match = None
                for p in matches:
                    if(len(path) == 1):
                        d = utils.distance(p[1], path[-1][1])
                    else:
                        # eg: [2,4,6] -> 2*4 - 2 = 6
                        xn = 2 * path[-1][1][0] - path[-2][1][0]
                        yn = 2 * path[-1][1][1] - path[-2][1][1]
                        d = utils.distance(
                            p[1], (xn, yn),
                            x_weight=x_weight,
                            y_weight=y_weight
                        )
                    if d < _min:
                        _min = d
                        _match = p
                if _match and _min <= max_dst:
                    # print("Found point: ",_match)
                    matches.remove(_match)  # Remove from current points
                    path.append(_match)  # Add to path
                    # print("Path is: ",path)
                    # Keep a list of new paths in case a point did not move
                    new_pathes.append(path)
                # do not drop path if current frame has no matches
                if _match is None:
                    new_pathes.append(path)
            pathes = new_pathes
            if len(matches):
                for p in matches:
                    # print(p)
                    # do not add points that already should be counted
                    # if check_exit(p[2]):
                    #     continue
                    pathes.append([p])
        # save only last N points in every path in pathes
        for i, _ in enumerate(pathes):
            pathes[i] = pathes[i][path_size * -1:]
        # print(pathes)
        # Count vehicles entering exit zone
        new_pathes = []
        for i, path in enumerate(pathes):
            d = path[-2:]
            if (
                # need at least two points to count
                len(d) >= 2 and
                # prev point not in exit zone
                check_exit(d[0][2]) and
                # current point in exit zone
                not check_exit(d[1][2]) and
                # path length is bigger than the minimum
                path_size <= len(path)
            ):
                vehicle_count += 1
                vehicle = 'car'
                if(path[-1][0] == 'car'):
                    car_count += 1
                if(path[-1][0] == 'truck'):
                    vehicle = 'truck'
                    truck_count += 1
                # if(path[-1][0] == 'motorbike'):
                #     bike_count += 1
                # Adding timestamp to data
                msec = vidObj.get(cv2.CAP_PROP_POS_MSEC)
                time = old_time + datetime.timedelta(milliseconds=msec)
                # Adding direction to data
                data['list'].append(
                   {'time': time, 'type': vehicle, 'direction': 'in'})
                # print(data)
                new_pathes.append(path)
            else:
                # prevent linking with path that already in exit zone
                # add = True
                # for p in path:
                #     if check_exit(p[2]):
                #         add = False
                #         break
                # if add:
                #     new_pathes.append(path)
                new_pathes.append(path)
        pathes = new_pathes
    
        with open('output.txt','w') as jsonFile:
            json.dump(data, jsonFile, default = myconverter)
        with open('simulation.txt','w') as jsonFile:
            json.dump(simulation, jsonFile, default = myconverter)
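json.dump cannot serialize datetime objects on its own, which is why both output files are written with default=myconverter. A minimal sketch of such a converter:

import datetime

def myconverter(obj):
    # Fallback serializer for json.dump: turn datetimes into ISO-8601 strings.
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    raise TypeError('%r is not JSON serializable' % obj)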