Example #1
0
def show_inference(model, image_path):
    """Run the full detection pipeline on one frame and show the result.

    NOTE(review): despite its name, ``image_path`` is consumed as image
    data (it goes straight into ``np.array``), not a filesystem path —
    confirm with callers before renaming.
    """
    annotated = np.array(image_path)
    lane_image = copy.deepcopy(annotated)
    height, width, channel = annotated.shape

    output_dict = functions.get_dict(dashPointer, model, annotated)

    (confidencesCars, boxesCars, confidencesLights, boxesLights,
     confidencesPersons, boxesPersons) = functions.findBoxes(
        annotated, output_dict)

    # Non-maximum suppression per class: score threshold 0.5, NMS 0.4.
    indexesCars = cv2.dnn.NMSBoxes(boxesCars, confidencesCars, 0.5, 0.4)
    indexesPersons = cv2.dnn.NMSBoxes(boxesPersons, confidencesPersons, 0.5,
                                      0.4)
    indexesLights = cv2.dnn.NMSBoxes(boxesLights, confidencesLights, 0.5, 0.4)

    # Each analysis pass draws its overlay and returns the updated frame.
    annotated = signalDetection_utils.signalDetection(indexesLights,
                                                      boxesLights, annotated)
    annotated = tracking_utils.tracking(indexesCars, boxesCars, annotated)
    annotated = estimate_collide_utils.estimate_collide(
        indexesCars, boxesCars, annotated)
    annotated = estimate_stepping_utils.estimate_stepping(
        indexesPersons, boxesPersons, annotated)

    # Mode banner in the top-right corner.
    cv2.putText(annotated, "DAY", (width - 200, 50), font, 2, (167, 133, 0),
                2, cv2.LINE_AA)

    # Lane overlay uses the untouched copy so earlier drawings don't
    # interfere with lane detection.
    annotated = lane_detection_utils.draw_lines(lanePointer, lane_image,
                                                annotated)
    # lane_detection_utils.all_lines(lanePointer , lane_image , image_np)

    cv2.imshow("finally", annotated)
Example #2
0
def show_inference(dashPointer, lanePointer, frame):
    """Run detection on one frame, threading per-frame state through globals.

    Unlike the stateless variant, detector state (signal counter, tracking
    history, crash/stepping flags, lane flag) is carried across calls via
    module-level globals that each utility updates and returns.
    """
    global flagPerson, areaPerson, areaDetails
    global crash_count_frames
    global signalCounter, flagSignal
    global prev_frame, number
    global flagLanes

    annotated = np.array(frame)
    lane_image = copy.deepcopy(annotated)
    height, width, channel = annotated.shape

    output_dict = functions.get_dict(dashPointer, detection_model, annotated)

    (confidencesCars, boxesCars, confidencesLights, boxesLights,
     confidencesPersons, boxesPersons) = functions.findBoxes(
        annotated, output_dict)

    # Per-class non-maximum suppression: score threshold 0.5, NMS 0.4.
    indexesCars = cv2.dnn.NMSBoxes(boxesCars, confidencesCars, 0.5, 0.4)
    indexesPersons = cv2.dnn.NMSBoxes(boxesPersons, confidencesPersons, 0.5,
                                      0.4)
    indexesLights = cv2.dnn.NMSBoxes(boxesLights, confidencesLights, 0.5, 0.4)

    # Each stage draws its overlay and hands back the updated state.
    annotated, signalCounter, flagSignal = signalDetection_utils.signalDetection(
        indexesLights, boxesLights, annotated, signalCounter, flagSignal)
    annotated, prev_frame, number = tracking_utils.tracking(
        indexesCars, boxesCars, annotated, prev_frame, number)
    annotated, crash_count_frames = estimate_collide_utils.estimate_collide(
        indexesCars, boxesCars, annotated, crash_count_frames)
    annotated, flagPerson, areaPerson, areaDetails = estimate_stepping_utils.estimate_stepping(
        indexesPersons, boxesPersons, annotated, flagPerson, areaPerson,
        areaDetails)

    # Mode banner in the top-left area.
    cv2.putText(annotated, "DAY", (50, 90), font, 2, (167, 133, 0), 2,
                cv2.LINE_AA)

    # Lane detection runs on the untouched copy of the frame.
    annotated, flagLanes = lane_detection_utils.draw_lines(
        lanePointer, dashPointer, lane_image, annotated, flagLanes)
    # lane_detection_utils.all_lines(lanePointer , lane_image , image_np)

    cv2.imshow("Final result", annotated)
def yolo_infer(dashPointer, lanePointer, frame):
    """Detect vehicles, persons and traffic lights on one frame with YOLO,
    run the per-class analysis overlays, and display the annotated result.

    Per-frame detector state is carried across calls via module-level
    globals that each utility updates and returns.

    Args:
        dashPointer: polygon vertices of the dashboard region to mask out
            (also forwarded to lane drawing).
        lanePointer: lane-region parameters forwarded to lane drawing.
        frame: input BGR image (anything ``np.array`` accepts).
    """
    global flagPerson, areaPerson, areaDetails
    global crash_count_frames
    global signalCounter, flagSignal
    global prev_frame, number
    global flagLanes

    image_np = np.array(frame)
    lane_image = copy.deepcopy(image_np)
    height, width, channel = image_np.shape

    # Black out the dashboard polygon so reflections/objects on the dash
    # don't produce spurious detections.
    mask = 255 * np.ones_like(image_np)
    vertices = np.array(dashPointer, np.int32)
    cv2.fillPoly(mask, [vertices], [0, 0, 0])
    image_np = cv2.bitwise_and(image_np, mask)

    # Detecting objects (1/255 scale, 416x416 letterbox, BGR->RGB swap).
    blob = cv2.dnn.blobFromImage(image_np,
                                 0.00392, (416, 416), (0, 0, 0),
                                 True,
                                 crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)

    confidencesCars, boxesCars = [], []
    confidencesLights, boxesLights = [], []
    confidencesPersons, boxesPersons = [], []

    def _to_box(detection):
        """Convert a normalized center-format detection to a pixel
        [xmin, ymin, w, h] box; also return the float area (computed
        before int truncation, matching the original filter)."""
        center_x = detection[0] * width
        center_y = detection[1] * height
        w = detection[2] * width
        h = detection[3] * height
        xmin = center_x - w / 2
        ymin = center_y - h / 2
        return [int(xmin), int(ymin), int(w), int(h)], w * h

    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            # Class ids 2/5/7, 0, 9 — presumably COCO car/bus/truck,
            # person, traffic light; confirm against the loaded model.
            if class_id == 2 or class_id == 5 or class_id == 7:
                box, area = _to_box(detection)
                # Skip tiny (far-away) vehicles below 800 px^2.
                if area >= 800:
                    confidencesCars.append(float(scores[class_id]))
                    boxesCars.append(box)
            elif class_id == 0:
                box, _ = _to_box(detection)
                confidencesPersons.append(float(scores[class_id]))
                boxesPersons.append(box)
            elif class_id == 9:
                box, _ = _to_box(detection)
                confidencesLights.append(float(scores[class_id]))
                boxesLights.append(box)

    # Per-class non-maximum suppression: score threshold 0.5, NMS 0.4.
    indexesLights = cv2.dnn.NMSBoxes(boxesLights, confidencesLights, 0.5, 0.4)
    indexesCars = cv2.dnn.NMSBoxes(boxesCars, confidencesCars, 0.5, 0.4)
    indexesPersons = cv2.dnn.NMSBoxes(boxesPersons, confidencesPersons, 0.5,
                                      0.4)

    # Each stage draws its overlay and hands back the updated state.
    image_np, signalCounter, flagSignal = signalDetection_utils.signalDetection(
        indexesLights, boxesLights, image_np, signalCounter, flagSignal)
    image_np, prev_frame, number = tracking_utils.tracking(
        indexesCars, boxesCars, image_np, prev_frame, number)
    image_np, crash_count_frames = estimate_collide_utils.estimate_collide(
        indexesCars, boxesCars, image_np, crash_count_frames)
    image_np, flagPerson, areaPerson, areaDetails = estimate_stepping_utils.estimate_stepping(
        indexesPersons, boxesPersons, image_np, flagPerson, areaPerson,
        areaDetails)

    # Mode banner in the top-right corner.
    cv2.putText(image_np, "DAY", (width - 200, 50), font, 2, (167, 133, 0), 2,
                cv2.LINE_AA)

    # Lane detection runs on the untouched copy of the frame.
    image_np, flagLanes = lane_detection_utils.draw_lines(
        lanePointer, dashPointer, lane_image, image_np, flagLanes)

    cv2.imshow("finally", image_np)