Example #1
from sklearn.neighbors import KNeighborsClassifier

# ranking(), process_topic(), training(), classify() and topic_directory are
# assumed to be defined elsewhere in this module.
def ranking_with_classifier(train_corpus,
                            test_corpus,
                            train_rels,
                            topics,
                            p,
                            ix,
                            alpha1=0.5,
                            alpha2=0.5):
    lst = []
    for topic_id in topics:
        # Rank the test documents for this topic with TF-IDF and normalize
        # the scores by the top-ranked score so they fall in [0, 1].
        results = ranking(topic_id, p, ix, "TF-IDF")
        results = [(el[0], el[1] / results[0][1]) for el in results]

        # Collect the ranked documents' texts from the test corpus in
        # ranking order (assumes document ids are unique).
        docs_by_id = dict(test_corpus)
        new_corpus = [(doc_id, docs_by_id[doc_id])
                      for doc_id, _ in results
                      if doc_id in docs_by_id]

        topic = process_topic(topic_id, topic_directory)
        model = training(topic,
                         train_corpus,
                         train_rels,
                         model=KNeighborsClassifier(n_neighbors=25,
                                                    metric="euclidean"))
        classes = [classify(doc, topic, model) for _, doc in new_corpus]

        # Blend the normalized retrieval score with the classifier output;
        # new_corpus is assumed to align index-by-index with results.
        results = [(el[0], el[1] * alpha1 + classes[i] * alpha2)
                   for i, el in enumerate(results)]
        results.sort(reverse=True, key=lambda x: x[1])
        lst.append(results)
    return lst
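
The blending step above combines a normalized retrieval score with a classifier prediction. Below is a self-contained sketch of the same idea with scikit-learn; the documents, labels, and alpha weights are illustrative, not taken from the original project:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import KNeighborsClassifier

train_docs = ["speed limit sign", "stop sign ahead", "parking rules", "yield sign"]
train_labels = [1, 1, 0, 1]  # 1 = relevant to the topic, 0 = not (illustrative)
test_docs = ["new speed limit posted", "parking meter fees"]
retrieval_scores = [1.0, 0.4]  # normalized so the top-ranked document scores 1.0

vec = TfidfVectorizer()
model = KNeighborsClassifier(n_neighbors=3, metric="euclidean")
model.fit(vec.fit_transform(train_docs), train_labels)

alpha1, alpha2 = 0.5, 0.5
classes = model.predict(vec.transform(test_docs))
blended = [r * alpha1 + c * alpha2 for r, c in zip(retrieval_scores, classes)]
print(sorted(zip(test_docs, blended), key=lambda x: x[1], reverse=True))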

Example #2
import math

import cv2
import numpy as np

# clean_images(), training() and localization() are assumed to come from the
# project's own detection and classification modules.
def main(args):
    # Clean images left over from a previous run
    clean_images()
    # Training phase: build the sign classifier
    model = training()

    vidcap = cv2.VideoCapture(args.file_name)

    fps = vidcap.get(cv2.CAP_PROP_FPS)
    width = vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)   # float
    height = vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float

    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, fps, (640, 480))
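    # Note: frames written to this VideoWriter must match the 640x480 size
    # declared here; the loop below resizes each frame accordingly.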

    # initialize the termination criteria for CAMShift, indicating
    # a maximum of ten iterations or movement by at least one pixel,
    # along with the bounding box of the ROI
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    roiBox = None
    roiHist = None

    success = True
    similitary_contour_with_circle = 0.65  # parameter (unused here; args.similitary_contour_with_circle is passed below)
    count = 0
    current_sign = None
    current_text = ""
    current_size = 0
    sign_count = 0
    coordinates = []
    position = []
    file = open("Output.txt", "w")
    while True:
        success, frame = vidcap.read()
        if not success:
            print("FINISHED")
            break
        width = frame.shape[1]
        height = frame.shape[0]
        #frame = cv2.resize(frame, (640,int(height/(width/640))))
        frame = cv2.resize(frame, (640, 480))

        print("Frame:{}".format(count))
        #image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        coordinate, image, sign_type, text = localization(
            frame, args.min_size_components,
            args.similitary_contour_with_circle, model, count, current_sign)
        if coordinate is not None:
            cv2.rectangle(image, coordinate[0], coordinate[1], (255, 255, 255),
                          1)
        print("Sign:{}".format(sign_type))
        if sign_type > 0 and (not current_sign or sign_type != current_sign):
            current_sign = sign_type
            current_text = text
            top = int(coordinate[0][1] * 1.05)
            left = int(coordinate[0][0] * 1.05)
            bottom = int(coordinate[1][1] * 0.95)
            right = int(coordinate[1][0] * 0.95)

            position = [
                count, sign_type if sign_type <= 8 else 8, coordinate[0][0],
                coordinate[0][1], coordinate[1][0], coordinate[1][1]
            ]
            cv2.rectangle(image, coordinate[0], coordinate[1], (0, 255, 0), 1)
            font = cv2.FONT_HERSHEY_PLAIN
            cv2.putText(image, text, (coordinate[0][0], coordinate[0][1] - 15),
                        font, 1, (0, 0, 255), 2, cv2.LINE_4)

            tl = [left, top]
            br = [right, bottom]
            print(tl, br)
            current_size = math.sqrt(
                math.pow((tl[0] - br[0]), 2) + math.pow((tl[1] - br[1]), 2))
            # grab the ROI for the bounding box and convert it
            # to the HSV color space
            roi = frame[tl[1]:br[1], tl[0]:br[0]]
            roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            #roi = cv2.cvtColor(roi, cv2.COLOR_BGR2LAB)

            # compute a HSV histogram for the ROI and store the
            # bounding box
            roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])
            roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
            roiBox = (tl[0], tl[1], br[0], br[1])

        elif current_sign:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)

            # apply cam shift to the back projection, convert the
            # points to a bounding box, and then draw them
            (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
            pts = cv2.boxPoints(r).astype(np.intp)  # np.int0 was removed in NumPy 2.0
            s = pts.sum(axis=1)
            tl = pts[np.argmin(s)]
            br = pts[np.argmax(s)]
            size = math.sqrt(pow((tl[0] - br[0]), 2) + pow((tl[1] - br[1]), 2))
            print(size)

            # Stop tracking when the box degenerates, grows explosively, or
            # its aspect ratio drifts far from roughly square. Guard against
            # division by zero when the box collapses to zero height.
            box_w = tl[0] - br[0]
            box_h = tl[1] - br[1]
            aspect = math.fabs(box_w / box_h) if box_h != 0 else float("inf")
            if (current_size < 1 or size < 1 or size / current_size > 30
                    or aspect > 2 or aspect < 0.5):
                current_sign = None
                print("Stop tracking")
            else:
                current_size = size

            if sign_type > 0:
                top = int(coordinate[0][1])
                left = int(coordinate[0][0])
                bottom = int(coordinate[1][1])
                right = int(coordinate[1][0])

                position = [
                    count, sign_type if sign_type <= 8 else 8, left, top,
                    right, bottom
                ]
                cv2.rectangle(image, coordinate[0], coordinate[1], (0, 255, 0),
                              1)
                font = cv2.FONT_HERSHEY_PLAIN
                cv2.putText(image, text,
                            (coordinate[0][0], coordinate[0][1] - 15), font, 1,
                            (0, 0, 255), 2, cv2.LINE_4)
            elif current_sign:
                position = [
                    count, sign_type if sign_type <= 8 else 8, tl[0], tl[1],
                    br[0], br[1]
                ]
                cv2.rectangle(image, (tl[0], tl[1]), (br[0], br[1]),
                              (0, 255, 0), 1)
                font = cv2.FONT_HERSHEY_PLAIN
                cv2.putText(image, current_text, (tl[0], tl[1] - 15), font, 1,
                            (0, 0, 255), 2, cv2.LINE_4)

        if current_sign:
            sign_count += 1
            coordinates.append(position)

        cv2.imshow('Result', image)
        count = count + 1
        #Write to video
        out.write(image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    file.write("{}".format(sign_count))
    for pos in coordinates:
        file.write("\n{} {} {} {} {} {}".format(pos[0], pos[1], pos[2], pos[3],
                                                pos[4], pos[5]))
    print("Finish {} frames".format(count))
    file.close()
    return
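
main() expects an args object exposing file_name, min_size_components, and similitary_contour_with_circle. A minimal entry point under that assumption; the default values below are illustrative guesses, not the project's:

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Traffic sign detection and tracking")
    parser.add_argument("file_name", help="path to the input video")
    parser.add_argument("--min_size_components", type=int, default=300,
                        help="minimum connected-component size (assumed default)")
    parser.add_argument("--similitary_contour_with_circle", type=float,
                        default=0.65,
                        help="contour/circle similarity threshold")
    main(parser.parse_args())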

Example #3
# from sign_detection import extract_roi
from detect_circles import extract_roi
from classification import training, getLabel, SVM
import cv2
import numpy as np

textLables = ["not defined", "50", "80", "70", "15", "5", "60", "40", "30"]

# video_cap = cv2.VideoCapture(0)
video_cap = cv2.VideoCapture("jarab.avi")

model = training()

# model = SVM()
# model.load('data_svm.dat')

while True:
    if not video_cap.isOpened():
        print("error: video capture is not opened")
        break
    ret, frame = video_cap.read()

    if not ret:
        print("error: failed to read frame")
        break

    frame = cv2.resize(frame, (640, 480))

    ret = extract_roi(frame)

    if ret is None: