Example no. 1
from tqdm import tqdm  # progress bar; img_read, read_bboxes, detect_vehicles, overlaps, add_bbox_margin and extract are project helpers assumed to be in scope

def bootstrap(bootstrap_indexes, BG_img, params, trf, trl, trfc, svm):
    # Hard-negative mining: run the current detector on extra frames and add
    # every false positive to the training set as a negative sample.
    folder = params["folder"]
    marginX = params["marginX"]
    marginY = params["marginY"]
    neg_weight = params["neg_weight"]
    method = params["method"]
    feature = params["feature"]

    train_features = trf
    train_labels = trl
    train_feature_count = trfc

    print "Starting bootstrapping..."

    # Bootstrapping
    for i in tqdm(range(len(bootstrap_indexes))):
        img = img_read(folder, bootstrap_indexes[i])
        #motion_img = read_motion_image(folder, bootstrap_indexes[i], BG_img)
        bboxes = read_bboxes(folder, bootstrap_indexes[i])

        #detections = detect_vehicles(img, motion_img, svm, params)
        detections = detect_vehicles(img, None, svm, params)

        hard_negatives = []
        for j in detections:
            if overlaps(j, bboxes) == -1:
                hard_negatives.append(j)

        height, width = img.shape
        hard_negatives = add_bbox_margin(hard_negatives, marginX, marginY,
                                         height, width)

        for j in hard_negatives:
            img_cut = img[j[0]:j[1], j[2]:j[3]]
            #motion_img_cut = motion_img[j[0]:j[1], j[2]:j[3]]
            train_feature_count += 1
            #train_features.append(extract(img_cut, motion_img_cut, method, feature))
            train_features.append(extract(img_cut, None, method, feature))
            train_labels.append(-1)

    print "Bootstrap finished."

    return train_features, train_labels
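
A minimal usage sketch (all values below are hypothetical): the params dict only illustrates the keys read by bootstrap(), svm stands for a previously trained classifier, and train_features / train_labels come from the initial training pass.

params = {
    "folder": "data/frames",        # assumed dataset location
    "marginX": 4, "marginY": 4,     # assumed bounding-box margins in pixels
    "neg_weight": 1.0,              # read but unused in this snippet
    "method": "hog",                # assumed feature-extraction settings
    "feature": "gray",
}
train_features, train_labels = bootstrap(
    bootstrap_indexes=list(range(100, 200)),  # frames used for hard-negative mining
    BG_img=None,          # background image unused while motion features are commented out
    params=params,
    trf=train_features, trl=train_labels, trfc=len(train_features),
    svm=svm)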
Example no. 2
import cv2
from tqdm import tqdm

def sw_search(test_indexes, BG_img, params, svm):
    # Slide the detector over each test frame, draw and save the detections,
    # and compute per-frame average precision plus precision/recall values.
    print("Performing sliding window search")
    folder = params["folder"]

    svm_AP = [0] * len(test_indexes)
    svm_PR = []
    svm_RC = []
    k = 0

    for i in tqdm(range(len(test_indexes))):
        img = img_read(folder, test_indexes[i])
        #motion_img = read_motion_image(folder, test_indexes[i], BG_img)
        bboxes = read_bboxes(folder, test_indexes[i])

        #detections = detect_vehicles(img, motion_img, svm, params)
        detections = detect_vehicles(img, None, svm, params)
        detections = non_max_suppression(detections, 0.01)

        #index = 0
        for j in detections:
            img = cv2.rectangle(img, (j[2], j[0]), (j[3], j[1]), (0, 255, 0),
                                1)
            cv2.putText(img,
                        str(j[4])[:5], (int(j[2]), int(j[0])),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 255))

        filename = "detection{:0>5d}.png".format(k)
        cv2.imwrite(filename, img)

        svm_AP[k], pr, rc = compute_detection_AP(detections, bboxes)

        k += 1

        svm_PR.append(pr)
        svm_RC.append(rc)

    print "Sliding window search is done!"

    return svm_AP, svm_PR, svm_RC
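
A similarly hypothetical usage sketch, reusing the params dict and svm from the sketch above: evaluate the detector on a held-out set of frame indexes and report the mean per-frame average precision.

svm_AP, svm_PR, svm_RC = sw_search(
    test_indexes=list(range(200, 250)),  # assumed test frames
    BG_img=None, params=params, svm=svm)
print("mean AP: {:.3f}".format(sum(svm_AP) / float(len(svm_AP))))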