Example #1
def get_set(metadataFile, classType):
    descriptors = []  # renamed from "set" to avoid shadowing the built-in

    with open(metadataFile, "r") as f:
        entries = f.readlines()

    for entry in entries:
        entry = entry.split()
        filePath = entry[0]
        x, y, scale = int(entry[1]), int(entry[2]), float(entry[3])

        img = cv2.imread(filePath)
        img = cv2.resize(img, (0, 0), fx=scale, fy=scale)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_gray_crop = img_gray[y:y + 128, x:x + 64]

        hog_gray = hog(img_gray_crop,
                       orientations=9,
                       pixels_per_cell=(8, 8),
                       cells_per_block=(2, 2),
                       visualise=False)

        prevFilePath = utils.get_prev_img(filePath)

        prev_img = cv2.imread(prevFilePath)
        prev_img = cv2.resize(prev_img, (0, 0), fx=scale, fy=scale)
        prev_img_gray = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)

        flow = cv2.calcOpticalFlowFarneback(prev_img_gray, img_gray, 0.5, 3,
                                            15, 3, 5, 1.2, 0)

        # flowx, flowy = flow[..., 0], flow[..., 1]
        # flowx_crop, flowy_crop = flowx[y:y+128, x:x+64], flowy[y:y+128, x:x+64]
        #
        # hog_flow_x = hog(flowx_crop, orientations=9, pixels_per_cell=(8, 8),
        #                  cells_per_block=(2, 2), visualise=False)
        # hog_flow_y = hog(flowy_crop, orientations=9, pixels_per_cell=(8, 8),
        #                  cells_per_block=(2, 2), visualise=False)

        hsv = numpy.zeros_like(img)
        hsv[..., 1] = 255

        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / numpy.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        flowRGB = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        flow_gray = cv2.cvtColor(flowRGB, cv2.COLOR_BGR2GRAY)

        flow_gray_crop = flow_gray[y:y + 128, x:x + 64]

        hog_flow = hog(flow_gray_crop,
                       orientations=9,
                       pixels_per_cell=(8, 8),
                       cells_per_block=(2, 2),
                       visualise=False)

        desc = hog_gray + hog_flow

        descriptors.append(desc)
    return descriptors, [classType] * len(entries)
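
The descriptor/label pairs produced by get_set can be fed straight into a classifier. A minimal sketch, assuming hypothetical metadata files pos.txt and neg.txt and a scikit-learn linear SVM (the file names and the choice of sklearn.svm.SVC are assumptions, not part of the original code):

# Hedged sketch: "pos.txt", "neg.txt" and sklearn.svm.SVC are assumptions.
from sklearn.svm import SVC

pos_desc, pos_labels = get_set("pos.txt", 1)   # class 1 = pedestrian crops
neg_desc, neg_labels = get_set("neg.txt", 0)   # class 0 = background crops

X = pos_desc + neg_desc                        # fixed-length HOG descriptors
y = pos_labels + neg_labels

svm = SVC(kernel="linear")
svm.fit(X, y)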
Example #2
def test_img(svm, img_path, scales, subwindow=None):
    base_img = cv2.imread(img_path)

    prev_img_path = utils.get_prev_img(img_path)
    base_prev_img = cv2.imread(prev_img_path)

    windows = []
    windows_features = []
    sc = []

    for scale in scales:
        img = cv2.resize(base_img, (0, 0), fx=scale, fy=scale)
        img_bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        prev_img = cv2.resize(base_prev_img, (0, 0), fx=scale, fy=scale)
        prev_img_bw = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)

        height, width, _ = img.shape

        flow = cv2.calcOpticalFlowFarneback(prev_img_bw, img_bw, 0.5, 3, 15, 3, 5, 1.2, 0)

        hsv = np.zeros_like(img)
        hsv[..., 1] = 255

        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        flowRGB = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        flow_bw = cv2.cvtColor(flowRGB, cv2.COLOR_BGR2GRAY)

        if subwindow is None:
            nsx, nsy, nw, nh = 0, 0, width, height
        else:
            nsx, nsy, nw, nh = utils.getDetectionWindow(subwindow, width, height, scale)

        for x in range(nsx, nsx + nw - 64, 16):
            for y in range(nsy, nsy + nh - 128, 16):
                img_crop = img_bw[y:y + 128, x:x + 64]
                hog_gray = hog(img_crop, orientations=9, pixels_per_cell=(8, 8),
                               cells_per_block=(2, 2), visualise=False)

                flow_crop = flow_bw[y:y + 128, x:x + 64]
                fd_flow = hog(flow_crop, orientations=9, pixels_per_cell=(8, 8),
                              cells_per_block=(2, 2), visualise=False)
                fd = hog_gray + fd_flow

                windows.append((x, y))
                windows_features.append(fd)
                sc.append(scale)

    classes = svm.predict(windows_features)

    results = []
    for i in range(0, len(windows)):
        if classes[i] == 1:
            scale = sc[i]
            results.append((int(windows[i][0] / scale), int(windows[i][1] / scale),
                            int(64 / scale), int(128 / scale)))
    return results
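
A hedged usage sketch for test_img; the pickle-based loading and the file paths are assumptions, and the returned rectangles are (x, y, w, h) in original-image coordinates:

# Hedged sketch: classifier file name and frame path are placeholders.
import pickle
import cv2

with open("svm_hog_flow.pkl", "rb") as f:
    svm = pickle.load(f)

frame_path = "/path/to/frame.jpg"
detections = test_img(svm, frame_path, scales=[0.4, 0.45, 0.5])

frame = cv2.imread(frame_path)
for (x, y, w, h) in detections:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite("detections.jpg", frame)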
Example #3
def getWindowsAndDescriptors(img_path, scales, subwindow=None):
    base_img = cv2.imread(img_path)

    prev_img_path = utils.get_prev_img(img_path)
    base_prev_img = cv2.imread(prev_img_path)

    windows = []

    for scale in scales:
        img = cv2.resize(base_img, (0, 0), fx=scale, fy=scale)
        img_bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        prev_img = cv2.resize(base_prev_img, (0, 0), fx=scale, fy=scale)
        prev_img_bw = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)

        height, width, _ = img.shape

        flow = cv2.calcOpticalFlowFarneback(prev_img_bw, img_bw, 0.5, 3, 15, 3,
                                            5, 1.2, 0)

        hsv = np.zeros_like(img)
        hsv[..., 1] = 255

        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        flowRGB = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        flow_bw = cv2.cvtColor(flowRGB, cv2.COLOR_BGR2GRAY)

        if subwindow is None:
            nsx, nsy, nw, nh = 0, 0, width, height
        else:
            nsx, nsy, nw, nh = utils.getDetectionWindow(
                subwindow, width, height, scale)

        for x in range(nsx, nsx + nw - 64, 16):
            for y in range(nsy, nsy + nh - 128, 16):
                img_crop = img_bw[y:y + 128, x:x + 64]
                hog_gray = hog(img_crop,
                               orientations=9,
                               pixels_per_cell=(8, 8),
                               cells_per_block=(2, 2),
                               visualise=False)

                flow_crop = flow_bw[y:y + 128, x:x + 64]
                fd_flow = hog(flow_crop,
                              orientations=9,
                              pixels_per_cell=(8, 8),
                              cells_per_block=(2, 2),
                              visualise=False)

                fd = hog_gray + fd_flow

                windows.append([(int(x / scale), int(y / scale),
                                 int(64 / scale), int(128 / scale)), fd])
    return windows
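
Unlike test_img, getWindowsAndDescriptors leaves classification to the caller; each element is a [(x, y, w, h), descriptor] pair. A minimal sketch of scoring those pairs with an assumed scikit-learn classifier (the pickle loading, file names, and the 0.0 threshold are assumptions):

# Hedged sketch: file names and the 0.0 threshold are placeholders.
import pickle

with open("svm_hog_flow.pkl", "rb") as f:
    classifier = pickle.load(f)

windows = getWindowsAndDescriptors("/path/to/frame.jpg", [0.4, 0.45, 0.5])
features = [fd for rect, fd in windows]
scores = classifier.decision_function(features)   # signed distance to the margin

detections = [rect for (rect, fd), score in zip(windows, scores) if score > 0.0]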
Example #4
def test_img_new(svm, img_path, scales, subwindow=None):
    base_img = cv2.imread(img_path)

    prev_img_path = utils.get_prev_img(img_path)
    base_prev_img = cv2.imread(prev_img_path)

    windows = []
    windows_features = []
    sc = []

    for scale in scales:
        img = cv2.resize(base_img, (0, 0), fx=scale, fy=scale)
        img_bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        prev_img = cv2.resize(base_prev_img, (0, 0), fx=scale, fy=scale)
        prev_img_bw = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)

        height, width, _ = img.shape

        flow = cv2.calcOpticalFlowFarneback(prev_img_bw, img_bw, 0.5, 3, 15, 3, 5, 1.2, 0)

        flowx, flowy = flow[..., 0], flow[..., 1]

        if subwindow is None:
            nsx, nsy, nw, nh = 0, 0, width, height
        else:
            nsx, nsy, nw, nh = utils.getDetectionWindow(subwindow, width, height, scale)

        for x in range(nsx, nsx + nw - 64, 16):
            for y in range(nsy, nsy + nh - 128, 16):
                img_crop = img_bw[y:y + 128, x:x + 64]
                hog_gray = hog(img_crop, orientations=9, pixels_per_cell=(8, 8),
                               cells_per_block=(2, 2), visualise=False)

                flowx_crop, flowy_crop = flowx[y:y+128, x:x+64], flowy[y:y+128, x:x+64]

                hog_flow_x = hog(flowx_crop, orientations=9, pixels_per_cell=(8, 8),
                                 cells_per_block=(2, 2), visualise=False)
                hog_flow_y = hog(flowy_crop, orientations=9, pixels_per_cell=(8, 8),
                                 cells_per_block=(2, 2), visualise=False)

                fd = numpy.concatenate((hog_gray, hog_flow_x, hog_flow_y))

                windows.append((x, y))
                windows_features.append(fd)
                sc.append(scale)

    classes = svm.predict(windows_features)

    results = []
    for i in range(0, len(windows)):
        if classes[i] == 1:
            scale = sc[i]
            results.append((int(windows[i][0] / scale), int(windows[i][1] / scale),
                            int(64 / scale), int(128 / scale)))
    return results
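
The two variants differ only in how the per-channel HOG vectors are combined: test_img sums the grayscale and flow descriptors elementwise, while test_img_new concatenates the grayscale, flow-x and flow-y descriptors. A hedged illustration of the resulting dimensions (the length 3780 follows from 64x128 crops, 8x8 cells, 2x2 blocks and 9 orientations; the zero vectors below are stand-ins, not real descriptors):

# Illustration only: zero vectors stand in for real HOG descriptors.
import numpy

hog_gray = numpy.zeros(3780)     # 7 * 15 blocks * 2 * 2 cells * 9 bins
hog_flow_x = numpy.zeros(3780)
hog_flow_y = numpy.zeros(3780)

summed = hog_gray + hog_flow_x                                    # still 3780-D
stacked = numpy.concatenate((hog_gray, hog_flow_x, hog_flow_y))   # 11340-D
print(summed.shape, stacked.shape)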
Example #5
def get_set(metadataFile, classType):
    descriptors = []  # renamed from "set" to avoid shadowing the built-in

    with open(metadataFile, "r") as f:
        entries = f.readlines()

    for entry in entries:
        entry = entry.split()
        filePath = entry[0]
        x, y, scale = int(entry[1]), int(entry[2]), float(entry[3])

        img = cv2.imread(filePath)
        img = cv2.resize(img, (0, 0), fx=scale, fy=scale)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_gray_crop = img_gray[y:y+128, x:x+64]

        hog_gray = hog(img_gray_crop, orientations=9, pixels_per_cell=(8, 8),
                         cells_per_block=(2, 2), visualise=False)

        prevFilePath = utils.get_prev_img(filePath)


        prev_img = cv2.imread(prevFilePath)
        prev_img = cv2.resize(prev_img, (0, 0), fx=scale, fy=scale)
        prev_img_gray = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)

        flow = cv2.calcOpticalFlowFarneback(prev_img_gray, img_gray, 0.5, 3, 15, 3, 5, 1.2, 0)


        # flowx, flowy = flow[..., 0], flow[..., 1]
        # flowx_crop, flowy_crop = flowx[y:y+128, x:x+64], flowy[y:y+128, x:x+64]
        #
        # hog_flow_x = hog(flowx_crop, orientations=9, pixels_per_cell=(8, 8),
        #                  cells_per_block=(2, 2), visualise=False)
        # hog_flow_y = hog(flowy_crop, orientations=9, pixels_per_cell=(8, 8),
        #                  cells_per_block=(2, 2), visualise=False)

        hsv = numpy.zeros_like(img)
        hsv[..., 1] = 255

        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / numpy.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        flowRGB = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        flow_gray = cv2.cvtColor(flowRGB, cv2.COLOR_BGR2GRAY)

        flow_gray_crop = flow_gray[y:y+128, x:x+64]

        hog_flow = hog(flow_gray_crop, orientations=9, pixels_per_cell=(8, 8),
                         cells_per_block=(2, 2), visualise=False)

        desc = hog_gray + hog_flow

        descriptors.append(desc)
    return descriptors, [classType] * len(entries)
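
The HSV-based flow visualisation repeated in Examples 1, 2, 3 and 5 could be factored into a single helper. The sketch below simply mirrors those lines; the function name is an assumption, not part of the original code:

import cv2
import numpy

def flow_to_gray(flow, like_img):
    # Mirror of the inline block above: encode flow angle as hue and
    # normalised magnitude as value, then collapse to one channel.
    hsv = numpy.zeros_like(like_img)
    hsv[..., 1] = 255
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / numpy.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    flow_rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    return cv2.cvtColor(flow_rgb, cv2.COLOR_BGR2GRAY)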
Example #6
def getDecisionFunctionsForWindows(checkerFile,
                                   imageDirs,
                                   classifier,
                                   thresh,
                                   hog=True,
                                   subwindow=False):
    checker = detection_checker.Checker(checkerFile)
    fileLists = checker.getFileList()
    metadata = utils.parseMetadata(*imageDirs)

    trueClasses = []
    scores = numpy.array([])

    scales = [[0.45, 0.5, 0.55], [0.4, 0.45, 0.5], [0.3, 0.35], [0.3]]
    scaleSteps = [35, 45, 65, 90]

    index = 0
    for imgPath in fileLists[:10]:
        print(index)
        index += 1
        tilt = int(metadata[imgPath]['tilt'])
        if tilt > 90:
            tilt = 90 - (tilt - 90)

        imgScales = []
        for i in range(0, len(scaleSteps)):
            if tilt < scaleSteps[i]:
                imgScales = scales[i]
                break

        boundingRect = None
        if subwindow:
            boundingRect = optical_flow.optical_flow(
                imgPath, utils.get_prev_img(imgPath))

        if hog:
            wf = tester_hog.getWindowsAndDescriptors(imgPath,
                                                     imgScales,
                                                     subwindow=boundingRect)
        else:
            wf = cascade_tester.get_images(classifier,
                                           imgPath,
                                           imgScales,
                                           subwindow=boundingRect)

        tc = checker.getWindowsClasses(imgPath, [w[0] for w in wf])

        df = computeClassifierDecisions(wf, classifier, thresh, hog=hog)

        print(df)

        trueClasses = trueClasses + tc
        scores = numpy.concatenate((scores, df))

    return trueClasses, scores
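
The (trueClasses, scores) pair returned here is already in the shape expected by scikit-learn's curve utilities. A hedged sketch of one possible evaluation step (the ROC computation, file names and classifier loading are assumptions, not part of the original code):

# Hedged sketch: checker file, image directory and classifier file are placeholders.
import pickle
from sklearn.metrics import roc_curve, auc

with open("svm_hog_flow.pkl", "rb") as f:
    classifier = pickle.load(f)

true_classes, scores = getDecisionFunctionsForWindows(
    "checker.txt", ["/path/to/metadata_dir"], classifier, thresh=0.0)

fpr, tpr, thresholds = roc_curve(true_classes, scores)
print("AUC =", auc(fpr, tpr))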
Example #7
def test_img_new(svm, img_path, scales, subwindow=None):
    base_img = cv2.imread(img_path)

    prev_img_path = utils.get_prev_img(img_path)
    base_prev_img = cv2.imread(prev_img_path)

    windows = []
    windows_features = []
    sc = []

    for scale in scales:
        img = cv2.resize(base_img, (0, 0), fx=scale, fy=scale)
        img_bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        prev_img = cv2.resize(base_prev_img, (0, 0), fx=scale, fy=scale)
        prev_img_bw = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)

        height, width, _ = img.shape

        flow = cv2.calcOpticalFlowFarneback(prev_img_bw, img_bw, 0.5, 3, 15, 3,
                                            5, 1.2, 0)

        flowx, flowy = flow[..., 0], flow[..., 1]

        if subwindow is None:
            nsx, nsy, nw, nh = 0, 0, width, height
        else:
            nsx, nsy, nw, nh = utils.getDetectionWindow(
                subwindow, width, height, scale)

        for x in range(nsx, nsx + nw - 64, 16):
            for y in range(nsy, nsy + nh - 128, 16):
                img_crop = img_bw[y:y + 128, x:x + 64]
                hog_gray = hog(img_crop,
                               orientations=9,
                               pixels_per_cell=(8, 8),
                               cells_per_block=(2, 2),
                               visualise=False)

                flowx_crop = flowx[y:y + 128, x:x + 64]
                flowy_crop = flowy[y:y + 128, x:x + 64]

                hog_flow_x = hog(flowx_crop,
                                 orientations=9,
                                 pixels_per_cell=(8, 8),
                                 cells_per_block=(2, 2),
                                 visualise=False)
                hog_flow_y = hog(flowy_crop,
                                 orientations=9,
                                 pixels_per_cell=(8, 8),
                                 cells_per_block=(2, 2),
                                 visualise=False)

                fd = numpy.concatenate((hog_gray, hog_flow_x, hog_flow_y))

                windows.append((x, y))
                windows_features.append(fd)
                sc.append(scale)

    classes = svm.predict(windows_features)

    results = []
    for i in range(0, len(windows)):
        if classes[i] == 1:
            scale = sc[i]
            results.append(
                (int(windows[i][0] / scale), int(windows[i][1] / scale),
                 int(64 / scale), int(128 / scale)))
    return results
Example #8
def test_multiscale(hog_classifier_file, icf_classifier_file, hog_result_dir,
                    icf_result_dir, no_samples):
    hog_classifier = tester_hog.load_classifier(hog_classifier_file)
    icf_classifier = tester_icf.load_classifier(icf_classifier_file)

    filepaths = [
        "/home/mataevs/captures/metadata/dump_05_05_01_50",
        "/home/mataevs/captures/metadata/dump_05_06_13_10",
        "/home/mataevs/captures/metadata/dump_10_06_11_47",
        "/home/mataevs/captures/metadata/dump_05_05_01_51",
        "/home/mataevs/captures/metadata/dump_05_06_13_15",
        "/home/mataevs/captures/metadata/dump_07_05_11_40",
        "/home/mataevs/captures/metadata/dump_10_06_11_48",
        "/home/mataevs/captures/metadata/dump_05_05_11_54",
        "/home/mataevs/captures/metadata/dump_05_06_13_20",
        "/home/mataevs/captures/metadata/dump_07_05_11_46",
        "/home/mataevs/captures/metadata/dump_10_06_12_16",
        "/home/mataevs/captures/metadata/dump_05_06_12_57",
        "/home/mataevs/captures/metadata/dump_05_06_13_21",
        "/home/mataevs/captures/metadata/dump_05_06_13_24",
        "/home/mataevs/captures/metadata/dump_16_06_14_57",
        "/home/mataevs/captures/metadata/dump_05_06_13_25",
        "/home/mataevs/captures/metadata/dump_07_05_12_03",
        "/home/mataevs/captures/metadata/dump_16_06_15_26",
        "/home/mataevs/captures/metadata/dump_05_06_13_28",
        "/home/mataevs/captures/metadata/dump_07_05_12_05"
    ]

    if not os.path.exists(hog_result_dir):
        os.makedirs(hog_result_dir)
    if not os.path.exists(icf_result_dir):
        os.makedirs(icf_result_dir)

    testImages = utils.getFullImages(*filepaths)

    metadata = utils.parseMetadata(*filepaths)

    scales = [[0.45, 0.5, 0.55], [0.4, 0.45, 0.5], [0.3, 0.35], [0.3]]
    scaleSteps = [35, 45, 65, 90]

    for sample in range(0, no_samples):
        print "### Sample " + str(sample) + " ###"
        imgPath = random.choice(testImages)

        img = cv2.imread(imgPath)

        tilt = int(metadata[imgPath]['tilt'])
        if tilt > 90:
            tilt = 90 - (tilt - 90)

        imgScales = []
        for i in range(0, len(scaleSteps)):
            if tilt < scaleSteps[i]:
                imgScales = scales[i]
                break

        print(imgScales)

        prev_img_path = utils.get_prev_img(imgPath)
        prev_img = cv2.imread(prev_img_path)

        flow_rgb, boundingRect = optical_flow.optical_flow(img, prev_img)

        height, width, _ = img.shape

        bestWindowsHog = tester_hog.test_img(hog_classifier,
                                             imgPath,
                                             imgScales,
                                             allPositive=True,
                                             flow_rgb=flow_rgb,
                                             subwindow=boundingRect)
        if bestWindowsHog:
            scale = bestWindowsHog[0][4]
            img_hog = cv2.resize(img, (0, 0), fx=scale, fy=scale)

            if boundingRect is not None:
                x, y, w, h = utils.getDetectionWindow(boundingRect,
                                                      img_hog.shape[1],
                                                      img_hog.shape[0], scale)
                cv2.rectangle(img_hog, (x, y), (x + w, y + h), (0, 0, 255),
                              thickness=2,
                              lineType=8)

            utils.draw_detections(img_hog, bestWindowsHog)
        else:
            scale = 0.5
            img_hog = cv2.resize(img, (0, 0), fx=scale, fy=scale)
            if boundingRect is not None:
                x, y, w, h = utils.getDetectionWindow(boundingRect,
                                                      img_hog.shape[1],
                                                      img_hog.shape[0], scale)
                cv2.rectangle(img_hog, (x, y), (x + w, y + h), (0, 0, 255),
                              thickness=2,
                              lineType=8)

        cv2.imwrite(hog_result_dir + "/sample_2_" + str(sample) + ".jpg",
                    img_hog)

        bestWindowsIcf = tester_icf.test_img(icf_classifier,
                                             imgPath,
                                             imgScales,
                                             allPositive=True,
                                             subwindow=boundingRect)
        if bestWindowsIcf:
            scale = bestWindowsIcf[0][4]
            img_icf = cv2.resize(img, (0, 0), fx=scale, fy=scale)

            if boundingRect is not None:
                x, y, w, h = utils.getDetectionWindow(boundingRect,
                                                      img_icf.shape[1],
                                                      img_icf.shape[0], scale)
                cv2.rectangle(img_icf, (x, y), (x + w, y + h), (0, 0, 255),
                              thickness=2,
                              lineType=8)

            utils.draw_detections(img_icf, bestWindowsIcf)
        else:
            scale = 0.5
            img_icf = cv2.resize(img, (0, 0), fx=scale, fy=scale)
            if boundingRect is not None:
                x, y, w, h = utils.getDetectionWindow(boundingRect,
                                                      img_icf.shape[1],
                                                      img_icf.shape[0], scale)
                cv2.rectangle(img_icf, (x, y), (x + w, y + h), (0, 0, 255),
                              thickness=2,
                              lineType=8)

        cv2.imwrite(icf_result_dir + "/sample_2_" + str(sample) + ".jpg",
                    img_icf)
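
The camera-tilt to scale-list mapping used in Examples 6, 8 and 9 is the same inline lookup each time; the hedged sketch below restates it as a standalone helper (the helper name is an assumption, not part of the original code):

def scales_for_tilt(tilt):
    # Mirror of the inline selection above: fold tilts past 90 degrees back,
    # then pick the first scale list whose step the tilt falls under.
    scales = [[0.45, 0.5, 0.55], [0.4, 0.45, 0.5], [0.3, 0.35], [0.3]]
    scale_steps = [35, 45, 65, 90]

    if tilt > 90:
        tilt = 90 - (tilt - 90)
    for step, scale_list in zip(scale_steps, scales):
        if tilt < step:
            return scale_list
    return []   # same default as the original imgScales = []

print(scales_for_tilt(40))   # -> [0.4, 0.45, 0.5]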
Example #9
def test_multiscale(
        hog_classifier_file,
        icf_classifier_file,
        hog_result_dir,
        icf_result_dir,
        no_samples):
    hog_classifier = tester_hog.load_classifier(hog_classifier_file)
    icf_classifier = tester_icf.load_classifier(icf_classifier_file)

    filepaths = [
        "/home/mataevs/captures/metadata/dump_05_05_01_50",
        "/home/mataevs/captures/metadata/dump_05_06_13_10",
        "/home/mataevs/captures/metadata/dump_10_06_11_47",
        "/home/mataevs/captures/metadata/dump_05_05_01_51",
        "/home/mataevs/captures/metadata/dump_05_06_13_15",
        "/home/mataevs/captures/metadata/dump_07_05_11_40",
        "/home/mataevs/captures/metadata/dump_10_06_11_48",
        "/home/mataevs/captures/metadata/dump_05_05_11_54",
        "/home/mataevs/captures/metadata/dump_05_06_13_20",
        "/home/mataevs/captures/metadata/dump_07_05_11_46",
        "/home/mataevs/captures/metadata/dump_10_06_12_16",
        "/home/mataevs/captures/metadata/dump_05_06_12_57",
        "/home/mataevs/captures/metadata/dump_05_06_13_21",
        "/home/mataevs/captures/metadata/dump_05_06_13_24",
        "/home/mataevs/captures/metadata/dump_16_06_14_57",
        "/home/mataevs/captures/metadata/dump_05_06_13_25",
        "/home/mataevs/captures/metadata/dump_07_05_12_03",
        "/home/mataevs/captures/metadata/dump_16_06_15_26",
        "/home/mataevs/captures/metadata/dump_05_06_13_28",
        "/home/mataevs/captures/metadata/dump_07_05_12_05"
    ]

    if not os.path.exists(hog_result_dir):
        os.makedirs(hog_result_dir)
    if not os.path.exists(icf_result_dir):
        os.makedirs(icf_result_dir)

    testImages = utils.getFullImages(*filepaths)

    metadata = utils.parseMetadata(*filepaths)

    scales = [
        [0.45, 0.5, 0.55],
        [0.4, 0.45, 0.5],
        [0.3, 0.35],
        [0.3]
    ]
    scaleSteps = [35, 45, 65, 90]

    for sample in range(0, no_samples):
        print "### Sample " + str(sample) + " ###"
        imgPath = random.choice(testImages)

        img = cv2.imread(imgPath)

        tilt = int(metadata[imgPath]['tilt'])
        if tilt > 90:
            tilt = 90 - (tilt - 90)

        imgScales = []
        for i in range(0, len(scaleSteps)):
            if tilt < scaleSteps[i]:
                imgScales = scales[i]
                break

        print(imgScales)

        prev_img_path = utils.get_prev_img(imgPath)
        prev_img = cv2.imread(prev_img_path)

        flow_rgb, boundingRect = optical_flow.optical_flow(img, prev_img)

        height, width, _ = img.shape

        bestWindowsHog = tester_hog.test_img(hog_classifier, imgPath, imgScales, allPositive=True, flow_rgb=flow_rgb, subwindow=boundingRect)
        if bestWindowsHog:
            scale = bestWindowsHog[0][4]
            img_hog = cv2.resize(img, (0, 0), fx=scale, fy=scale)

            if boundingRect is not None:
                x, y, w, h = utils.getDetectionWindow(boundingRect, img_hog.shape[1], img_hog.shape[0], scale)
                cv2.rectangle(img_hog, (x, y), (x+w, y+h), (0, 0, 255), thickness=2, lineType=8)

            utils.draw_detections(img_hog, bestWindowsHog)
        else:
            scale = 0.5
            img_hog = cv2.resize(img, (0, 0), fx=scale, fy=scale)
            if boundingRect is not None:
                x, y, w, h = utils.getDetectionWindow(boundingRect, img_hog.shape[1], img_hog.shape[0], scale)
                cv2.rectangle(img_hog, (x, y), (x+w, y+h), (0, 0, 255), thickness=2, lineType=8)

        cv2.imwrite(hog_result_dir + "/sample_2_" + str(sample) + ".jpg", img_hog)

        bestWindowsIcf = tester_icf.test_img(icf_classifier, imgPath, imgScales, allPositive=True, subwindow=boundingRect)
        if bestWindowsIcf:
            scale = bestWindowsIcf[0][4]
            img_icf = cv2.resize(img, (0, 0), fx=scale, fy=scale)

            if boundingRect is not None:
                x, y, w, h = utils.getDetectionWindow(boundingRect, img_icf.shape[1], img_icf.shape[0], scale)
                cv2.rectangle(img_icf, (x, y), (x+w, y+h), (0, 0, 255), thickness=2, lineType=8)

            utils.draw_detections(img_icf, bestWindowsIcf)
        else:
            scale = 0.5
            img_icf = cv2.resize(img, (0, 0), fx=scale, fy=scale)
            if boundingRect is not None:
                x, y, w, h = utils.getDetectionWindow(boundingRect, img_icf.shape[1], img_icf.shape[0], scale)
                cv2.rectangle(img_icf, (x, y), (x+w, y+h), (0, 0, 255), thickness=2, lineType=8)

        cv2.imwrite(icf_result_dir + "/sample_2_" + str(sample) + ".jpg", img_icf)