def execute_dlib_detector(self, img):

        #http://dlib.net/python/index.html#dlib.fhog_object_detector
        #dets, scores, weights = self.detectors[0].run(img)
        #dets, scores, weights = self.detectors[1].run(img)
        #import time
        #start_time = time.time()
        dets, scores, weights = dlib.fhog_object_detector.run_multiple(self.detectors, img, upsample_num_times=1, adjust_threshold=0.01)
        #print("%s" % (time.time() - start_time))
        #print(dets)
        #print(scores)
        #print(weights)
        ## dets = self.det_bound_check(dets, 2)

        if len(dets) > 1:
            nms_dets, nms_scores, nms_det_types = nms.non_max_suppression_fast(dets, scores, weights, overlapThresh=0.5)
            #print("Number of objects detected: {}".format(len(nms_dets)))
        else:
            nms_dets, nms_scores, nms_det_types = dets, scores, weights

        #print new_dets
        #win.clear_overlay()
        #win.set_image(img)
        #win.add_overlay(dets)
        #dlib.hit_enter_to_continue()

        #io.imshow(img)
        #io.show()
        return nms_dets, nms_scores, nms_det_types
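# Every example on this page calls some variant of the pyimagesearch /
# Malisiewicz-style non_max_suppression_fast. The signatures differ from
# example to example (some take scores and return the kept scores, some
# return the indices of the kept boxes), so the following is only a
# minimal reference sketch of the most common form: rows of
# (x1, y1, x2, y2) corners plus an overlap threshold.
import numpy as np

def non_max_suppression_fast(boxes, overlapThresh):
    # no boxes, nothing to suppress
    if len(boxes) == 0:
        return np.array([], dtype="int")
    boxes = boxes.astype("float")
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    # process boxes from the bottom-most upwards
    idxs = np.argsort(y2)
    pick = []
    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        # intersection of the current box with every remaining box
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[idxs[:last]]
        # keep the current box, drop everything that overlaps it too much
        idxs = np.delete(idxs, np.concatenate(
            ([last], np.where(overlap > overlapThresh)[0])))
    return boxes[pick].astype("int")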
Example #2
    def filter_boxes(self, contours, width, height):
        ix, iy = self.get_crop_values(width, height)
        width += ix * 2
        height += iy * 2

        boxes = []
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area < self.min_area:
                continue
            x, y, w, h = cv2.boundingRect(cnt)
            solidity = area / (w * h)
            if solidity < self.min_solidity:
                continue
            if w > 128 and h > 96:
                continue
            boxes.append([x, y, x+w, y+h])

        boxes = non_max_suppression_fast(np.array(boxes), 0.3).tolist()
        for i, box in enumerate(boxes):
            box[0] += ix
            box[1] += iy
            box[2] += ix
            box[3] += iy

            box[0] = float(box[0]) / width
            box[1] = float(box[1]) / height
            box[2] = float(box[2]) / width
            box[3] = float(box[3]) / height
            boxes[i] = box
        return boxes
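    # Note (not from the source): the boxes returned above are shifted by
    # the crop offsets (ix, iy) and then normalized to [0, 1] by the padded
    # width/height, so callers can scale them to any output resolution.
    # Hypothetical usage, assuming contours from cv2.findContours on a
    # binary foreground mask:
    #
    #   contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
    #                                  cv2.CHAIN_APPROX_SIMPLE)
    #   boxes = self.filter_boxes(contours, mask.shape[1], mask.shape[0])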
Example #3
def run():
    fileList = os.listdir(cfg.resultsFolder)
    resultsFileList = filter(lambda element: '.result' in element, fileList)

    for resultsFile in resultsFileList:

        resultsFilePath = cfg.resultsFolder + '/' + resultsFile
        file = open(resultsFilePath, 'rb')
        imageResults = pickle.load(file)

        boxes = imageResults['bboxes']
        scores = imageResults['scores']
        imagepath = imageResults['imagepath']

        filename = os.path.basename(imagepath)
        if boxes is None:
            print ('No pedestrians found for image '+imagepath)
            continue

        print ('Saving results for image '+filename)

        idx = np.where(scores > cfg.decision_threshold)
        boxes = boxes[idx]
        scores = scores[idx]

        boxes, scores = nms.non_max_suppression_fast(boxes, scores, overlapthresh= cfg.nmsOverlapThresh)

        img = Image.open(imagepath)
        #Show the results on a colored image
        img = drawing.drawResultsOnImage(img, boxes, scores)
        img.save('Results/'+filename,"PNG")

        file.close()

    print ('Finished!')
Example #4
def generatePrediction(img, save_dir):
    windows_size = (150, 150)
    initial_bounding_box_scaler = 0.25
    max_bounding_box_scaler = 0.4
    step_size = 0.05
    image_size = (100, 100)
    #input image
    #img = cv2.imread(d)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    bounding_box_scales = np.arange(initial_bounding_box_scaler,
                                    max_bounding_box_scaler,
                                    step_size)  #list of bounding box scales
    log_stages = 3  #LoG stages

    #shown in pseudocode 1 of the paper
    ROI_candidates = []

    for scales in bounding_box_scales:
        for r in ROI_generation(img, log_stages, scales, image_size,
                                windows_size):
            ROI_candidates.append(r)

    #convert image to BGR to evaluate the result
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

    #merge redundant bounding boxes using non-max suppression
    roi = non_max_suppression_fast(np.array(ROI_candidates), 0.3)

    #draw all bounding boxes (the NMS output rows are corner coordinates)
    for (x1, y1, x2, y2) in roi:
        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)

    cv2.imwrite(save_dir, img)
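# Hypothetical driver for generatePrediction (the file names are
# placeholders, and ROI_generation is assumed to be the paper's LoG-based
# proposal generator imported elsewhere):
#
#   img = cv2.imread("input.jpg")
#   generatePrediction(img, "prediction.jpg")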
Example #5
    def detect_hand(self, img_norm):
        assert -1 <= img_norm.min() and img_norm.max() <= 1,\
        "img_norm should be in range [-1, 1]"
        assert img_norm.shape == (256, 256, 3),\
        "img_norm shape must be (256, 256, 3)"

        out_reg, out_clf = self.sess_palm.run(
            ['regressors:0', 'classificators:0'],
            feed_dict={'input:0': img_norm.reshape(1, 256, 256, 3)})
        out_reg = out_reg[0]
        out_clf = out_clf[0, :, 0]

        # finding the best prediction
        out_clf = np.clip(out_clf, -20, 20)
        probabilities = self._sigm(out_clf)
        detection_mask = probabilities > 0.3
        candidate_detect = out_reg[detection_mask]
        candidate_anchors = self.anchors[detection_mask]
        probabilities = probabilities[detection_mask]

        if candidate_detect.shape[0] == 0:
            print("No hands found")
            return None, None

        # Pick the best bounding box with non maximum suppression
        # the boxes must be moved by the corresponding anchor first
        moved_candidate_detect = candidate_detect.copy()
        moved_candidate_detect[:, :2] = candidate_detect[:, :2] + (
            candidate_anchors[:, :2] * 256)
        box_ids = non_max_suppression_fast(moved_candidate_detect[:, :4],
                                           probabilities)

        keypoints_list = []
        side_list = []

        for max_idx in box_ids:
            # bounding box offsets, width and height
            dx, dy, w, h = candidate_detect[max_idx, :4]
            center_wo_offst = candidate_anchors[max_idx, :2] * 256

            # 7 initial keypoints
            keypoints = center_wo_offst + candidate_detect[max_idx,
                                                           4:].reshape(-1, 2)
            side = max(w, h) * self.box_enlarge

            keypoints_list.append(keypoints)
            side_list.append(side)

        return keypoints_list, side_list
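    # detect_hand relies on attributes initialized elsewhere in the class
    # (sess_palm, anchors, box_enlarge) and on a _sigm helper that is not
    # shown. Assuming _sigm is a plain element-wise logistic sigmoid (the
    # logits were already clipped to [-20, 20] above, so overflow is not a
    # concern), a minimal sketch:
    #
    #   def _sigm(self, x):
    #       return 1.0 / (1.0 + np.exp(-x))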
Example #6
def multiple_template_matching(origin, tmp):
    img_gray = cv2.cvtColor(origin, cv2.COLOR_BGR2GRAY)
    w, h = tmp.shape[::-1]

    res = cv2.matchTemplate(img_gray, tmp, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= THRESHOLD)

    boxes = []
    for pt in zip(*loc[::-1]):
        boxes.append([*pt, pt[0] + w, pt[1] + h])
    pick = non_max_suppression_fast(np.array(boxes), OVERLAP_THRESH)

    for (startX, startY, endX, endY) in pick:
        cv2.rectangle(origin, (startX, startY), (endX, endY), (0, 255, 0), 2)

    return origin
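# Hypothetical usage; THRESHOLD and OVERLAP_THRESH are module-level
# constants assumed to be defined elsewhere (e.g. 0.8 and 0.3), and the
# file names are placeholders:
#
#   scene = cv2.imread("scene.png")
#   template = cv2.imread("template.png", cv2.IMREAD_GRAYSCALE)
#   cv2.imwrite("matches.png", multiple_template_matching(scene, template))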
Example #7
def extract(image, string, thresh):
    copy = image.copy()
    image_gray = cv2.cvtColor(copy, cv2.COLOR_BGR2GRAY)
    #Read the templates
    template_data = []
    #make a list of all template images from a directory
    files = glob.glob(string + '/*')
    for myfile in files:
        t_image = cv2.imread(myfile, 0)
        template_data.append(t_image)
    arr = []
    #loop for matching
    for tmp in template_data:
        (tW, tH) = tmp.shape[::-1]
        res = cv2.matchTemplate(image_gray, tmp, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= thresh)

        #Draw rectangles around each instance in the image
        for pt in zip(*loc[::-1]):
            #cv2.rectangle(copy, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 1)
            arr.append([pt[0], pt[1], pt[0] + tW, pt[1] + tH])

    arr = np.array(arr)
    images = [('input.png', arr)]

    #Loop over the images
    for (imagePath, Boxes) in images:
        #Load the image
        image1 = cv2.imread(imagePath)
        orig = image1.copy()

        #Loop over the bounding boxes for each image and draw them
        for (startX, startY, endX, endY) in Boxes:
            cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 0, 255), 1)
        #Non-maximum suppression on the bounding boxes
        pick = non_max_suppression_fast(Boxes, probs=None, overlapThresh=0.3)

    return pick
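# Caveat (not from the source): the block above reloads a hard-coded
# 'input.png' from disk instead of reusing the image argument, and the
# rectangles drawn on orig are discarded; only the NMS-filtered box
# array is returned.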
Example #8
def generateBox(imagePath, destDir):
    img = cv2.imread(imagePath, 1)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # MSER: maximally stable extremal regions
    mser = cv2.MSER_create()

    # detect regions in gray scale image
    regions, _ = mser.detectRegions(img_gray)
    hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]

    # vis = img.copy()
    keep = []

    for c in hulls:
        x, y, w, h = cv2.boundingRect(c)

        # Filter out boxes from larger fonts; recognition of the 赖茅 and 仁酒 labels is affected by this
        if (w > 10 and w < 100 and h > 10 and h < 200):
            # pass
            # cv2.rectangle(vis, (x, y), (x + w, y + h), (255, 0, 0), 1)
            keep.append([x, y, x + w, y + h])

    # cv2.imshow('hulls', vis)
    # cv2.waitKey(0)

    orig = img.copy()
    keep2 = np.array(keep)
    pick = nms.non_max_suppression_fast(keep2, 0.5)
    # imgNameType = imageUrl.split('//')[-1]
    imgNameType = os.path.basename(imagePath)
    imgName = imgNameType.split('.')[0]

    for (startX, startY, endX, endY) in pick:
        cv2.rectangle(orig, (startX, startY), (endX, endY), (255, 0, 0), 1)
        cropped = img_gray[startY:endY, startX:endX]
        cv2.imwrite(
            '%s//%s-%d-%d-%d-%d.jpg' %
            (destDir, imgName, startX, startY, endX, endY), cropped)
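# Hypothetical usage (paths are placeholders): crops every picked MSER
# region of input.jpg into the crops directory as grayscale JPEGs:
#
#   generateBox("input.jpg", "crops")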
Example #9
def main_merge_with_NMS_optimizer():
    warnings.filterwarnings('ignore', category=UserWarning, module='skimage')

    new_test_ids = []
    rles = []

    detector = None
    detector_second = None
    with CustomObjectScope({
            'relu6':
            applications.mobilenet.relu6,
            'DepthwiseConv2D':
            applications.mobilenet.DepthwiseConv2D
    }):
        detector = load_model('detector_MobileNet.h5')
        detector_second = load_model('detector_MobileNet_Second.h5')
    u_net = load_model("U-net/Unet(32x32).h5",
                       custom_objects={'mean_iou': mean_iou})

    dir_ = "../../data/stage1_test/"
    #dir_ = "../../data/1/"

    # Read the data
    test_data = []
    print("Read data")
    ids = next(os.walk(dir_))[1]
    for n, id_ in tqdm(enumerate(ids), total=len(ids)):
        path = dir_ + id_ + "/images/"
        test_data.append(imread(path + id_ + ".png", as_grey=True))

    # Prepare the data
    sizes_nucl = load_obj("../sizes_nuclears")
    for i in range(len(test_data)):
        median = sizes_nucl[ids[i]]
        coef = median / DIAG_IMAGE

        if coef < 1:
            test_data[i] = resize(
                test_data[i],
                (int(test_data[i].shape[0] / (median / DIAG_IMAGE)),
                 int(test_data[i].shape[1] / (median / DIAG_IMAGE))),
                mode='constant',
                preserve_range=True)

    ids = next(os.walk(dir_))[1]
    for n, id_ in tqdm(enumerate(ids), total=len(ids)):
        try:
            image = test_data[n]

            images = []
            masks = []

            bounding_boxs = []
            bounding_boxs_nms = []

            # Create several rescaled versions of the source image
            cur_size = (int(image.shape[0] * MAX_RESIZE),
                        int(image.shape[1] * MAX_RESIZE))
            min_size = (image.shape[0] // MIN_RESIZE,
                        image.shape[1] // MIN_RESIZE)

            images.append(image)
            masks.append(np.zeros(image.shape, dtype=np.float64))
            while True:
                images.append(
                    resize(image,
                           cur_size,
                           mode='constant',
                           preserve_range=True))
                masks.append(np.zeros(cur_size, dtype=np.float64))

                cur_size = (int(cur_size[0] // COEF_RES),
                            int(cur_size[1] // COEF_RES))
                if cur_size[0] <= min_size[0] or cur_size[1] <= min_size[1]:
                    break

            # Process each of the resulting images
            for i in range(len(images)):
                im = images[i]

                # Cut square patches out of the image for the detector

                square_for_detector = []
                bounds = []
                bbs = []

                for h in range(SIZE_IMAGE[0], im.shape[0] + STRIDES[0],
                               STRIDES[0]):
                    lower_bound = min(h, im.shape[0])

                    for w in range(SIZE_IMAGE[1], im.shape[1] + STRIDES[1],
                                   STRIDES[1]):
                        right_bound = min(w, im.shape[1])

                        bounds.append((right_bound, lower_bound))
                        square_for_detector.append(im[lower_bound - SIZE_IMAGE[0]: lower_bound,
                                                           right_bound - SIZE_IMAGE[1]: right_bound]\
                                                           .reshape((SIZE_IMAGE[0], SIZE_IMAGE[1], 1)))

                square_for_detector = np.array(square_for_detector)

                # Predicts whether a patch contains nuclei
                is_nucl = detector.predict(square_for_detector)
                square_for_detector = np.array([
                    square_for_detector[i]
                    for i in range(len(square_for_detector))
                    if is_nucl[i][0] > TRESHHOLD_FIRST_DETECTOR
                ])
                bounds = [
                    bounds[i] for i in range(len(bounds))
                    if is_nucl[i][0] > TRESHHOLD_FIRST_DETECTOR
                ]

                # Predicts whether a patch contains exactly one nucleus
                if len(square_for_detector) > 0:
                    is_one_nucl = detector_second.predict(square_for_detector)

                    bounds = [
                        bounds[i] for i in range(len(bounds))
                        if is_one_nucl[i][0] > TRESHHOLD_SECOND_DETECTOR
                    ]
                    for box in bounds:
                        bbs.append((box[0] - SIZE_IMAGE[0],
                                    box[1] - SIZE_IMAGE[1], box[0], box[1]))

                bounding_boxs.append(bbs)
                bounding_boxs_nms.append(
                    non_max_suppression_fast(np.array(bbs), 0.6))

            result_mask = np.zeros(image.shape, dtype=np.float64)
            result_bbs = []

            # Merge all the boxes
            for i in range(len(images)):
                h_coef = images[i].shape[0] / image.shape[0]
                w_coef = images[i].shape[1] / image.shape[1]
                for bb in bounding_boxs_nms[i]:
                    box = (bb[0] // w_coef, bb[1] // h_coef, bb[2] // w_coef,
                           bb[3] // h_coef)
                    result_bbs.append(box)

            result_nms = non_max_suppression_fast(np.array(result_bbs), 0.5)

            # Prediction over the NMS boxes
            for box in result_nms:
                im_to_unet = np.ndarray((1, SIZE_IMAGE[0], SIZE_IMAGE[1], 1),
                                        dtype=np.float64)

                # Resize the patch to the U-Net input size
                im_to_unet[0] = resize(image[box[1]:box[3], box[0]:box[2]],
                                       SIZE_IMAGE,
                                       mode='constant',
                                       preserve_range=True).reshape(
                                           (SIZE_IMAGE[0], SIZE_IMAGE[1], 1))
                # Prediction
                pred_mask = u_net.predict(im_to_unet)[0]

                # Restore the original size and paste into the final mask
                copy_arr_to_arr(
                    result_mask[box[1]:box[3], box[0]:box[2]],
                    resize(pred_mask, (box[3] - box[1], box[2] - box[0]),
                           mode='constant',
                           preserve_range=True), "mean_except_zero")

            # Encode the mask
            mask_to_encode = result_mask

            rle = list(prob_to_rles(mask_to_encode, cutoff=0.5))
            rles.extend(rle)
            new_test_ids.extend([id_] * len(rle))
            if len(rle) == 0:
                rles.extend([[1, 1]])
                new_test_ids.extend([id_])

            save_result("../../data/detector_unet_pred/{}.png".format(id_),
                        image, mask_to_encode, images, bounding_boxs,
                        bounding_boxs_nms, result_bbs, result_nms)

        except StopIteration:
            print("Exception id: " + id_)

    # Create submission DataFrame
    sub = pd.DataFrame()
    sub['ImageId'] = new_test_ids
    sub['EncodedPixels'] = pd.Series(rles).apply(
        lambda x: ' '.join(str(y) for y in x))

    # Filter out garbage (masks with too few pixels)
    def f(x):
        res = 0
        # RLE comes as flattened (start, run_length) pairs; sum the run lengths
        for i in range(1, len(x), 2):
            res += x[i]
        return res

    sub['CountPix'] = pd.Series(rles).apply(f)
    sub = sub[sub['CountPix'] > 10][['ImageId', 'EncodedPixels']]

    sub.to_csv('detector_Unet.csv', index=False)
Example #10
# construct a list containing the images that will be examined
# along with their respective bounding boxes
images = [("/home/xingxi/Desktop/orange_00000226.jpg",
           np.array([(12, 96, 140, 224), (12, 84, 140, 212),
                     (24, 84, 152, 212), (36, 84, 164, 212),
                     (24, 96, 152, 224), (24, 108, 152, 236)]))]

# loop over the images
for (imagePath, boundingBoxes) in images:
    # load the image and clone it
    print("[x] %d initial bounding boxes" % (len(boundingBoxes)))
    image = cv2.imread(imagePath)
    orig = image.copy()

    # loop over the bounding boxes for each image and draw them
    for (startX, startY, endX, endY) in boundingBoxes:
        cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 0, 255), 2)

    # perform non-maximum suppression on the bounding boxes
    # pick = non_max_suppression_slow(boundingBoxes, 0.3)
    pick = non_max_suppression_fast(boundingBoxes, 0.3)
    print("[x] after applying non-maximum, %d bounding boxes" % (len(pick)))

    # loop over the picked bounding boxes and draw them
    for (startX, startY, endX, endY) in pick:
        cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)

    # display the images
    cv2.imshow("Original", orig)
    cv2.imshow("After NMS", image)
    cv2.waitKey(0)
Example #11
def run():

    print ('Start evaluating results')
    fileList = os.listdir(cfg.resultsFolder)
    resultsFileList = list(filter(lambda element: '.result' in element, fileList))

    detection_thresholds = np.arange(cfg.decision_threshold_min,
                                     cfg.decision_threshold_max,
                                     cfg.decision_threshold_step)

    totalTP = np.zeros(len(detection_thresholds))
    totalFN = np.zeros(len(detection_thresholds))
    totalFP = np.zeros(len(detection_thresholds))

    for resultsFile in resultsFileList:
        resultsFilePath = cfg.resultsFolder+'/'+resultsFile

        file = open(resultsFilePath, 'rb')
        imageResults = pickle.load(file)
        file.close()

        #Retrieve the data for this result
        detectedBoxes = imageResults['bboxes']
        detectedScores = imageResults['scores']
        imagePath = imageResults['imagepath']

        curThreshIDX = 0

        imageFilename = os.path.basename(imagePath) # Get the filename
        imageBasename = os.path.splitext(imageFilename)[0] #Take out the extension

        #Find the annotations for this image.
        annotationsFilePath = cfg.annotationsFolderPath+'/'+imageBasename+'.txt'
        annotatedBoxes = utils.readINRIAAnnotations(annotationsFilePath)

        for thresh in detection_thresholds:
            #Select only the bounding boxes that passed the current detection threshold
            idx, = np.where(detectedScores > thresh)

            if len(idx) > 0:
                threshBoxes = detectedBoxes[idx]
                threshScores = detectedScores[idx]
                #Apply NMS on the selected bounding boxes
                threshBoxes, threshScores = nms.non_max_suppression_fast(threshBoxes, threshScores, overlapthresh=cfg.nmsOverlapThresh)
            else:
                threshBoxes = []
                threshScores = []

            #Compute the statistics for the current detected boxes
            TP, FP, FN = eval.evaluateImage(annotatedBoxes, threshBoxes, threshScores)

            totalTP[curThreshIDX] += TP
            totalFP[curThreshIDX] += FP
            totalFN[curThreshIDX] += FN

            curThreshIDX += 1

    #Compute metrics
    print (totalTP + totalFP)
    detection_rate = totalTP / (totalTP + totalFN) #Detection rate
    miss_rate = 1 - detection_rate #Miss rate
    fppi = totalFP / len(resultsFileList) #FPPI (false positives per image)

    #Plot the results
    plt.figure()
    plt.plot(fppi, miss_rate, 'r', label='Miss-Rate vs FPPI')

    plt.xlabel('FPPI ')
    plt.ylabel('Error rate')

    plt.title(cfg.model + ' ' + cfg.modelFeatures)
    plt.legend()
    plt.show()
Example #12
      sscores = np.squeeze(scores)

      frame,bboxes = vis_util.visualize_boxes_and_labels_on_image_array(
              image,
              sboxes,
              sclasses,
              sscores,
              s_category_index,
              min_score_thresh=PERSON_TH,
              max_boxes_to_draw=7,
              use_normalized_coordinates=True,
              skip_scores=True,
              line_thickness=8)
      #xmin,ymin,xmax,ymax = bboxes
      #rects.append((xmin,ymin,xmax,ymax))
      rects = non_max_suppression_fast(bboxes,0.80)
      objects = ct.update(rects)

      # loop over the tracked objects
      for (objectID, centroid) in objects.items():
            print(objects.keys())
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
Example #13
#part two
#process the prediction data and make an output image
image = cv2.imread(args["image"])
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
clone = image.copy()
xcoords = (image.shape[1]//8) - (8 - 1) #width of picture//stepSize - (stepSize - 1)
ycoords = (image.shape[0]//8) - (8 - 1) #height of picture//stepSize - (stepSize - 1)
boxes = []
for i in range(0, predictions.shape[0]):
    if predictions[i][1] > .4:
        x1 = ((i % xcoords)*8)
        y1 = ((i // xcoords)*8)
        x2 = x1 + WINDOW_DIM 
        y2 = y1 + WINDOW_DIM 
        boxes.append([x1,y1,x2,y2])
        circX = ((i % xcoords)*8)+32
        circY = ((i // xcoords)*8)+32 #caution! This may be a hacky fix!!!!
        cv2.circle(clone, (circX, circY), 4, 255, -1)

boxes = np.array(boxes)
boxes_nonmax = non_max_suppression_fast(boxes, 0.5)
for i in range(len(boxes_nonmax)):
    x1 = boxes_nonmax[i][0]
    y1 = boxes_nonmax[i][1]
    x2 = boxes_nonmax[i][2]
    y2 = boxes_nonmax[i][3]
    cv2.rectangle(clone, (x1,y1),(x2,y2), 255)

cv2.imwrite("output.jpg", clone)
Example #14
ax1: plt.Axes
ax1.hist(detection.ravel(), 512, (-1, 1), log=True)
ax2.hist(detection_maxsup.ravel(), 512, (-1, 1), log=True)

rect_gray1 = img_gray.copy()
with Timer("NMS"):
    bb_locs = []
    h, w, _ = template.shape
    # Transform to x1, x2, y1, y2 points first
    for idx, pt in enumerate(locations):
        x1 = pt[0]
        x2 = x1 + w
        y1 = pt[1]
        y2 = y1 + h
        bb_locs.append([x1, y1, x2, y2])
    nonmaxed = nms.non_max_suppression_fast(np.array(bb_locs), 0.2)
    for pt in nonmaxed:
        print(f"Found: {pt}")
        cv2.rectangle(rect_gray1, (pt[0], pt[1]), (pt[0] + w, pt[1] + h), (255, 0, 0), 1)

fig = plt.figure()
ax = fig.subplots()
ax.imshow(rect_gray1, vmin=-1, vmax=1)
ax.set_title("Nonmax suppression")

print(f"Min: {dec_min}, Max: {dec_max}")

print(f"Found regions: {len(locations)}")
if len(locations) == 0:
    sleep_time = 0.5
else:
Example #15
def testImage(imagePath, decisionThreshold = cfg.decision_threshold, applyNMS=True):

    file = open(cfg.modelPath, 'rb')  #pickled models must be read in binary mode
    svc = pickle.load(file)

    image = io.imread(imagePath, as_grey=True)
    image = util.img_as_ubyte(image) #Read the image as bytes (pixels with values 0-255)

    rows, cols = image.shape
    pyramid = tuple(pyramid_gaussian(image, downscale=cfg.downScaleFactor))

    scale = 0
    boxes = None
    scores = None

    for p in pyramid[0:]:
        #We now have the subsampled image in p

        #Add padding to the image, using reflection to avoid border effects
        if cfg.padding > 0:
            p = pad(p,cfg.padding,'reflect')

        try:
            views = view_as_windows(p, cfg.window_shape, step=cfg.window_step)
        except ValueError:
            #block shape is bigger than image
            break

        num_rows, num_cols, width, height = views.shape
        for row in range(0, num_rows):
            for col in range(0, num_cols):

                #Get current window
                subImage = views[row, col]
                #Extract features
                feats = feature_extractor.extractFeatures(subImage)
                #Obtain prediction score
                decision_func = svc.decision_function(feats)

                if decision_func > decisionThreshold:
                    # Pedestrian found!
                    h, w = cfg.window_shape
                    scaleMult = math.pow(cfg.downScaleFactor, scale)

                    x1 = int(scaleMult * (col*cfg.window_step - cfg.padding + cfg.window_margin))
                    y1 = int(scaleMult * (row*cfg.window_step - cfg.padding + cfg.window_margin))
                    x2 = int(x1 + scaleMult*(w - 2*cfg.window_margin))
                    y2 = int(y1 + scaleMult*(h - 2*cfg.window_margin))

                    bbox = (x1, y1, x2, y2)
                    score = decision_func[0]

                    if boxes is not None:
                        boxes = np.vstack((bbox, boxes))
                        scores = np.hstack((score, scores))
                    else:
                        boxes = np.array([bbox])
                        scores = np.array([score])
        scale += 1

    if applyNMS:
        #From all the bounding boxes that are overlapping, take those with maximum score.
        boxes, scores = nms.non_max_suppression_fast(boxes, scores, cfg.nmsOverlapThresh)

    return boxes, scores
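# Hypothetical usage, assuming cfg.modelPath points at a pickled linear
# SVM trained on the same features (the image path is a placeholder):
#
#   boxes, scores = testImage("pedestrian_scene.png")
#   if boxes is not None:
#       for (x1, y1, x2, y2), s in zip(boxes, scores):
#           print((x1, y1, x2, y2), s)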
Example #16
# all points
points = []
CXP, CYP = 0, 0
# loop runs if capturing has been initialized.
while True:
    # reads frames from a video
    ret, frames = cap.read()

    if ret:
        # convert to gray scale of each frames
        gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)

        # Detects cars of different sizes in the input image
        cars = car_cascade.detectMultiScale(gray, 1.03, 5, minSize=(50, 50))

        cars = non_max_suppression_fast(cars, 0.1)  # note: detectMultiScale returns (x, y, w, h) rows, while a corner-format NMS expects (x1, y1, x2, y2)

        # To draw a rectangle in each cars
        for (x, y, w, h) in cars:
            # Calculating the centroid
            cX = int(x + 0.5 * w)
            cY = int(y + 0.5 * h)

            # speed
            speed = np.sqrt((cX - CXP)**2 + (cY - CYP)**2)
            speed = np.floor(speed)

            # update
            CXP, CYP = cX, cY

            # Drawing the centroid
Example #17
def testImage(imagePath,
              decisionThreshold=cfg.decision_threshold,
              applyNMS=True):

    file = open(cfg.modelPath, 'rb')  #pickled models must be read in binary mode
    svc = pickle.load(file)

    image = io.imread(imagePath, as_grey=True)
    image = util.img_as_ubyte(
        image)  #Read the image as bytes (pixels with values 0-255)

    rows, cols = image.shape
    pyramid = tuple(pyramid_gaussian(image, downscale=cfg.downScaleFactor))

    scale = 0
    boxes = None
    scores = None

    for p in pyramid[0:]:
        #We now have the subsampled image in p

        #Add padding to the image, using reflection to avoid border effects
        if cfg.padding > 0:
            p = pad(p, cfg.padding, 'reflect')

        try:
            views = view_as_windows(p, cfg.window_shape, step=cfg.window_step)
        except ValueError:
            #block shape is bigger than image
            break

        num_rows, num_cols, width, height = views.shape
        for row in range(0, num_rows):
            for col in range(0, num_cols):

                #Get current window
                subImage = views[row, col]
                #Extract features
                feats = feature_extractor.extractFeatures(subImage)
                #Obtain prediction score
                decision_func = svc.decision_function(
                    np.array(feats).reshape(1, -1))

                if decision_func > decisionThreshold:
                    # Pedestrian found!
                    h, w = cfg.window_shape
                    scaleMult = math.pow(cfg.downScaleFactor, scale)

                    x1 = int(scaleMult * (col * cfg.window_step - cfg.padding +
                                          cfg.window_margin))
                    y1 = int(scaleMult * (row * cfg.window_step - cfg.padding +
                                          cfg.window_margin))
                    x2 = int(x1 + scaleMult * (w - 2 * cfg.window_margin))
                    y2 = int(y1 + scaleMult * (h - 2 * cfg.window_margin))

                    bbox = (x1, y1, x2, y2)
                    score = decision_func[0]

                    if boxes is not None:
                        boxes = np.vstack((bbox, boxes))
                        scores = np.hstack((score, scores))
                    else:
                        boxes = np.array([bbox])
                        scores = np.array([score])
        scale += 1

    if applyNMS:
        #From all the bounding boxes that are overlapping, take those with maximum score.
        boxes, scores = nms.non_max_suppression_fast(boxes, scores,
                                                     cfg.nmsOverlapThresh)

    return boxes, scores
Example #18
############ BACKGROUND SUBTRACTION
# detect people in the frame
    (rects, weights) = hog.detectMultiScale(frame,
                                            winStride=(4, 4),
                                            padding=(8, 8),
                                            scale=1.05)
    print(weights)
    tt = []
    for i in range(0, len(weights)):
        if weights[i] > 1.1:
            (x, y, w, h) = rects[i]
            tt.append([x, y, x + w, y + h])
    rects = np.array(tt)
    #rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])

    pick = non_max_suppression_fast(rects, overlapThresh=0.65)

    # connecting new points to old paths
    if old_circles:
        for (xC, yC) in old_circles:
            flag = False
            for (xA, yA, xB, yB) in pick:
                (A, B) = ((xA + xB) / 2, (yA + yB) / 2)
                if (sqrt((A - xC)**2 + (B - yC)**2) <= 20):
                    flag = True
                    path[(A, B)] = path[(xC, yC)]
                    color[(A, B)] = color[(xC, yC)]
                    check[(A, B)] = check[(xC, yC)]
                    if (A, B) != (xC, yC):
                        del color[(xC, yC)]
                        del path[(xC, yC)]
Example #19
def testImage(imagePath, decisionThreshold = cfg.decision_threshold, applyNMS=True):

    fileList = os.listdir(cfg.modelRootPath)

    # Filter all model files
    modelsList = filter(lambda element: '.model' in element, fileList)

    # Filter our specific feature method
    currentModel = cfg.model+'_'+cfg.modelFeatures
    currentModelsList = filter(lambda element: currentModel in element, modelsList)


    models = []
    rectangleModel = []
    subImages = [] #To save background crops

    for modelname in currentModelsList:

        file = open(cfg.modelRootPath + modelname, 'rb')  #binary mode for pickle
        svc = pickle.load(file)

        if 'Rect' in modelname:
            rectangleModel.append(svc)
        else:
            models.append(svc)

        file.close()

    image = io.imread(imagePath, as_grey=True)
    image = util.img_as_ubyte(image) #Read the image as bytes (pixels with values 0-255)

    rows, cols = image.shape
    pyramid = tuple(pyramid_gaussian(image, downscale=cfg.downScaleFactor))

    scale = 0
    boxes = None
    scores = None

    #
    # for p in pyramid[0:]:
    #     #We now have the subsampled image in p
    #     window_shape = (64,64)
    #
    #     #Add padding to the image, using reflection to avoid border effects
    #     if cfg.padding > 0:
    #         p = pad(p,cfg.padding,'reflect')
    #
    #     try:
    #         views = view_as_windows(p, window_shape, step=cfg.window_step)
    #     except ValueError:
    #         #block shape is bigger than image
    #         break
    #
    #     num_rows, num_cols, width, height = views.shape
    #
    #     for row in range(0, num_rows):
    #         for col in range(0, num_cols):
    #             #Get current window
    #             subImage = views[row, col]
    #             # subImages.append(subImage)   #To save backgorund crops: Accumulate them in an array
    #             #Extract features
    #             feats = feature_extractor.extractFeatures(subImage)
    #
    #             #Obtain prediction score for each model
    #             for model in models:
    #
    #                 decision_func = model.decision_function(feats)
    #
    #                 if decision_func > 0.4:
    #                     # Signal found!
    #                     h, w = window_shape
    #                     scaleMult = math.pow(cfg.downScaleFactor, scale)
    #
    #                     x1 = int(scaleMult * (col*cfg.window_step - cfg.padding + cfg.window_margin))
    #                     y1 = int(scaleMult * (row*cfg.window_step - cfg.padding + cfg.window_margin))
    #                     x2 = int(x1 + scaleMult*(w - 2*cfg.window_margin))
    #                     y2 = int(y1 + scaleMult*(h - 2*cfg.window_margin))
    #
    #                     #bootstrapping: Save image (if positive)
    #                     #subImages.append(subImage)
    #
    #                     bbox = (x1, y1, x2, y2)
    #                     score = decision_func[0]
    #
    #                     if boxes is not None:
    #                         boxes = np.vstack((bbox, boxes))
    #                         scores = np.hstack((score, scores))
    #                     else:
    #                         boxes = np.array([bbox])
    #                         scores = np.array([score])
    #                     break
    #
    #     scale += 1

    scale = 0
    for pR in pyramid[0:]:
        #We now have the subsampled image in p
        window_shape = (96,48)

        #Add padding to the image, using reflection to avoid border effects
        if cfg.padding > 0:
            pR = pad(pR,cfg.padding,'reflect')

        try:
            views = view_as_windows(pR, window_shape, step=cfg.window_step)
        except ValueError:
            #block shape is bigger than image
            break

        num_rows, num_cols, width, height = views.shape

        for row in range(0, num_rows):
            for col in range(0, num_cols):
                #Get current window
                subImage = views[row, col]
                # subImages.append(subImage)   #To save background crops: Accumulate them in an array

                #Extract features
                feats = feature_extractor.extractFeatures(subImage)

                #Obtain prediction score for each model
                for model in rectangleModel:
                    decision_func = model.decision_function(feats)

                    if decision_func > 0.3:
                        # Signal found!
                        h, w = window_shape
                        scaleMult = math.pow(cfg.downScaleFactor, scale)

                        x1 = int(scaleMult * (col*cfg.window_step - cfg.padding + cfg.window_margin))
                        y1 = int(scaleMult * (row*cfg.window_step - cfg.padding + cfg.window_margin))
                        x2 = int(x1 + scaleMult*(w - 2*cfg.window_margin))
                        y2 = int(y1 + scaleMult*(h - 2*cfg.window_margin))

                        bbox = (x1, y1, x2, y2)
                        score = decision_func[0]

                        #bootstrapping: Save image (if positive)
                        subImages.append(subImage)

                        if boxes is not None:
                            boxes = np.vstack((bbox, boxes))
                            scores = np.hstack((score, scores))
                        else:
                            boxes = np.array([bbox])
                            scores = np.array([score])
                        break

        scale += 1

    # To save background crops
    # numSubImages = len(subImages)
    # for x in range(0,10): #Save 10 crops for each background image
    #     randomIndex = random.randint(1,numSubImages-1) #Get a random window index
    #     imageName = imagePath.split('/')  #Working on the crop name...
    #     imageName = imageName[len(imageName)-1]
    #     filename = (imageName[:-4]+'-'+str(x)+'.jpg')
    #     io.imsave('Results/'+filename, subImages[randomIndex])  #Save the crop
    #end To save backgorund crops

    # To save bootstrapping windows
    numSubImages = len(subImages)
    length = min(10, len(subImages))
    for x in range(0, length): #Save up to 10 windows with detections
        if numSubImages == 1:
            randomIndex = 0
        else:
            randomIndex = random.randint(1, numSubImages-1) #Get a random window index
        imageName = imagePath.split('/')  #Working on the crop name...
        imageName = imageName[len(imageName)-1]
        filename = (imageName[:-4]+'-'+str(x)+'_bootstrapping'+'.jpg')
        io.imsave('Bootstrapping/'+filename, subImages[randomIndex])  #Save the crop
    #end To save bootstrapping windows


    if applyNMS:
        #From all the bounding boxes that are overlapping, take those with maximum score.
        boxes, scores = nms.non_max_suppression_fast(boxes, scores, cfg.nmsOverlapThresh)

    return boxes, scores
Example #20
    ret, img = cap.read()
    img = cv2.resize(img,
                     None,
                     fx=scaling_factor,
                     fy=scaling_factor,
                     interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    bikes_without_nms, w = hog.detectMultiScale(gray,
                                                winStride=(8, 8),
                                                padding=(16, 16),
                                                scale=1.35)

    # bikes_without_nms may contain overlapping bounding boxes;
    # non-maximum suppression removes all the overlapping bounding boxes,
    # overlapThresh is the threshold, for more info visit pyimagesearch.com
    bikes = nms.non_max_suppression_fast(bikes_without_nms, overlapThresh=0.8)
    bikes = vehicle_tracking.padding(bikes)

    #---------------------------------------------------------------------------------------------------------------------
    # removal of false positives and filtering out those bikes outside of the detection region
    bikes_inside_box = []
    if len(bikes) != 0:
        # bikes, which is a numpy array is changed to a list before removing false positives,
        # after removing the false positives, the result is changed back to numpy array
        print "bikes before: ", bikes
        bikes = vehicle_tracking.removeFalsePositives(bikes.tolist(),
                                                      UPPER_LIMIT=150)
        bikes = np.array(bikes)
        print "bikes after: ", bikes

        i = 0
Example #21
def mainfunc():
    import numpy as np
    import cv2
    import nms
    import os
    import vehicle_tracking
    import datetime, time
    # import test_database
    # import prediction_regression
    # import sklearn_test
    # import PyQt4

    update = False


    xc=10
    yc=20
    line1 = [(10,380),(180,20)]
    line2 = [(465,20),(750,295)]
    drawLines = False

    cars_count=0
    last_cars_count=0

    car_cascade = cv2.CascadeClassifier('haarcascade_nepalese_vehicles.xml')
    cap = cv2.VideoCapture('cctv1.mp4')
    ret, img = cap.read()
    scaling_factor = 0.4
    height=int(np.size(img,0) * scaling_factor)
    width=int(np.size(img,1) * scaling_factor)
    detection_region = [(0, int(0.3*height)), (width, int(0.95 * height))]
    carspresent = []


    bikes_count=0
    last_bikes_count=0
    bikespresent = []
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() )


    def draw_detections(img, rects, thickness = 1):
        for x, y, w, h in rects:
            # the HOG detector returns slightly larger rectangles than the real objects.
            # so we slightly shrink the rectangles to get a nicer output.
            # pad_w, pad_h = int(0.15*w), int(0.05*h)
            # cv2.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness)
            pad_w, pad_h = int(0.25*w), int(0.25*h)
            cv2.rectangle(img, (int(x+1.5*pad_w), int(y+1.5*pad_h)), (x+w-pad_w, y+h-pad_h), (255, 0, 0), thickness)




    def compareObjects(cars, carspresent, cars_count, CENTROID_DIFFERENCE_THRESH, THRESHOLD_OF_HISTOGRAM):

        # if there are no cars present in the frame, then there is no point in executing any of these statements
        if len(cars) != 0:
            # roi_cars is the array of regions of interest of all the cars present in the current frame
            roi_cars = []
            roi_carspresent = []

            for (x, y, w, h) in cars:
                roi_cars.append(img[y:y + h, x:x + w])

            if len(carspresent) == 0:
                carspresent = cars
                roi_carspresent = roi_cars
            else:
                for (x, y, w, h) in carspresent:
                    roi_carspresent.append(img[y:y + h, x:x + w])

            # declaration of some temporary variables to make it easier for executing the for loops
            carspresent_temp = carspresent
            cars_temp = cars
            roi_carspresent_temp = roi_carspresent
            roi_cars_temp = roi_cars


            # For every car 'A' present in the 'carspresent' array (i.e. of the previous frame),
            # 'A' is compared with every other car 'B' in the 'cars' array (i.e. of the current frame).
            # If there is some degree of similarity between A and B, car A is replaced by B;
            # if there is no similarity at all, B must be a new car, so it is added to carspresent
            # and the count is incremented.
            for i in range(0, len(cars_temp)):
                newcar = cars_temp[i]
                roi_newcar = roi_cars_temp[i]
                update = False
                for j in range(0, len(carspresent_temp)):
                    roi_presentcar = roi_carspresent_temp[j]
                    presentcar = carspresent_temp[j]
                    if vehicle_tracking.isCentroidNear(newcar,presentcar, CENTROID_DIFFERENCE_THRESH = CENTROID_DIFFERENCE_THRESH):
                         # and vehicle_tracking.compareHist(roi_newcar, roi_presentcar, THRESHOLD_OF_HISTOGRAM = THRESHOLD_OF_HISTOGRAM):
                        update = True
                        carspresent[j] = cars[i]
                        roi_carspresent[j] = roi_cars[i]
                if update == False:
                    carspresent = np.vstack((carspresent, cars[i]))
                    cars_count = cars_count+1
                    roi_carspresent.append(roi_cars[i])

        return carspresent, cars_count




    while 1:
        date = time.localtime()
        ret, img = cap.read()
        img = cv2.resize(img, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        #***********************************************************************************************************************************************************
        # cars_without_nms may contain overlapping bounding boxes;
        # non-maximum suppression removes all the overlapping bounding boxes,
        # overlapThresh is the threshold, for more info visit pyimagesearch.com
        cars_without_nms = car_cascade.detectMultiScale(gray, 6, 5)
        cars = nms.non_max_suppression_fast(cars_without_nms, overlapThresh=0.1)

        #---------------------------------------------------------------------------------------------------------------------
        # removal of false positives and filtering out those cars outside of the detection region
        cars_inside_box = []
        if len(cars)!=0:
            # cars, which is a numpy array is changed to a list before removing false positives,
            # after removing the false positives, the result is changed back to numpy array
            cars = vehicle_tracking.removeFalsePositives(cars.tolist(),LOWER_LIMIT=41, UPPER_LIMIT= 200)
            cars = np.array(cars)


            i = 0
            for (x, y, w, h) in cars:
                if y > detection_region[0][1] and y + h < detection_region[1][1]:
                    cars_inside_box.append(cars[i].tolist())
                i = i + 1
        cars_inside_box = np.array(cars_inside_box)

        #---------------------------------------------------------------------------------------------------------------------


        # the function compareObjects compares all the objects detected in this frame with cars present from the previous frame;
        # if new objects have arrived in the frame, it increments the count by the number of new objects that just arrived.
        carspresent, cars_count = compareObjects(cars_inside_box,carspresent,cars_count, CENTROID_DIFFERENCE_THRESH=50, THRESHOLD_OF_HISTOGRAM=0.6)
        carspresent = nms.non_max_suppression_fast(carspresent, 0.9999999)
        # emptyVehiclesOutOfWindow function removes the bounding boxes
        # of those vehicles that have possibly gone out of the window(detection region)
        carspresent = vehicle_tracking.emptyVehiclesOutOfWindow(carspresent, cars_count, last_cars_count)
        last_cars_count = cars_count


        for (x, y, w, h) in cars_inside_box:
            cv2.rectangle(gray, (x, y), (x + w, y + h), (255, 0, 0), 1)

        cv2.putText(gray, 'Four Wheelers= ' + str(cars_count), (0, height), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), thickness=2)






        #***********************************************************************************************************************************************************

        bikes_without_nms, w = hog.detectMultiScale(gray, winStride=(8, 8), padding=(16, 16), scale=1.25)

        # bikes_without_nms may contain overlapping bounding boxes;
        # non-maximum suppression removes all the overlapping bounding boxes,
        # overlapThresh is the threshold, for more info visit pyimagesearch.com
        bikes = nms.non_max_suppression_fast(bikes_without_nms, overlapThresh=0.8)

        # ---------------------------------------------------------------------------------------------------------------------
        # removal of false positives and filtering out those bikes outside of the detection region
        bikes_inside_box = []
        if len(bikes) != 0:
            # bikes, which is a numpy array is changed to a list before removing false positives,
            # after removing the false positives, the result is changed back to numpy array
            print "bikes before: ", bikes
            bikes = vehicle_tracking.removeFalsePositives(bikes.tolist(), UPPER_LIMIT=400)
            bikes = np.array(bikes)
            print "bikes after: ", bikes

            i = 0
            for (x, y, w, h) in bikes:
                if y > detection_region[0][1] and y + h < detection_region[1][1]:
                    bikes_inside_box.append(bikes[i].tolist())
                i = i + 1
        bikes_inside_box = np.array(bikes_inside_box)

        # ---------------------------------------------------------------------------------------------------------------------


        # the function compareObjects compares all the objects detected in this frame with bikes present from the previous frame;
        # if new objects have arrived in the frame, it increments the count by the number of new objects that just arrived.
        bikespresent, bikes_count = compareObjects(bikes_inside_box, bikespresent, bikes_count,
                                                   CENTROID_DIFFERENCE_THRESH=45, THRESHOLD_OF_HISTOGRAM=0.5)
        bikespresent = nms.non_max_suppression_fast(bikespresent, 0.9999999)
        # emptyVehiclesOutOfWindow function removes the bounding boxes
        # of those vehicles that have possibly gone out of the window(detection region)
        bikespresent = vehicle_tracking.emptyVehiclesOutOfWindow(bikespresent, bikes_count, last_bikes_count)
        last_bikes_count = bikes_count

        # print "bikes_inside_box: \n", bikes_inside_box
        # print "bikes_present: \n", bikespresent


        # draw_detections(gray,bikes_inside_box)
        draw_detections(gray, bikes_inside_box)

        cv2.putText(gray, 'Two Wheelers= ' + str(bikes_count), (int(width/2), height), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), thickness=2)
        cv2.rectangle(gray, detection_region[0], detection_region[1], (255, 255, 0), thickness=2)



        #***********************************************************************************************************************************************************








        cv2.rectangle(gray, detection_region[0], detection_region[1], (0, 0, 255))

        cv2.circle(gray, (xc, yc), 1, (255, 0, 0), thickness=3)
        if drawLines:
            cv2.line(gray, line1[0], line1[1], (255, 0, 0))
            cv2.line(gray, line2[0], line2[1], (255, 0, 0))
        cv2.imshow("main window", gray)

        # print xc,yc
        k = cv2.waitKey(15) & 0xff
        if k == 27:
            break
        elif k == 119:
            yc = yc - 5
        elif k == 97:
            xc = xc - 5
        elif k == 115:
            yc = yc + 5
        elif k == 100:
            xc = xc + 5
        elif k == 49:
            line1[0] = (xc, yc)
            print('Line 1, starting point set to: ', line1[0])
        elif k == 50:
            line1[1] = (xc, yc)
            print('Line 1, ending point set to: ', line1[1])
        elif k == 51:
            line2[0] = (xc, yc)
            print('Line 2, starting point set to: ', line2[0])
        elif k == 52:
            line2[1] = (xc, yc)
            print('Line 2, ending point set to: ', line2[1])
        elif k == 32:
            drawLines = True

        # print date.tm_min
        # if (date.tm_min == 15 or date.tm_min == 30 or date.tm_min == 45 or date.tm_min == 00) and update==False:
        #     update = True
        #     test_database.test_func(cars_count+bikes_count)
        #     sklearn_test.sklearn_func()
        #     # Choice = prediction_regression.compare_forecast()
        #     cars_count=0
        #     bikes_count=0


        # cv2.waitKey(0)
        print"-----------------------------------------------------------------------------"


    cap.release()
    cv2.destroyAllWindows()
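# Hypothetical entry point, assuming haarcascade_nepalese_vehicles.xml
# and cctv1.mp4 sit next to the script:
#
#   if __name__ == '__main__':
#       mainfunc()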
Example #22
# loop over the images
for (i, (imagePath, boundingBoxes)) in enumerate(images):
    # load the image and clone it
    # print ("[x] %d initial bounding boxes" % (len(boundingBoxes)))
    image = cv2.imread(imagePath)
    orig = image.copy()

    # loop over the bounding boxes for each image and draw them
    for (startX, startY, endX, endY) in boundingBoxes:
        cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 0, 255), 2)

    # perform non-maximum suppression on the bounding boxes
    # pick = non_max_suppression_slow(boundingBoxes, 0.3)

    pick = non_max_suppression_fast(boundingBoxes,
                                    probs=None,
                                    overlapThresh=0.3)

    # print ("[x] after applying non-maximum, %d bounding boxes" % (len(pick)))

    # loop over the picked bounding boxes and draw them
    for (startX, startY, endX, endY) in pick:
        cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)

    # display the images
    # cv2.imshow("Original" + i, orig)
    # cv2.imshow("After NMS" + i, image)
    # cv2.waitKey(0)

    # save the images
    cv2.imwrite("images/Original_" + str(i) + ".jpg", orig)
Example #23
def find_balls(bgr, RADIUS_MIN=40, RADIUS_MAX=75):
    """
    Find the contours for all three masks, then use these
    to compute the minimum enclosing circle and centroid
    Returns:
        color : red, blue, green
        pos: x, y, radius (x-y pixel coordinates)
    """

    # Green Range
    lower_green = np.array([60, 50, 50])
    upper_green = np.array([90, 255, 255])

    # Blue Range
    lower_blue = np.array([100, 50, 50])
    upper_blue = np.array([130, 255, 255])

    # Lower Red range
    Llower_red = np.array([0, 50, 50])
    Lupper_red = np.array([20, 255, 255])

    # Upper Red range
    Ulower_red = np.array([145, 50, 50])
    Uupper_red = np.array([179, 255, 255])

    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    # Blur the image and remove speckle noise
    blur = cv2.medianBlur(hsv, 9)
    blur = cv2.GaussianBlur(blur, (11, 11), 0)

    # Create a mask composed of both red ranges and merge together
    Lred_mask = cv2.inRange(blur, Llower_red, Lupper_red)
    Ured_mask = cv2.inRange(blur, Ulower_red, Uupper_red)
    red_mask = Lred_mask + Ured_mask
    red_mask = cv2.erode(red_mask,
                         np.ones((5, 5), dtype="uint8"),
                         iterations=3)
    red_mask = cv2.dilate(red_mask,
                          np.ones((5, 5), dtype="uint8"),
                          iterations=3)
    # Show only the red objects in the image
    red_circles = cv2.HoughCircles(red_mask, cv2.HOUGH_GRADIENT, 4.0, 10)

    # Create a mask for the blue range
    blue_mask = cv2.inRange(blur, lower_blue, upper_blue)
    blue_mask = cv2.erode(blue_mask,
                          np.ones((5, 5), dtype="uint8"),
                          iterations=3)
    blue_mask = cv2.dilate(blue_mask,
                           np.ones((5, 5), dtype="uint8"),
                           iterations=3)
    # Show only the blue objects in the image
    #blue_final = cv2.bitwise_and(bgr, bgr, mask = blue_mask)
    blue_circles = cv2.HoughCircles(blue_mask, cv2.HOUGH_GRADIENT, 4.0, 10)

    # Create a mask for the green range
    green_mask = cv2.inRange(blur, lower_green, upper_green)
    green_mask = cv2.erode(green_mask,
                           np.ones((5, 5), dtype="uint8"),
                           iterations=3)
    green_mask = cv2.dilate(green_mask,
                            np.ones((5, 5), dtype="uint8"),
                            iterations=3)
    # Show only the green objects in the image
    #green_final = cv2.bitwise_and(bgr, bgr, mask = green_mask)
    green_circles = cv2.HoughCircles(green_mask, cv2.HOUGH_GRADIENT, 4.0, 10)

    if red_circles is not None:
        red_circles = np.round(red_circles[0, :]).astype("int")
    if blue_circles is not None:
        blue_circles = np.round(blue_circles[0, :]).astype("int")
    if green_circles is not None:
        green_circles = np.round(green_circles[0, :]).astype("int")

    detected_red_balls = []
    detected_blue_balls = []
    detected_green_balls = []

    balls = {}

    ### Find the Contours
    # Red Contours
    red_contours = cv2.findContours(red_mask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
    if len(red_contours) > 0:  # only proceed if at least one contour was found
        # Find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and centroid
        for c in red_contours:
            approx = cv2.approxPolyDP(c, 0.01 * cv2.arcLength(c, True), True)
            area = cv2.contourArea(c)
            if ((len(approx) > 8) &
                (area > 30)):  # adjust this to make largest
                ((x, y), radius) = cv2.minEnclosingCircle(c)

                #print("red radius :", radius)
                if (radius > RADIUS_MIN) and (radius <
                                              RADIUS_MAX):  # only proceed
                    # if the radius meets a minimum size
                    if red_circles is not None:
                        for (x2, y2, r2) in red_circles:
                            d = np.sqrt((x - x2)**2 + (y - y2)**2)  # compute
                            #print(d)
                            # distance between hough circles & minEnclo
                            if d < 20 and (r2 > RADIUS_MIN) and (r2 <
                                                                 RADIUS_MAX):
                                detected_red_balls.append((x, y, radius))

    # Blue Contours
    blue_contours = cv2.findContours(blue_mask.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)[-2]
    if len(blue_contours) > 0:  # only proceed if at least one contour was found
        for c in blue_contours:
            approx = cv2.approxPolyDP(c, 0.01 * cv2.arcLength(c, True), True)
            area = cv2.contourArea(c)
            if (len(approx) > 8) and (area > 30):
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                # only proceed if the radius is within the allowed size range
                if RADIUS_MIN < radius < RADIUS_MAX:
                    if blue_circles is not None:
                        for (x2, y2, r2) in blue_circles:
                            # distance between Hough and min-enclosing centres
                            d = np.sqrt((x - x2)**2 + (y - y2)**2)
                            if d < 20 and RADIUS_MIN < r2 < RADIUS_MAX:
                                detected_blue_balls.append((x, y, radius))

    # Green Contours
    green_contours = cv2.findContours(green_mask.copy(), cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)[-2]
    if len(green_contours) > 0:  # only proceed if at least one contour was found
        for c in green_contours:
            approx = cv2.approxPolyDP(c, 0.01 * cv2.arcLength(c, True), True)
            area = cv2.contourArea(c)
            if (len(approx) > 8) and (area > 30):
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                #print("green radius :", radius)
                if green_circles is not None:
                    # only proceed if the radius is within the allowed size range
                    if RADIUS_MIN < radius < RADIUS_MAX:
                        for (x2, y2, r2) in green_circles:
                            d = np.sqrt((x - x2)**2 + (y - y2)**2)
                            if d < 20 and RADIUS_MIN < r2 < RADIUS_MAX:
                                detected_green_balls.append((x, y, radius))

    # Apply Non-Maximum Suppression to the 3 sets, removes duplicate balls in
    # the image that have an overlap percentage of more than 0.5
    detected_red_balls = np.array(detected_red_balls)
    detected_blue_balls = np.array(detected_blue_balls)
    detected_green_balls = np.array(detected_green_balls)

    red_balls = np.array(non_max_suppression_fast(detected_red_balls, 0.5))
    blue_balls = np.array(non_max_suppression_fast(detected_blue_balls, 0.5))
    green_balls = np.array(non_max_suppression_fast(detected_green_balls, 0.5))
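    # NOTE: non_max_suppression_fast is usually written for corner boxes
    # (x1, y1, x2, y2), while it is fed (x, y, radius) triples here. If the
    # nms helper in use is the common corner-based variant, convert first, e.g.
    #   boxes = np.array([(x - r, y - r, x + r, y + r)
    #                     for (x, y, r) in detected_red_balls])
    # and map the kept boxes back to circles afterwards.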

    # Group the detections by colour; empty arrays are simply skipped below
    balls = {"red": red_balls, "blue": blue_balls, "green": green_balls}
    #df = pd.DataFrame(columns = ["x", "y", "r", "c", "pos"], index = indexes)
    columns = ["x", "y", "r", "c", "pos"]
    df = pd.DataFrame(columns=columns)  # initialize the empty DataFrame

    for key in balls.keys():
        found_balls = balls[key]
        if len(found_balls) > 0:
            print("[INFO] %d %s balls detected." % (len(found_balls), key))
            for ball in found_balls:
                x, y, r = ball

                # Check position of ball: bottom (B) or top (T) half of frame
                if y > bgr.shape[0] // 2:
                    pos = "B"
                else:
                    pos = "T"

                c = key[0]
                z = pd.DataFrame([[x, y, r, c.upper(), pos]], columns=columns)
                df = pd.concat([df, z], ignore_index=True)

            # draw the balls
            for x, y, r in list(found_balls):
                cv2.circle(bgr, (int(x), int(y)), int(r), (0, 255, 255), 2)
                cv2.circle(bgr, (int(x), int(y)), 3, (255, 255, 255), -1)

            cv2.line(bgr, (0, bgr.shape[0] // 2),
                     (bgr.shape[1], bgr.shape[0] // 2), (0, 255, 255), 5)

        else:
            print("[INFO] No %s balls found!" % key)

    return df, bgr
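
A quick usage sketch for the detector above (the enclosing def sits outside
this excerpt, so the name detect_balls and its single-BGR-frame signature are
assumptions, not the project's real API):

import cv2

frame = cv2.imread("table.png")         # any BGR test image
df, annotated = detect_balls(frame)     # hypothetical name for the function above
print(df)                               # columns: x, y, r, c (colour), pos (T/B)
cv2.imshow("balls", annotated)
cv2.waitKey(0)
cv2.destroyAllWindows()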
Example #24
            print('Ship found!')
            cv2.waitKey(1500)

bboxes = np.delete(bboxes, (0), axis=0)
cv2.destroyAllWindows()

img_bboxes = image.copy()
for box in bboxes:
    cv2.rectangle(img_bboxes, (box[0], box[1]), (box[2], box[3]), (0, 0, 255),
                  2)

cv2.namedWindow('Bounding boxes', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Bounding boxes', img_bboxes.shape[1] // 2,
                 img_bboxes.shape[0] // 2)
cv2.imshow('Bounding boxes', img_bboxes)

# Non-maximum suppression
img_nms_bboxes = image.copy()
nms_bboxes = non_max_suppression_fast(bboxes, 0.3)

for box in nms_bboxes:
    cv2.rectangle(img_nms_bboxes, (box[0], box[1]), (box[2], box[3]),
                  (0, 255, 0), 2)

cv2.namedWindow('Non-maximum suppression', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Non-maximum suppression', img_nms_bboxes.shape[1] // 2,
                 img_nms_bboxes.shape[0] // 2)
cv2.imshow('Non-maximum suppression', img_nms_bboxes)
cv2.waitKey(0)
cv2.destroyAllWindows()
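
Every example on this page calls some variant of non_max_suppression_fast.
For reference, a minimal sketch of the widely used corner-based version
(after Malisiewicz / pyimagesearch) is below; the helpers actually imported
in these snippets may differ in signature and return values:

import numpy as np

def non_max_suppression_fast(boxes, overlapThresh):
    # boxes: (N, 4) array of [x1, y1, x2, y2] corner coordinates
    if len(boxes) == 0:
        return np.empty((0, 4), dtype="int")
    boxes = boxes.astype("float")
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(y2)
    pick = []
    while len(idxs) > 0:
        # keep the box with the largest y2 among the remaining candidates
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        # intersection of the kept box with every other remaining box
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[idxs[:last]]
        # drop the kept box and everything overlapping it too much
        idxs = np.delete(
            idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))
    return boxes[pick].astype("int")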
Example #25
def eval_faster_rcnn(eval_model, imgPath, img_shape,
                     results_base_path, feature_node_name, classes, mode,
                     drawUnregressedRois=False, drawNegativeRois=False,
                     nmsThreshold=0.5, nmsConfThreshold=0.0, bgrPlotThreshold=0.8):

    # prepare model
    image_input = input_variable(img_shape, dynamic_axes=[Axis.default_batch_axis()], name=feature_node_name)
    dims_input = input_variable((1,6), dynamic_axes=[Axis.default_batch_axis()], name='dims_input')
    frcn_eval = eval_model(image_input, dims_input)

    #dims_input_const = cntk.constant([image_width, image_height, image_width, image_height, image_width, image_height], (1, 6))
    print("Plotting results from Faster R-CNN model for image.")
    # evaluate single image

    _, cntk_img_input, dims = load_resize_and_pad(imgPath, img_shape[2], img_shape[1])

    dims_input = np.array(dims, dtype=np.float32)
    dims_input.shape = (1,) + dims_input.shape
    output = frcn_eval.eval({frcn_eval.arguments[0]: [cntk_img_input], frcn_eval.arguments[1]: dims_input})

    out_dict = dict([(k.name, k) for k in output])
    out_cls_pred = output[out_dict['cls_pred']][0]
    out_rpn_rois = output[out_dict['rpn_rois']][0]
    out_bbox_regr = output[out_dict['bbox_regr']][0]

    labels = out_cls_pred.argmax(axis=1)
    scores = out_cls_pred.max(axis=1).tolist()

    if mode=="returntags":
        class Tag(object):
            def __init__(self, label, score, bbox):
                self.label = label
                self.score = score
                self.bbox = bbox

            def serialize(self):
                return {
                    'label': self.label,
                    'score': self.score,
                    'bbox': self.bbox,
                }
        results = []
        regressed_rois = regress_rois(out_rpn_rois, out_bbox_regr, labels, dims)


        nmsKeepIndices = apply_nms_to_single_image_results(regressed_rois, labels, scores,
                                                               nms_threshold=nmsThreshold,
                                                               conf_threshold=nmsConfThreshold)

        print(len(out_rpn_rois))
        # imsave('./Temp/resized.jpg', imgDebug)  # imgDebug is only defined in "returnimage" mode
        for i in range(len(out_rpn_rois)):
            if labels[i] != 0:
                x = Tag(str(classes[labels[i]]), str(scores[i]), str(out_rpn_rois[i]))
                results.append(x)
        # return {}
        return results


    elif mode=="returnimage":
        evaluated_image_path = "{}/{}".format(results_base_path, 'evaluated_' + os.path.basename(imgPath))
        if drawUnregressedRois:
            # plot results without final regression
            imgDebug = visualizeResultsFaster(imgPath, labels, scores, out_rpn_rois, img_shape[2], img_shape[1],
                                              classes, nmsKeepIndices=None, boDrawNegativeRois=drawNegativeRois,
                                              decisionThreshold=bgrPlotThreshold)
            imsave(evaluated_image_path, imgDebug)
        else:
            # apply regression and nms to bbox coordinates
            regressed_rois = regress_rois(out_rpn_rois, out_bbox_regr, labels, dims)

            nmsKeepIndices = apply_nms_to_single_image_results(regressed_rois, labels, scores,
                                                               nms_threshold=nmsThreshold,
                                                               conf_threshold=nmsConfThreshold)
            

            img,allboxes = visualizeResultsFaster(imgPath, labels, scores, regressed_rois, img_shape[2], img_shape[1],
                                         classes, nmsKeepIndices=nmsKeepIndices,
                                         boDrawNegativeRois=drawNegativeRois,
                                         decisionThreshold=bgrPlotThreshold)
            # imsave(evaluated_image_path, img)
            allboxes=np.array(allboxes)

            # perform non-maximum suppression on the bounding boxes
            pick = non_max_suppression_fast(allboxes, 0.6)
            # print("[x] after applying non-maximum, %d bounding boxes" % (len(pick)))
            black_bg = np.zeros_like(img)
            # loop over the picked bounding boxes and copy each box onto the black background
            for (startX, startY, endX, endY) in pick:
                roi = img[startY:endY, startX:endX]
                black_bg[startY:endY, startX:endX] = roi
            result = black_bg.copy()
            print(black_bg.shape)
            image = cv2.cvtColor(black_bg, cv2.COLOR_RGB2HSV)
            lower = np.array([18, 0, 0])
            upper = np.array([179, 255, 255])
            mask = cv2.inRange(image, lower, upper)
            result = cv2.bitwise_and(result,result, mask=mask)

            lengthThroughRotatedRectangle=[]
            lengthThroughManualCalculation=[]


            # length Calculation through Rotated Rectangle
            image=cv2.cvtColor(result,cv2.COLOR_BGR2GRAY)
            ret,thresh = cv2.threshold(image,127,255,0)


            working_image=thresh.copy()
            result_img=thresh.copy()
            working_image[:,:]=0
            result_img[:,:]=0
            kernel = np.ones((7,7), np.uint8)
            for (startX, startY, endX, endY) in pick:
                cord=(int(startX),int(startY),int(endX),int(endY))
                working_image[:,:]=0
                working_image[cord[1]:cord[3],cord[0]:cord[2]]=thresh[cord[1]:cord[3],cord[0]:cord[2]]
                result_img[cord[1]:cord[3],cord[0]:cord[2]]=thresh[cord[1]:cord[3],cord[0]:cord[2]]
                working_image=cv2.morphologyEx(working_image, cv2.MORPH_OPEN, kernel)
                contours, hierarchy = cv2.findContours(working_image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
                if(len(contours)>0):
                    c = max(contours, key = cv2.contourArea)
                    rect = cv2.minAreaRect(c)
                    (x, y), (width, height), angle = rect
                    height = int(height)
                    width = int(width)
                    # use the longer side of the rotated rect as the length
                    if width > height:
                        height = width
                    lengthThroughRotatedRectangle.append(height)


            # length calculation through manual row scanning
            for (startX, startY, endX, endY) in pick:
                cord = (int(startX), int(startY), int(endX), int(endY))
                widthofRectangle = cord[2] - cord[0]
                NonZeroPixels = []
                threshold = 0.20
                height = 0
                width = 0
                for i in range(startY, endY):
                    row = thresh[i, startX:endX]
                    NonZeroPixelsInRow = np.count_nonzero(row)
                    WidthRatioInRow = NonZeroPixelsInRow / widthofRectangle
                    # count the row if enough of its width is foreground
                    if WidthRatioInRow > threshold:
                        height = height + 1
                        NonZeroPixels.append(NonZeroPixelsInRow)
                if NonZeroPixels:  # guard against division by zero on empty boxes
                    width = round(sum(NonZeroPixels) / len(NonZeroPixels))
                if width > height:
                    height = width
                lengthThroughManualCalculation.append(height)


            # print("length through rotatedRectangle\n",lengthThroughRotatedRectangle)
            # print("length through ManualCalculation\n",lengthThroughManualCalculation)

            SpikesLength=[int((a+b)/2) for a,b in zip(lengthThroughRotatedRectangle,lengthThroughManualCalculation)]
            # print("spike length\n",SpikesLength)

            
            SpikesLength = [i * 0.1 for i in SpikesLength]
            print("Spike Lenght in cm",SpikesLength)

            # imsave(evaluated_image_path, thresh)
        return SpikesLength
    else:
        raise ValueError("Unsupported value found in 'mode' parameter")
Example #26
while True:
    date = time.localtime()
    ret, img = cap.read()
    img = cv2.resize(img,
                     None,
                     fx=scaling_factor,
                     fy=scaling_factor,
                     interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # cars_without_nms may contain overlapping bounding boxes;
    # non-maximum suppression removes all the overlapping bounding boxes,
    # overlapThresh is the threshold, for more info visit pyimagesearch.com
    cars_without_nms = car_cascade.detectMultiScale(gray, 6, 5)
    cars = nms.non_max_suppression_fast(cars_without_nms, overlapThresh=0.1)

    #---------------------------------------------------------------------------------------------------------------------
    # removal of false positives and filtering out those cars outside of the detection region
    cars_inside_box = []
    if len(cars) != 0:
        # cars, which is a numpy array is changed to a list before removing false positives,
        # after removing the false positives, the result is changed back to numpy array
        cars = vehicle_tracking.removeFalsePositives(cars.tolist(),
                                                     LOWER_LIMIT=41,
                                                     UPPER_LIMIT=200)
        cars = np.array(cars)

        i = 0
        for (x, y, w, h) in cars:
            if y > detection_region[0][1] and y + h < detection_region[1][1]:
Example #27
clone = image.copy()
boxes = []

for i in range(0, data.shape[0]):

    if data[i][PROB] > opts.prob:
        x1 = int(data[i][XCOORD])
        y1 = int(data[i][YCOORD])
        x2 = x1 + WINDOW_DIM
        y2 = y1 + WINDOW_DIM

        boxes.append([x1,y1,x2,y2])
        cv2.circle(clone, (x1 + WINDOW_DIM // 2, y1 + WINDOW_DIM // 2), 4, [0, 0, 255], -1)  # FOR STU'S

boxes = np.array(boxes)
boxes_nonmax = non_max_suppression_fast(boxes, 0.5,opts.minsamples-1)

utils_cluster.drawClusterColorsNMS(clone, boxes_nonmax)
cv2.imwrite("output.jpg", clone)



#minHits = [0,1,3,5,7]
#overlapPercent = np.arange(0.0,1.05,0.05) 
#for hit in minHits:
#    nBoxes = []
#    for overlap in overlapPercent:
#        boxes_nonmax = non_max_suppression_fast(boxes, overlap,hit)
#        nBoxes.append(len(boxes_nonmax))
#    nBoxes = np.array(nBoxes)
#    nCars = [88 for i in range(len(overlapPercent))]
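
The commented block above sketches a sweep over minimum-hit counts and
overlap thresholds; a runnable version, assuming the same three-argument
non_max_suppression_fast(boxes, overlapThresh, min_hits) used above and
matplotlib for the plot, might look like:

import numpy as np
import matplotlib.pyplot as plt

minHits = [0, 1, 3, 5, 7]
overlapPercent = np.arange(0.0, 1.05, 0.05)

for hit in minHits:
    nBoxes = []
    for overlap in overlapPercent:
        # count how many boxes survive NMS at this (overlap, hit) setting
        boxes_nonmax = non_max_suppression_fast(boxes, overlap, hit)
        nBoxes.append(len(boxes_nonmax))
    plt.plot(overlapPercent, nBoxes, label="min hits = %d" % hit)

# the commented-out nCars line above suggests 88 cars as ground truth
plt.axhline(88, linestyle="--", color="gray", label="ground truth (88)")
plt.xlabel("overlap threshold")
plt.ylabel("boxes kept after NMS")
plt.legend()
plt.show()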
Example #28
def main_ROI_NMS():
    warnings.filterwarnings('ignore', category=UserWarning, module='skimage')

    new_test_ids = []
    rles = []

    detector = None
    with CustomObjectScope({
            'relu6':
            applications.mobilenet.relu6,
            'DepthwiseConv2D':
            applications.mobilenet.DepthwiseConv2D
    }):
        detector = load_model('detector_MobileNet.h5')
    u_net = load_model("U-net/Unet(32x32).h5",
                       custom_objects={'mean_iou': mean_iou})

    dir_ = "../../data/stage1_test/"
    #dir_ = "../../data/1/"
    ids_test = os.walk(dir_)
    ids = next(ids_test)[1]
    for n, id_ in tqdm(enumerate(ids), total=len(ids)):
        try:
            path = dir_ + id_ + "/images/"
            image = imread(path + id_ + ".png", as_grey=True)

            images = []
            masks = []

            bounding_boxs = []
            bounding_boxs_nms = []

            # Reduce the image size
            cur_size = (image.shape[0], image.shape[1])
            min_size = (image.shape[0] // MIN_RESIZE,
                        image.shape[1] // MIN_RESIZE)

            images.append(image)
            masks.append(np.zeros(cur_size, dtype=np.float64))
            # keep shrinking while both dimensions stay at least as large as
            # the detector window and the minimum size
            while (cur_size[0] >= SIZE_IMAGE[0] and cur_size[0] >= min_size[0] and
                   cur_size[1] >= SIZE_IMAGE[1] and cur_size[1] >= min_size[1]):
                cur_size = (int(cur_size[0] // COEF_RES),
                            int(cur_size[1] // COEF_RES))
                if SIZE_IMAGE[0] > cur_size[0] or SIZE_IMAGE[1] > cur_size[1]:
                    break

                images.append(
                    resize(image,
                           cur_size,
                           mode='constant',
                           preserve_range=True))
                masks.append(np.zeros(cur_size, dtype=np.float64))
                # masks[-1].fill(0.45)

            # Prediction
            for i in range(len(images)):
                im = images[i]

                # Detector pass
                bbs = []
                for h in range(SIZE_IMAGE[0], im.shape[0] + STRIDES[0],
                               STRIDES[0]):
                    lower_bound = min(h, im.shape[0])
                    for w in range(SIZE_IMAGE[1], im.shape[1] + STRIDES[1],
                                   STRIDES[1]):
                        right_bound = min(w, im.shape[1])

                        part_image_analis = np.ndarray(
                            (1, SIZE_IMAGE[0], SIZE_IMAGE[1], 1),
                            dtype=np.float64)
                        part_image_analis[0] = im[lower_bound - SIZE_IMAGE[0]: lower_bound,
                                               right_bound - SIZE_IMAGE[1]: right_bound] \
                            .reshape((SIZE_IMAGE[0], SIZE_IMAGE[1], 1))

                        is_nucl = detector.predict(part_image_analis)[0][0]

                        if is_nucl > 0.99:
                            # box as (x1, y1, x2, y2): x offsets use the window
                            # width SIZE_IMAGE[1], y offsets the height SIZE_IMAGE[0]
                            bbs.append((right_bound - SIZE_IMAGE[1],
                                        lower_bound - SIZE_IMAGE[0],
                                        right_bound, lower_bound))

                bounding_boxs.append(bbs)
                bounding_boxs_nms.append(
                    non_max_suppression_fast(np.array(bbs), 0.6))

                # Prediction on the NMS-filtered boxes
                for box in bounding_boxs_nms[-1]:
                    im_to_unet = np.ndarray(
                        (1, SIZE_IMAGE[0], SIZE_IMAGE[1], 1), dtype=np.float64)
                    im_to_unet[0] = im[box[1]:box[3], box[0]:box[2]].reshape(
                        (SIZE_IMAGE[0], SIZE_IMAGE[1], 1))
                    pred_mask = u_net.predict(im_to_unet)[0]
                    copy_arr_to_arr(masks[i][box[1]:box[3], box[0]:box[2]],
                                    pred_mask.reshape(SIZE_IMAGE),
                                    "mean_except_zero")

            masks_resize = np.zeros(
                (len(masks), image.shape[0], image.shape[1]), dtype=np.float64)
            masks_resize[0] = masks[0]
            for j in range(1, len(masks)):
                masks_resize[j] = resize(masks[j],
                                         (image.shape[0], image.shape[1]),
                                         mode='constant',
                                         preserve_range=True)
                copy_arr_to_arr(masks_resize[0],
                                masks_resize[j],
                                mode="mean_except_zero")

            # Encoding
            mask_to_encode = masks_resize[0]

            rle = list(prob_to_rles(mask_to_encode, cutoff=0.5))
            rles.extend(rle)
            new_test_ids.extend([id_] * len(rle))
            if len(rle) == 0:
                rles.extend([[1, 1]])
                new_test_ids.extend([id_])

            save_result("../../data/detector_unet_pred/{}.png".format(id_),
                        image, mask_to_encde, masks, bounding_boxs,
                        bounding_boxs_nms)

        except StopIteration:
            print("Exception id: " + id_)

    # Create submission DataFrame
    sub = pd.DataFrame()
    sub['ImageId'] = new_test_ids
    sub['EncodedPixels'] = pd.Series(rles).apply(
        lambda x: ' '.join(str(y) for y in x))

    # Clean out garbage: drop masks whose RLE covers too few pixels
    def f(x):
        # x is a flat RLE list [start1, length1, start2, length2, ...];
        # total pixel count is the sum of the run lengths (odd indices)
        res = 0
        for i in range(1, len(x), 2):
            res += x[i]
        return res

    sub['CountPix'] = pd.Series(rles).apply(f)
    sub = sub[sub['CountPix'] > 10][['ImageId', 'EncodedPixels']]

    sub.to_csv('detector_Unet.csv', index=False)
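
The cleanup above depends on the run-length layout used for the submission:
EncodedPixels is a flat list of (start, run-length) pairs, so a mask's pixel
count is the sum of every second entry. A tiny worked check:

rle = [3, 2, 10, 4]      # runs starting at pixels 3 and 10, lengths 2 and 4
count = sum(rle[1::2])   # 2 + 4 = 6 foreground pixels in total
assert count == 6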
Example #29
cv2.imshow("closing", closing)
'''kernel2 = np.ones((3, 3), np.uint8)
erode = cv2.erode(closing, kernel2, iterations = 1)
cv2.imshow("erode", erode)'''

#contours, hierarchy = cv2.findContours(img_result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = cv2.findContours(closing, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)

can = []

for i in range(len(contours)):
    x, y, w, h = cv2.boundingRect(contours[i])
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    if x != 0 and y != 0:  # skip boxes anchored at the top/left border
        can.append([x, y, x + w, y + h])

# non-maximum suppression
result = nms.non_max_suppression_fast(np.array(can), 0.3)
print(len(result))
for i in range(len(result)):
    x1, y1, x2, y2 = result[i][0], result[i][1], result[i][2], result[i][3]
    cv2.rectangle(grayImg, (x1, y1), (x2, y2), (0, 255, 0), 2)

#cv2.imshow("asd2", img_result2)
#cv2.imshow("asd", img_result)
cv2.imshow("original", img)
cv2.imshow("nms", grayImg)

cv2.waitKey()
cv2.destroyAllWindows()
Example #30
File: detector.py  Project: axelBarroso/m3
def testImage(imagePath, decisionThreshold = cfg.decision_threshold, applyNMS=True):

    fileList = os.listdir(cfg.modelRootPath)

    # Filter all model files
    modelsList = list(filter(lambda element: '.model' in element, fileList))

    # Filter our specific feature method
    currentModel = cfg.model+'_'+cfg.modelFeatures
    currentModelsList = list(filter(lambda element: currentModel in element, modelsList))

    models = []
    subImages = [] #To save background crops

    for modelname in currentModelsList:

        file = open(cfg.modelRootPath + modelname, 'rb')
        svc = pickle.load(file)
        models.append(svc)

        file.close()

    image = io.imread(imagePath, as_grey=True)
    image = util.img_as_ubyte(image) #Read the image as bytes (pixels with values 0-255)

    rows, cols = image.shape
    pyramid = tuple(pyramid_gaussian(image, downscale=cfg.downScaleFactor))

    scale = 0
    boxes = None
    scores = None


    for p in pyramid[0:]:
        #We now have the subsampled image in p
        window_shape = (32,32)

        #Add padding to the image, using reflection to avoid border effects
        if cfg.padding > 0:
            p = pad(p,cfg.padding,'reflect')

        try:
            views = view_as_windows(p, window_shape, step=cfg.window_step)
        except ValueError:
            #block shape is bigger than image
            break

        num_rows, num_cols, width, height = views.shape

        for row in range(0, num_rows):
            for col in range(0, num_cols):
                #Get current window
                subImage = views[row, col]
                # subImages.append(subImage)   #To save background crops: accumulate them in an array
                #Extract features
                feats = feature_extractor.extractFeatures(subImage)

                #Obtain prediction score for each model
                for model in models:

                    decision_func = model.decision_function(feats)

                    # if decision_func > decisionThreshold:
                    if decision_func > 0.2:  #For bootstrapping
                        # Signal found!
                        h, w = window_shape
                        scaleMult = math.pow(cfg.downScaleFactor, scale)

                        x1 = int(scaleMult * (col*cfg.window_step - cfg.padding + cfg.window_margin))
                        y1 = int(scaleMult * (row*cfg.window_step - cfg.padding + cfg.window_margin))
                        x2 = int(x1 + scaleMult*(w - 2*cfg.window_margin))
                        y2 = int(y1 + scaleMult*(h - 2*cfg.window_margin))

                        #bootstrapping: Save image (if positive)
                        subImages.append(subImage)

                        bbox = (x1, y1, x2, y2)
                        score = decision_func[0]

                        if boxes is not None:
                            boxes = np.vstack((bbox, boxes))
                            scores = np.hstack((score, scores))
                        else:
                            boxes = np.array([bbox])
                            scores = np.array([score])
                        break

        scale += 1


    # To save background crops
    # numSubImages = len(subImages)
    # for x in range(0,10): #Save 10 crops for each background image
    #     randomIndex = random.randint(1,numSubImages-1) #Get a random window index
    #     imageName = imagePath.split('/')  #Working on the crop name...
    #     imageName = imageName[len(imageName)-1]
    #     filename = (imageName[:-4]+'-'+str(x)+'.jpg')
    #     io.imsave('Results/'+filename, subImages[randomIndex])  #Save the crop
    #end To save backgorund crops

    # To save bootstrapping windows
    numSubImages = len(subImages)
    length = min(10, len(subImages))
    for x in range(0,length) : #Save windows with detections (max 10)
        if numSubImages == 1:
            randomIndex = 0
        else:
            randomIndex = random.randint(1, numSubImages-1) #Get a random window index
        imageName = imagePath.split('/')  #Working on the crop name...
        imageName = imageName[len(imageName)-1]
        filename = (imageName[:-4]+'-'+str(x)+'.jpg')
        io.imsave('Bootstrapping/'+filename, subImages[randomIndex])  #Save the crop
    #end To save bootstrapping windows


    if applyNMS and boxes is not None:
        # From all the overlapping bounding boxes, keep those with maximum score.
        boxes, scores = nms.non_max_suppression_fast(boxes, scores, cfg.nmsOverlapThresh)

    return boxes, scores
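
The coordinate arithmetic in the loop above maps a window hit at pyramid
level `scale` back to the original image by multiplying with
downScaleFactor**scale and compensating for padding and margin. A small
standalone check (the cfg values below are assumptions, not the project's
real configuration):

import math

downScaleFactor = 1.5   # assumed stand-in for cfg.downScaleFactor
window_step = 8         # assumed stand-in for cfg.window_step
padding = 16            # assumed stand-in for cfg.padding
window_margin = 0       # assumed stand-in for cfg.window_margin

def window_to_original(row, col, scale, window_shape=(32, 32)):
    # same arithmetic as in testImage above
    h, w = window_shape
    scaleMult = math.pow(downScaleFactor, scale)
    x1 = int(scaleMult * (col * window_step - padding + window_margin))
    y1 = int(scaleMult * (row * window_step - padding + window_margin))
    x2 = int(x1 + scaleMult * (w - 2 * window_margin))
    y2 = int(y1 + scaleMult * (h - 2 * window_margin))
    return x1, y1, x2, y2

# a window at grid cell (4, 10) on pyramid level 2 covers this original box:
print(window_to_original(4, 10, 2))   # -> (144, 36, 216, 108)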
Example #31
def nms_core(boxes,
             olTh,
             selected,
             age_based_check=False,
             testMode=False,
             enObjPropExp=False,
             verbose=0,
             confThH=0.4):
    if age_based_check:
        sorted_indices = np.argsort(boxes[:, 5] - boxes[:, 6])
    else:
        sorted_indices = np.argsort(boxes[:, 5])

    if verbose > 0:
        print "sorted_indices "
        print sorted_indices

    # make it descending order
    sorted_indices = sorted_indices[::-1]
    if verbose > 0:
        print("before sorting")
        print(boxes)
    boxes = boxes[sorted_indices]
    if verbose > 0:
        print("after sorting")
        print(sorted_indices)
        print(boxes)

    suppress = [False] * len(boxes)

    for oIdx, box in enumerate(boxes):
        if not suppress[oIdx]:
            # if the current box is itself suppressed, do not let it suppress any other box
            for iIdx in range(oIdx + 1, len(boxes)):
                #let oIdx suppress only if it has same label as iIdx
                if boxes[iIdx, 4] == boxes[oIdx, 4]:
                    ol = findOverlap(boxes,
                                     oIdx,
                                     iIdx,
                                     olMode='area_box_under_sup',
                                     verbose=False)
                    suppress[iIdx] = suppress[iIdx] or (ol > olTh)
                    if (verbose > 0) and (ol > olTh):
                        print "========================"
                        print "oIdx is suppressing iIdx", oIdx, iIdx
                        print(boxes[oIdx, 5], "is suppressing", boxes[iIdx, 5])

                    # age and strongness update for the suppressed box
                    if (ol > olTh) and enObjPropExp:
                        #if high conf det is suppressing then reset the age of the track
                        #else take age info from iBox
                        if boxes[oIdx, 5] > confThH:
                            boxes[oIdx, 6] = 0
                        else:
                            boxes[oIdx, 6] = boxes[iIdx, 6]

                        #if oBox is suppressing iBox then keep strng_trk True if either of the
                        #boxes was strng
                        boxes[oIdx, 7] = boxes[oIdx, 7] or boxes[iIdx, 7]

                    if (verbose > 1) and suppress[iIdx]:
                        print "oIdx,iIdx", oIdx, iIdx
                        print "boxes[oIdx]"
                        print boxes[oIdx]
                        print "boxes[iIdx]"
                        print boxes[iIdx]

    selectedBoxes = []
    for sup, box in zip(suppress, boxes):
        if not sup:
            selectedBoxes.append(box)

    if verbose > 0:
        print(suppress)
        print("final boxes")
        print(selectedBoxes)

    selectedBoxes = np.asarray(selectedBoxes)

    #######Test NMSed boxes with ref implementation
    if testMode:
        from nms import non_max_suppression_fast
        boxes_ref = non_max_suppression_fast(boxes,
                                             olTh,
                                             selected,
                                             age_based_check=age_based_check)
        result = True
        if len(selectedBoxes) != len(boxes_ref):
            result = False
        else:
            for box_ti, box_os in zip(selectedBoxes, boxes_ref):
                err = np.sum(box_ti - box_os)
                if err != 0:
                    result = False

        if not result:
            print("===========NMS Failed================")
            print(boxes)
            print("result TI selectedBoxes")
            print(selectedBoxes)
            print("result boxes_ref")
            print(boxes_ref)
            sys.exit(0)

    return selectedBoxes
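
A usage sketch for nms_core. The eight-column box layout is inferred from the
indexing above (corners in columns 0-3, class label in 4, confidence in 5,
age in 6, strong-track flag in 7); findOverlap must be importable from the
same module, and the `selected` argument is only forwarded to the reference
implementation in testMode:

import numpy as np

# columns: [x1, y1, x2, y2, label, score, age, strong_track]
boxes = np.array([
    [10., 10., 50., 50., 1., 0.9, 0., 1.],
    [12., 12., 52., 52., 1., 0.7, 3., 0.],     # same label, heavy overlap: suppressed
    [100., 100., 140., 140., 2., 0.8, 1., 0.], # different label, kept
])

kept = nms_core(boxes, olTh=0.5, selected=None)
print(kept)   # the 0.9 box and the non-overlapping label-2 box survive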