Example #1
import cv2  # Blob and is_score come from the surrounding project

def find_blobs(image, size):
    center_x = size[0]/2
    center_y = size[1]/2
    params = cv2.SimpleBlobDetector_Params()
    params.filterByCircularity = True
    params.minCircularity = 0
    params.maxCircularity = 1
    params.minDistBetweenBlobs = 5
    params.filterByArea = True
    params.minArea = 1
    params.maxArea = 500000
    params.filterByInertia = True
    params.minInertiaRatio = 0
    params.maxInertiaRatio = 1
    params.filterByConvexity = True
    params.minConvexity = 0
    params.maxConvexity = 1
    detector = cv2.SimpleBlobDetector_create(params)
    found_blobs = detector.detect(image)
    blobs = []
    for blob in found_blobs:
        blob = Blob(blob.pt[0]-20, blob.pt[1]-20, blob.size/2)
        blob.set_center(center_x, center_y)
        if not is_score(blob, size):
            blobs.append(blob)
    #image = cv2.drawKeypoints(image, found_blobs, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    #cv2.imshow("Keypoints", image)
    #cv2.waitKey(0)
    return blobs
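A minimal driver for the snippet above might look as follows; the image path is hypothetical, and Blob/is_score come from the surrounding project.

import cv2

image = cv2.imread("balls.png", cv2.IMREAD_GRAYSCALE)  # hypothetical test image
blobs = find_blobs(image, (image.shape[1], image.shape[0]))
print("%d blobs kept after score filtering" % len(blobs))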
Example #2
    def _apply_fun(self, blob_generator, fun):
        threadsafe_generator = ThreadsafeIter(blob_generator,len(self.pipelines))
        executor = concurrent.futures.ThreadPoolExecutor(max_workers=len(self.pipelines))
        # Submit one worker per pipeline; each consumes the shared thread-safe generator
        futures = [executor.submit(fun, pipeline, threadsafe_generator) for pipeline in self.pipelines]
        generators = [gen for gen in concurrent.futures.as_completed(futures)]
        has_more_blobs = True
        while has_more_blobs:
            b = Blob()
            out_blobs = [next(gen.result()) for gen in generators]
            blob_uuids = [blob.meta.uuid for blob in out_blobs]
            # If not all UUIDs are equal or num outputs is not the same as the number of pipelines
            if not out_blobs or blob_uuids.count(blob_uuids[0]) != len(blob_uuids) or len(out_blobs) != len(self.pipelines):
                logging.error("Number of elements changed within ParallelAlgorithm pipelines. This is not allowed!")
                raise Exception("Error")

            # If there are no more blobs, we are done
            if len(out_blobs) == 0:
                has_more_blobs = False
            else:
                b.data = [blob.data.ravel() for blob in out_blobs]
                b.data = hstack(b.data)
                b.meta = out_blobs[0].meta
                yield b
        logging.info("Finished training in ParallelAlgorithm")
Example #3
def test_blob(image):
    regions = Region.find_regions_by_border_color(image)
    for i, region in enumerate(regions):
        region_mask = region.mask_image(image)
        blobs = region.find_blobs(image)
        Blob.draw_outlines(blobs, region_mask, (234, 34, 102))         
        utils.show_image(region_mask, time=1000)
Example #4
    def rectangle(x0,y0,x1,y1,res,math):
        '''Create a rectangular blob of charge given coordinates which represent 
        diagonal corners.

        Params:
                res ->  Correlated with the 'resolution' of the rectangle. Larger
                        will lead to longer and finer calculations

                math -> Math is the string containing a Polish notation function
                        of X, Y (TODO: add T). It is parsed into a function
                        using mathparse.math_parse'''

        rect = Blob()                       ## Initialize a new Blob...

        x0, y0, x1, y1 = float(x0), float(y0), float(x1), float(y1)

        xs = linspace(x0, x1, int(res/(y1-y0)))   ## Set up a vertical and horizontal line
        ys = linspace(y0, y1, int(res/(x1-x0)))   ## of charges (linspace needs an int count)

        for x in xs:                        ## Basically set up a meshgrid of x and y
            for y in ys:                    ## TODO: Look into replacing with Meshgrid.

                rect.add_point(Point(x,y))  ## Add each point to the blob

        f = math_parse(str(math))           ## Parse the Polish notation into python
                                            ## syntax.

        rect.math = lambda x, y: eval(f)    ## Evaluate the python syntax string
                                            ## into an expression, and bind it to
                                            ## a function at the math method of
                                            ## our blob

        blobs.append(rect)                  ## Put it on the list for later flattening
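mathparse.math_parse is not shown in these examples. To illustrate the idea of translating a Polish (prefix) notation string into Python syntax, a toy translator might look like this; it is an illustration only, not the real parser:

def toy_math_parse(expr):
    """Translate a prefix expression such as '* x + y 2' into '(x * (y + 2))'."""
    tokens = expr.split()

    def parse(pos):
        tok = tokens[pos]
        if tok in ('+', '-', '*', '/'):
            left, pos = parse(pos + 1)
            right, pos = parse(pos)
            return '(%s %s %s)' % (left, tok, right), pos
        return tok, pos + 1

    parsed, _ = parse(0)
    return parsed

# e.g. eval(toy_math_parse('* x + y 2'), {'x': 3.0, 'y': 1.0}) -> 9.0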
Example #5
def blob_detector2(image, stepSize, windowSize):
    blobs = []

    print("loading model...")
    model = load_model(
        '/Users/2020shatgiskessell/Desktop/New_Mole_Detector/Mole_Detector_1_3/my_mole_model_2.h5'
    )

    for y in range(0, image.shape[0], stepSize):
        for x in range(0, image.shape[1], stepSize):
            # identify moles on  current window
            #blobs.append(identify_moles(image[y:y + windowSize[1], x:x + windowSize[0]]))
            roi = image[y:y + windowSize[1], x:x + windowSize[0]]
            try:
                roi = cv2.resize(roi, (8, 8))
            except Exception:
                continue
            roi = np.expand_dims(roi, axis=2)
            roi = np.expand_dims(roi, axis=0)
            pred = model.predict(roi)
            pred = pred.round()
            if int(pred[0][0]) == 1:
                blob = Blob()
                blob.x = x
                blob.y = y
                #blob.matrix = component_matrix
                blob.roi = roi
                blobs.append(blob)
    return blobs
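Calling model.predict once per window, as above, is slow for large images. A sketch of the same scan with a single batched predict call, under the same assumptions (a Keras-style model taking 8x8 single-channel inputs, and the project's Blob class):

import cv2
import numpy as np

def blob_detector2_batched(image, stepSize, windowSize, model):
    rois, coords = [], []
    # collect every resizable window first ...
    for y in range(0, image.shape[0], stepSize):
        for x in range(0, image.shape[1], stepSize):
            roi = image[y:y + windowSize[1], x:x + windowSize[0]]
            try:
                roi = cv2.resize(roi, (8, 8))
            except Exception:
                continue
            rois.append(roi[..., np.newaxis])
            coords.append((x, y))
    if not rois:
        return []
    # ... then classify them all in one forward pass
    preds = model.predict(np.stack(rois)).round()
    blobs = []
    for (x, y), roi, pred in zip(coords, rois, preds):
        if int(pred[0]) == 1:
            blob = Blob()
            blob.x, blob.y, blob.roi = x, y, roi
            blobs.append(blob)
    return blobs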
Example #6
    def parametric_curve(s1, s2, f1, f2, res, math):
        ''' Creates a blob which is a parametric curve of the form f(t) = (g(t), h(t))

        s1, s2 denote the domain of the parameter; f1 is the x component, f2 is the y component
        '''
        curve = Blob()                  #New blob

        s1,s2 = float(s1), float(s2)

        domain = linspace(s1,s2,res)    # Domain of the parameterizing variable


        f1_func = lambda s: eval(math_parse(f1))  # Two parsed expressions here, in addition
        f2_func = lambda s: eval(math_parse(f2))  # to the usual charge math: one for x, one for y


        calc_charges = lambda x, y: eval(math_parse(str(math)))


        xs = f1_func(domain)
        ys = f2_func(domain)

        for x,y in zip(xs, ys):
            curve.add_point(Point(x,y))

        curve.math = calc_charges

        blobs.append(curve)
Example #7
 def compute(self, image):
     b = Blob()
     if isinstance(image, str):
         b.meta.imagepath = image
     else:
         b.data = image
     # Use compute all here to incorporate custom compute all functions
     return next(self.pipeline.compute([b])).data
Example #8
    def _train(self, blob_generator):
        # First, collect all elements of the input
        data = []
        labels = []
        metas = []
        for blob in blob_generator:
            if self.use_sparse is None:
                # Determine automatically by comparing size
                sparse_vec = scipy.sparse.csr_matrix(blob.data.ravel())
                sparse_memory_req = sparse_vec.data.nbytes + sparse_vec.indptr.nbytes + sparse_vec.indices.nbytes
                self.use_sparse = sparse_memory_req < blob.data.nbytes
                logging.debug(
                    'Using sparse format for collecting features: %s' %
                    self.use_sparse)
                logging.debug('Blob data needs %i' % blob.data.nbytes)
                logging.debug('%i with sparse vs %i with dense' %
                              (sparse_memory_req, blob.data.nbytes))

            if self.use_sparse:
                data.append(scipy.sparse.csr_matrix(blob.data.ravel()))
            else:
                data.append(blob.data.ravel())
            labels.append(blob.meta.label)
            metas.append(blob.meta)

        # Stack data to matrix explicitly here, as both fit and predict
        # would do this stacking otherwise
        try:
            if self.use_sparse:
                data = scipy.sparse.vstack(data)
                data = data.astype(np.float64)
            else:
                data = np.array(data, dtype=np.float64)
        except ValueError:
            logging.error(
                "The length of all feature vectors needs to be the same for Classificator training."
            )
            raise Exception

        logging.warning(
            'Training the model with feature dim %i, this might take a while' %
            data.shape[1])
        self.model.fit(data, labels)
        logging.warning('Finished')

        for (d, m) in zip(self.model.decision_function(data), metas):
            b = Blob()
            b.data = d
            b.meta = m
            yield b
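The sparse-vs-dense heuristic above is easy to reproduce in isolation; a small sketch of the same memory comparison on a mostly-zero feature vector:

import numpy as np
import scipy.sparse

vec = np.zeros(10000, dtype=np.float64)
vec[::500] = 1.0                      # 20 non-zeros out of 10000
sparse_vec = scipy.sparse.csr_matrix(vec)
sparse_bytes = (sparse_vec.data.nbytes + sparse_vec.indptr.nbytes
                + sparse_vec.indices.nbytes)
print('%i bytes sparse vs %i bytes dense' % (sparse_bytes, vec.nbytes))
print('use sparse:', sparse_bytes < vec.nbytes)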
Example #9
 def _gen_inblobs(self, images, prog_bar=True):
     if prog_bar:
         im_gen = pyprind.prog_bar(list(images))
     else:
         im_gen = list(images)
     for im in im_gen:
         if isinstance(im, Blob):
             b = im
         else:
             b = Blob()
             if isinstance(im, str):
                 b.meta.imagepath = im
             else:
                 b.data = im
         yield b
Example #10
    def circle(x0,y0,radius,start_arc,end_arc,numpoints,math):
        """Create a circle of charge by using polar notation. Circle will have a start and stop end_arc
        given in degrees (for covinience)."""

        circle = Blob()                  ##Spiffy new blob

        x0, y0, start_arc, end_arc = float(x0), float(y0), float(start_arc), float(end_arc)
        radius, numpoints = float(radius), int(numpoints)

        theta = radians(linspace(start_arc,end_arc,numpoints)) #figure out our angles

        xs = radius*cos(theta)+x0                              #Build up our xs and ys
        ys = radius*sin(theta)+y0

        for x,y in zip(xs,ys):
            circle.add_point(Point(x,y))

        f = math_parse(str(math))                             #Parse some math for the charges
        circle.math = lambda x, y: eval(f)

        blobs.append(circle)                     #Stick into the main blob
Example #11
    def line(x0,y0,x1,y1,res,math):
        '''Create a linear blob given start and stop coordinates, a number of points
        on the line, and a math expression to be parsed by mathparse.math_parse.'''

        lin = Blob()                        ## Initialize a new blob

        x0, y0, x1, y1 = float(x0), float(y0), float(x1), float(y1)

        horizontal = linspace(x0, x1, int(res))    ## Make a line along x

        m = (y1-y0)/(x1-x0)                 ## Map horizontal onto the line
        b = y0 - m*x0                       ## using y = m*x + b

        for x,y in zip(horizontal, (m*horizontal) + b):
            lin.add_point(Point(x,y))


        f = math_parse(str(math))                ## Parse and assign math to blob.math()
        lin.math = lambda x, y: eval(f)

        blobs.append(lin)                   ## Put it on the list for later flattening
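With the intercept computed as y0 - m*x0, both endpoints lie exactly on the line; a quick standalone check with made-up coordinates:

x0, y0, x1, y1 = 0.0, 1.0, 4.0, 9.0
m = (y1 - y0) / (x1 - x0)
b = y0 - m * x0
assert abs(m * x0 + b - y0) < 1e-9 and abs(m * x1 + b - y1) < 1e-9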
Example #12
def setup():
    global blob
    size(640, 480)
    background(250)
    no_stroke()
    splat = 3
    stride = 5
    number_of_blobs = 1
    blob = [0] * number_of_blobs
    print(blob)
    for n in range(len(blob)):
        blob[n] = Blob(width, height, splat, stride)
Example #13
    def _train(self, blob_generator):
        # If you need all data at once:
        # Remember the metas!
        # Example
        data = []
        labels = []
        metas = []
        for blob in blob_generator:
            data.append(blob.data.ravel())
            labels.append(blob.meta.label)
            metas.append(blob.meta)
        numpy_data = vstack(data)

        # process numpy_data
        # ...

        # Create generator for next layer
        for d, m in zip(data, metas):
            b = Blob()
            b.data = d
            b.meta = m
            yield b
Example #14
def main():
    parser = argparse.ArgumentParser(description='Test blob functionality')
    parser.add_argument('--file', '-f', dest='file', default='test_images/balls.png', help='file to test', type=str)
    args = parser.parse_args()
    
    image = cv2.imread(args.file)
    blobs = Blob.find_blobs(image)
    for blob in blobs:
        blob.draw_outline(image, color=(0, 255, 0))
        blob.draw_fill(image, color=(255,0,0))
        print(blob)

    cv2.namedWindow('a_window', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('a_window', image)
    cv2.waitKey(10000)
Example #15
    def _train(self, blob_generator):
        # First, collect all elements of the input
        data = []
        labels = []
        metas = []
        for blob in blob_generator:
            data.append(self._add_bias(blob.data.ravel()))
            labels.append(blob.meta.label)
            metas.append(blob.meta)
        try:
            data = vstack(data)
        except ValueError:
            logging.error(
                "Size of all input data needs to be the same for SVM training."
            )
            raise Exception

        self.svm_model.fit(data, labels)

        for (d, m) in zip(self.svm_model.predict(data), metas):
            b = Blob()
            b.data = d
            b.meta = m
            yield b
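svm_model above is presumably a scikit-learn style estimator exposing fit and predict. A self-contained sketch of the same train-then-relabel pattern with sklearn.svm.SVC and toy data:

import numpy as np
from sklearn.svm import SVC

X = np.array([[0.0, 0.0], [0.1, 0.2], [1.0, 1.0], [0.9, 1.1]])  # toy features
y = np.array([0, 0, 1, 1])                                      # toy labels
svm_model = SVC()
svm_model.fit(X, y)
print(svm_model.predict(X))   # relabel the training data, as _train does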
Example #16
    def blob_generator(self):
        if len(self.imagepaths) != len(self.labels) or len(self.labels) != len(
                self.split_assignments):
            logging.error(
                "Sizes of imagepaths, labels and split assignments are not equal!"
            )
            raise Exception

        for path, label, split_assignment, idx in zip(self.imagepaths,
                                                      self.labels,
                                                      self.split_assignments,
                                                      range(len(self.labels))):
            b = Blob()
            b.meta.label = label
            b.meta.imagepath = path
            b.meta.split_assignment = split_assignment
            yield b
Example #17
 def find_blobs(self, image):
     blobs = Blob.find_blobs(self.mask_image(image))
     self.blobs = [blob for blob in blobs if blob.area < self.area*self.MAX_BLOB_RATIO]
     return self.blobs
Example #18
def compute_stats(num_labels, labels, stats, centroids, img, x_i, y_i, i, og):
    blobs = []
    #start = timeit.default_timer()
    #get component centroid coordinates
    x, y = centroids[i]
    #compute statistics
    # cv2.CC_STAT_LEFT The leftmost (x) coordinate which is the inclusive start of the bounding box in the horizontal direction.
    # cv2.CC_STAT_TOP The topmost (y) coordinate which is the inclusive start of the bounding box in the vertical direction.
    # cv2.CC_STAT_WIDTH The horizontal size of the bounding box
    # cv2.CC_STAT_HEIGHT The vertical size of the bounding box
    # cv2.CC_STAT_AREA The total area (in pixels) of the connected component

    width = stats[i, cv2.CC_STAT_WIDTH]
    height = stats[i, cv2.CC_STAT_HEIGHT]
    if height > 20:
        return None, None, None, None, None, None
    radius = (height + width) / 2
    area = stats[i, cv2.CC_STAT_AREA]

    #these are the top left x, y coordinates, only to be used for getting the ROI
    x_l = stats[i, cv2.CC_STAT_LEFT]
    y_l = stats[i, cv2.CC_STAT_TOP]

    #compute line
    #slope, y_int= compute_lobf(x,y,x_l,y_l)

    #stop = timeit.default_timer()
    #print('Time to calculate properties: ', stop - start)

    #remove everything except for component i to create isolated component matrix
    #get connected component roi
    roi = og[y_l - 1:y_l + height + 1, x_l - 1:x_l + width + 1]
    #stop = timeit.default_timer()
    #print('Time to create cm and roi: ', stop - start)

    #----------------MEASURES-------------------------------------------------------------------
    #radius = (height + width)/2

    #compute more statistics related to roundness
    radius = np.sqrt((area / np.pi))
    formfactor = compute_formfactor(radius, area)
    bounding_box_area_ratio = area / (height * width)
    if height > width:
        roundness = compute_roundness(height, radius, area)
        aspect_ratio = height / width
    else:
        roundness = compute_roundness(width, radius, area)
        aspect_ratio = width / height

    #print('Time to calculate heuristic properties: ', stop - start)
    # if x >300 and y < 150:
    #     print ("(" + str(x) + ","+str(y)+") -> area ratio: " + str(area/(height*width)))
    #print ("(" + str(x) + ","+str(y)+") -> " + "radius: " + str(radius) + ", formfactor: " + str(formfactor) + ", roundness: " + str(roundness) + ", aspect ratio: " + str(aspect_ratio))
    #print ("(" + str(x) + ","+str(y)+")")
    #print ("\n")

    #calculates line of best fit and error
    try:
        cord1, cord2, error = compute_lobf(roi, x_l * y_l)
        x1, y1 = cord1
        x2, y2 = cord2
    except TypeError:
        print("cant calculate line of best fit")
        error = 0

    cv2.imwrite(
        os.path.join(
            "/Users/2020shatgiskessell/Desktop/New_Mole_Detector/ANN_Images_Fiverr",
            "roi19" + str(i) + ".png"), roi)

    #COMMENT OUT WHEN COLLECTING ANN DATA!!!!!
    #if the error is below 16 (the line of best fit closely matches the connected component), return None
    if error < 16:
        return None, None, None, None, None, None

    # print ("(" + str(x1) + ","+str(y1)+")")
    # print ("(" + str(x2) + ","+str(y2)+")")
    # print ("\n")

    #next step: calculate residuals and do some sort of analysis

    #try  and bounding_box_area_ratio >= 0.5
    if roundness > 0.2 and 0.9 < aspect_ratio < 3 and 1.1 >= formfactor > 0.9:
        blob = Blob()
        blob.radius = radius
        blob.x = x + x_i
        blob.y = y + y_i
        #blob.matrix = component_matrix
        blob.roi = roi
        blobs.append(blob)

    #return the same 6-tuple as the early exits above (callers unpack six values)
    return blobs, roundness, aspect_ratio, formfactor, error, roi
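compute_formfactor and compute_roundness are project helpers that are not shown. For reference, common textbook definitions of these shape descriptors (not necessarily the ones used here) are:

import numpy as np

def formfactor(area, perimeter):
    # 4*pi*A / P^2, equals 1.0 for a perfect circle; common definition,
    # the project's own helper may differ
    return 4.0 * np.pi * area / perimeter ** 2

def roundness(area, max_diameter):
    # 4*A / (pi * d^2), equals 1.0 for a perfect circle; common definition,
    # the project's own helper may differ
    return 4.0 * area / (np.pi * max_diameter ** 2)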
Example #19
 def __init__(self, x_boundary, y_boundary):
     Blob.__init__(self, (0, 255, 0), x_boundary, y_boundary)
Example #20
    def start(self, Image, ROIs):
        """Image processing function.

        \param image (the enhanced and segmented image)
        \return image (the annotated image )

         local variable is the list of detected blobs with the following feature columns:
         [bb_left,bb_top,bb_width,bb_height, cc_area, sharpness, SNR]

         Sharpness is variation of the Laplacian (introduced by Pech-Pacheco
         "Diatom autofocusing in brightfield microscopy: a comparative study."

        """
        try:
            self.startTimer()
            self.image = Image
            self.ROIs = ROIs
            self._Blobs = list()

            # Iterate ROIs
            for ROI in ROIs:
                # slice image, assuming ROI:(left,top,width,height)
                ROI_image = self.image[ROI[1]:ROI[1] + ROI[3],
                                       ROI[0]:ROI[0] + ROI[2]]

                # Binarize and find blobs
                BWImage = cv2.adaptiveThreshold(ROI_image, 255,
                                                cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                self.invBin, self.blocksize,
                                                self.offset)

                # connectedComponentsWithStats output: number of labels, label matrix, stats (left, top, width, height, area), centroids
                _, _, blobFeatures, _ = cv2.connectedComponentsWithStats(
                    BWImage, 8, cv2.CV_32S)

                # Get blob RoI and area
                blobFeatures = blobFeatures[
                    1:]  # skipping background (label 0)

                # Filter by blob area
                blobFeatures = blobFeatures[np.where(
                    (blobFeatures[:, cv2.CC_STAT_AREA] > self.minBlobArea)
                    & (blobFeatures[:, cv2.CC_STAT_AREA] < self.maxBlobArea))]

                # Increase array size
                blobFeatures = np.concatenate([
                    blobFeatures,
                    np.zeros((blobFeatures.shape[0], 3), dtype=int)
                ],
                                              axis=1)

                # Annotate blobs and compute additional features

                for blob in blobFeatures:

                    tl = (blob[0], blob[1])
                    br = (blob[0] + blob[2], blob[1] + blob[3])

                    # Compute some metrics of individual blobs
                    tempImage = self.image[tl[1]:br[1], tl[0]:br[0]]
                    I_0 = 255.0 - np.min(
                        tempImage)  # peak foreground intensity estimate
                    I_b = 255.0 - np.max(tempImage)  # background intensity

                    # Shift coordinates wrt ROI
                    blob[0] += ROI[0]
                    blob[1] += ROI[1]

                    #centroid
                    cX = int((blob[0] + blob[0] + blob[2]) / 2.0)  # x1+x2 /2
                    cY = int((blob[1] - blob[3] + blob[1]) / 2.0)  # y1+y2 /2

                    DetectedBlob = Blob(blob[0], blob[0] + blob[2],
                                        blob[1] - blob[3], blob[1], (cX, cY))

                    # Local sharpness column
                    DetectedBlob._local_sharpness = int(
                        cv2.Laplacian(tempImage, cv2.CV_64F).var())

                    # Local SNR column
                    DetectedBlob._local_SNR = int(
                        (I_0 - I_b) / np.sqrt(I_b)) if I_b > 0 else 0

                    # Perimeter
                    tempBWImage = BWImage[tl[1]:br[1], tl[0]:br[0]]
                    contours, _ = cv2.findContours(tempBWImage, cv2.RETR_LIST,
                                                   cv2.CHAIN_APPROX_NONE)
                    contour = max(
                        contours,
                        key=cv2.contourArea)  # select largest contour
                    DetectedBlob._perimeter = len(contour)

                    #Add blob to list
                    self._Blobs.append(DetectedBlob)

                    # Mark in image
                    # if self.plot:
                    # cv2.rectangle(ROI_image, tl, br, (0, 0, 0), 1)
                    #cv2.putText(ROI_image, str(blob[5]), br, cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,0), 1, cv2.LINE_AA)

            # Plot last ROI
            if self.plot:
                cv2.imshow(self.name, BWImage)

            # Finalize
            self.stopTimer()
            self.signals.finished.emit()

        except Exception as err:
            exc = traceback.format_exception(type(err),
                                             err,
                                             err.__traceback__,
                                             chain=False)
            self.signals.error.emit(exc)
            self.signals.message.emit('E: {} exception: {}'.format(
                self.name, err))

        return self.image
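The sharpness measure mentioned in the docstring, the variance of the Laplacian from Pech-Pacheco et al., can be computed standalone:

import cv2

def laplacian_sharpness(gray_image):
    # higher variance of the Laplacian means more high-frequency detail, i.e. sharper
    return cv2.Laplacian(gray_image, cv2.CV_64F).var()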
Example #21
def main():
    filepath = videopath
    print(filepath)
    video = cv2.VideoCapture("./" + videopath)

    width = video.get(cv2.CAP_PROP_FRAME_WIDTH)  # float video.get(3)
    height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float video.get(4)

    if not video.isOpened():
        print("Video not Opened!")
        return

    #Maybe we should check if the video has at least 2 frames

    #Read the first and second frame of the video to start doing processing on them
    _, imgFrame1 = video.read()
    _, imgFrame2 = video.read()

    #start framecount as 2 because we just read 2 frames
    atFrame = 2
    #up to this point we have no blobs yet
    blobs = []

    # To count people and pass it as a parameter. It doesn't work with primitive variables (int)
    peopleCount = [0]
    seenPeople = set()

    #While the video is open and we don't press the q key: read, process and show a frame
    while (video.isOpened()):

        #for every frame, check how many blobs are on the screen
        currentBlobs = []

        imgFrame1Copy = copy.deepcopy(imgFrame1)
        imgFrame2Copy = copy.deepcopy(imgFrame2)

        imgFrame1Copy = cv2.cvtColor(imgFrame1Copy, cv2.COLOR_BGR2GRAY)
        imgFrame2Copy = cv2.cvtColor(imgFrame2Copy, cv2.COLOR_BGR2GRAY)

        if (debugGaussian and debug_mode):
            cv2.imshow('gaussianBlurBefore-Img1', imgFrame1Copy)
            cv2.imshow('gaussianBlurBefore-Img2', imgFrame2Copy)

        imgFrame1Copy = cv2.GaussianBlur(imgFrame1Copy, gaussian_kernel, 0)
        imgFrame2Copy = cv2.GaussianBlur(imgFrame2Copy, gaussian_kernel, 0)

        if (debugGaussian and debug_mode):
            cv2.imshow('gaussianBlurAfter-Img1', imgFrame1Copy)
            cv2.imshow('gaussianBlurAfter-Img2', imgFrame2Copy)

        imgDifference = cv2.absdiff(imgFrame1Copy, imgFrame2Copy)

        if (debugGaussian and debug_mode):
            cv2.imshow('dif-Img1-Img2', imgDifference)
        # ret value is used for Otsu's Binarization if we want to
        # https://docs.opencv.org/3.4.0/d7/d4d/tutorial_py_thresholding.html
        ret, imgThresh = cv2.threshold(imgDifference, threshold_value, 255.0,
                                       cv2.THRESH_BINARY)

        if debugThreshold and debug_mode:
            cv2.imshow('imgThresh', imgThresh)

        #erosion removes white noise; pixels near the boundary are discarded depending on the kernel size
        imgThresh = cv2.dilate(imgThresh, kernel_dilate1, iterations=1)
        if debug_dilate:
            cv2.imshow('dilate-dilate1', imgThresh)
        imgThresh = cv2.erode(imgThresh, kernel_erode1, iterations=1)
        if debug_erode:
            cv2.imshow('dilate-erode1', imgThresh)

        imgThresh = cv2.dilate(imgThresh, kernel_dilate2, iterations=1)
        if debug_dilate:
            cv2.imshow('dilate-dilate2', imgThresh)
        imgThresh = cv2.erode(imgThresh, kernel_erode2, iterations=1)
        if debug_erode:
            cv2.imshow('dilate-erode2', imgThresh)

        imgThreshCopy = copy.deepcopy(imgThresh)

        # Contours can be explained simply as a curve joining all the continuous points (along the boundary),
        # having same color or intensity. The contours are a useful tool for shape analysis and object detection and recognition.
        # https://docs.opencv.org/3.1.0/d4/d73/tutorial_py_contours_begin.html
        #im2, contours, hierarchy = cv2.findContours(imgThreshCopy, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        imgThreshCopy, contours, hierarchy = cv2.findContours(
            imgThreshCopy, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        drawAndShowContours(imgThreshCopy, contours, 'imgContours')

        #the image processing is done; now work with the info extracted from the image

        #for everything identified on the screen, check whether it is a person
        for x in contours:
            convexHull = cv2.convexHull(x)
            blob = Blob(convexHull)
            if (blob.isObject()):
                currentBlobs.append(blob)

        drawAndShowBlobs(imgThresh, currentBlobs, "imgCurrentBlobs")

        if atFrame <= 2:
            #if it is the first iteration there is no comparison; add currentBlobs to blobs
            for curBlob in currentBlobs:
                curBlob.id = Blob.getId()
                blobs.append(curBlob)
        else:
            #otherwise check if the curBlob is related to a previous blob and match them
            matchCurrentFrameBlobsToExistingBlobs(blobs, currentBlobs)

        if debug_all_current_blobs:
            for b in blobs:
                print(b)

        drawAndShowBlobs(imgThresh, blobs, "imgBlobs")

        imgFrame2Copy = copy.deepcopy(imgFrame2)

        drawBlobInfoOnImage(blobs, imgFrame2Copy)

        #check if any blob crossed the counting line
        atLeastOneBlobCrossedTheLine = checkIfBlobsCossedTheLine(
            blobs, line, peopleCount, seenPeople, case)

        #if one did, draw the line in magenta
        if atLeastOneBlobCrossedTheLine:
            cv2.line(imgFrame2Copy, (line[0].x, line[0].y),
                     (line[1].x, line[1].y), (255, 0, 255), 2)  #magenta line
        else:
            cv2.line(imgFrame2Copy, (line[0].x, line[0].y),
                     (line[1].x, line[1].y), (0, 255, 255), 2)  #yellow line

        #draw the counter
        drawPeopleCounterOnImage(peopleCount, imgFrame2Copy, width, height)

        cv2.imshow('imgFrame2Copy', imgFrame2Copy)

        # get ready for next iteration
        del currentBlobs[:]

        imgFrame1 = copy.deepcopy(imgFrame2)

        if ((video.get(cv2.CAP_PROP_POS_FRAMES) + 1) <
            (video.get(cv2.CAP_PROP_FRAME_COUNT))):
            _, imgFrame2 = video.read()
        else:
            print("end of video")
            break

        atFrame += 1
        #print("frame: " +  str(count))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        if debug_mode and cv2.waitKey() & 0xFF == ord('q'):
            break

    video.release()
    cv2.destroyAllWindows()

    print("end")
Example #22
    xposv = []
    yposv = []

    xpos = 0
    ypos = 0
    if (NFrame):
        for x in range(int(fils / divfact)):
            for y in range(int(cols / divfact)):
                currentColor = frame[divfact * x][divfact * y]
                d = np.abs(float(currentColor[0]) - float(Ctrack[0])) + np.abs(
                    float(currentColor[1]) - float(Ctrack[1])) + np.abs(
                        float(currentColor[2]) - float(Ctrack[2]))
                if (d < Treshold):
                    if (len(blobs) > 0):
                        for i in range(len(blobs)):
                            auxblob = Blob(divfact * x, divfact * y,
                                           blobTreshold)
                            if (blobs[i].same(auxblob)):
                                blobs[i].expandB(auxblob)
                            else:
                                blobs.append(
                                    Blob(divfact * x, divfact * y,
                                         blobTreshold))
                                count += 1
                    else:
                        blobs.append(
                            Blob(divfact * x, divfact * y, blobTreshold))
        NFrame = False

    if (count > 10):

        for i in range(len(blobs)):
Example #23
def addNewBlob(curBlob, blobs):
    curBlob.id = Blob.getId()
    curBlob.isMatchFoundOrNewBlob = True
    blobs.append(curBlob)
Example #24
 def _new_blob(self):
     self._last_used_ID += 1
     return Blob(3, self._last_used_ID)
Example #25
# Set up buttons
buttonSpacingCoeff = 1 / 7
buttonYValue = SCREEN_Y * 0.75
buttonXValueMulti = (SCREEN_X - Button.width)
numbBlobsButton = Button("Number of Blobs", (255,255,255), buttonXValueMulti * buttonSpacingCoeff, buttonYValue)
visionButton = Button("Vision Radius", (0,255,0), buttonXValueMulti * buttonSpacingCoeff * 2, buttonYValue)
matingSizeButton = Button("Mating Size", (255,0,0), buttonXValueMulti * buttonSpacingCoeff * 3, buttonYValue)
reachedTargetDistanceButton = Button("Target Dedication", (0,0,255), buttonXValueMulti * buttonSpacingCoeff * 4, buttonYValue)
babySizeButton = Button("Baby Size", (0,255,255), buttonXValueMulti * buttonSpacingCoeff * 5, buttonYValue)
speedButton = Button("Speed", (255,255,0), buttonXValueMulti * buttonSpacingCoeff * 6, buttonYValue)

# Spawn initial Blobs
Blob.screenX = SCREEN_X
Blob.screenY = SCREEN_Y
for i in range(0, 75):
	Blob.makeInitialBlob()

# Spawn initial Fruits
for i in range(0, 16):
	fruit = Fruit(SCREEN_X, SCREEN_Y)

# Main Game Loop
loops = 0
running = True
while running:

	# Event Listeners
	for event in pygame.event.get():

		# Controls
		if event.type == pygame.KEYDOWN:
Example #26
 def __init__(self, x_boundary, y_boundary):
     Blob.__init__(self, (0, 0, 255), x_boundary, y_boundary)
     logging.info('Blob init with color {}'.format(str(self.color)))
Example #27
def blob_detector(img, x_i, y_i):
    og = img.copy()
    img = cv2.Canny(img, 100, 200)

    blobs = []
    #get connected components of fg
    #print ("calculating connected components...")
    output = cv2.connectedComponentsWithStats(img, 4, cv2.CV_32S)
    # Get the results
    # The first cell is the number of labels
    #print ("calculating statistics...")
    num_labels = output[0]
    # The second cell is the label matrix
    labels = output[1]
    # The third cell is the stat matrix
    stats = output[2]
    # The fourth cell is the centroid matrix
    centroids = output[3]
    #print (centroids)
    #print(np.where(labels == 2))
    roundnesses = []
    aspect_ratios = []
    formfactors = []
    errors = []
    rois = []
    #imshow_components(labels, og)
    #print ("calculating connected components properties...")
    # with concurrent.futures.ThreadPoolExecutor(5) as executor:
    #     future_moles = {executor.submit(compute_stats, num_labels,labels, stats, centroids, img, x_i, y_i, i, og):i for i in range(num_labels)}
    #     for future in concurrent.futures.as_completed(future_moles):
    #         found_blobs, roundnesses1, aspect_ratios1, formfactors1, errors1, roi1 = future.result()
    #         if found_blobs != None:
    #             blobs.extend(found_blobs)
    #             roundnesses.append(roundnesses1)
    #             aspect_ratios.append(aspect_ratios1)
    #             formfactors.append(formfactors1)
    #             errors.append(errors1)
    #             rois.append(roi1)
    print("loading model...")
    model = load_model(
        '/Users/2020shatgiskessell/Desktop/New_Mole_Detector/Mole_Detector_1_3/my_mole_model_2.h5'
    )
    for i in range(num_labels):
        #start = timeit.default_timer()
        #get component centroid coordinates
        x, y = centroids[i]
        #compute statistics
        # cv2.CC_STAT_LEFT The leftmost (x) coordinate which is the inclusive start of the bounding box in the horizontal direction.
        # cv2.CC_STAT_TOP The topmost (y) coordinate which is the inclusive start of the bounding box in the vertical direction.
        # cv2.CC_STAT_WIDTH The horizontal size of the bounding box
        # cv2.CC_STAT_HEIGHT The vertical size of the bounding box
        # cv2.CC_STAT_AREA The total area (in pixels) of the connected component

        width = stats[i, cv2.CC_STAT_WIDTH]
        height = stats[i, cv2.CC_STAT_HEIGHT]
        if height > 20:
            continue
        radius = (height + width) / 2
        area = stats[i, cv2.CC_STAT_AREA]

        #these are the top left x, y coordinates, only to be used for getting the ROI
        x_l = stats[i, cv2.CC_STAT_LEFT]
        y_l = stats[i, cv2.CC_STAT_TOP]

        #compute line
        #slope, y_int= compute_lobf(x,y,x_l,y_l)

        #stop = timeit.default_timer()
        #print('Time to calculate properties: ', stop - start)

        #remove everything except for component i to create isolated component matrix
        #get connected component roi
        roi = og[y_l - 1:y_l + height + 1, x_l - 1:x_l + width + 1]
        try:
            roi = cv2.resize(roi, (8, 8))
        except Exception:
            continue
        roi = np.expand_dims(roi, axis=2)
        roi = np.expand_dims(roi, axis=0)
        pred = model.predict(roi)
        pred = pred.round()
        if int(pred[0][0]) == 1:
            print("found blob")
            blob = Blob()
            blob.radius = radius
            blob.x = x + x_i
            blob.y = y + y_i
            #blob.matrix = component_matrix
            blob.roi = roi
            blobs.append(blob)

        # #----------------MEASURES-------------------------------------------------------------------
        #
        # #compute more statistics related to roundness
        # radius = np.sqrt((area/np.pi))
        # formfactor = compute_formfactor (radius, area)
        # bounding_box_area_ratio = area/(height*width)
        # if height > width:
        #     roundness = compute_roundness (height, radius, area)
        #     aspect_ratio = height/width
        # else:
        #     roundness = compute_roundness (width, radius, area)
        #     aspect_ratio = width/height
        #
        # #calculates line of best fit and error
        # try:
        #     cord1, cord2, error = compute_lobf(roi, x_l*y_l)
        #     x1,y1 = cord1
        #     x2,y2 = cord2
        # except TypeError:
        #     print ("cant calculate line of best fit")
        #     error = 0
        # #COMMENT OUT WHEN COLLECTING ANN DATA!!!!!
        # if error < 16:
        #     continue
    return blobs