Example #1
    def encuentraYFiltraBlobs(self, tipoDibujo='estructura'):

        imagenBlobs = Image(self.rutaImagenTratada_Fase2).copy()
        blobs = imagenBlobs.findBlobs()
        self.todosLosCandidatos = blobs

        # findBlobs() returns None when nothing is found, so test truthiness
        if blobs:

            blobs.image = imagenBlobs

            self.areaBlobs = blobs.area()
            blobs = self.filtroPorArea(blobs)
            self.numBlobsCandidatosPorArea = len(blobs)

            # Look for circular blobs; the blobs that pass the filter
            # are stored in the list self.articulaciones
            blobs = self.filtroPorForma(blobs)

            if tipoDibujo == 'blobs':
                self.dibujaBlobs(blobs)
            elif tipoDibujo == 'estructura':
                self.listaAngulos = self.encuentraYDibujaAngulos(imagenBlobs)

            # The processed image has to be saved, otherwise the
            # Tkinter integration does not work
            imagenBlobs.save(self.rutaImagenBlobs)
            return Image(self.rutaImagenBlobs)
Example #2
def main():
    camera = cv2.VideoCapture('video2.avi')
    # OpenCV 2.4.x API; OpenCV 3+ moved this to cv2.bgsegm.createBackgroundSubtractorMOG()
    background_subtractor = cv2.BackgroundSubtractorMOG()

    # Store previous tracking image
    previous_track_image = Image()

    while camera.isOpened():
        is_success, image = camera.read()
        if is_success:
            mask = background_subtractor.apply(image, None, 0.1)
            # Vehicles will be detected from this image
            track_image = Image(ndimage.median_filter(mask, 3), cv2image=True)

            blobs = track_image.findBlobs(minsize=250, maxsize=800)

            if not blobs:
                # print('No Blobs Found.')
                continue
            else:
                # print("Width: {0}; Height: {1}".format(blobs[0].width(), blobs[0].height()))
                # Only keep near square blobs
                blobs = filter(lambda b: 0.25 < b.width() / b.height() < 4, blobs)

            # print("Found {0} Blobs. {1}".format(len(blobs)))

            if len(vehicle_track_set_list) == 0:
                # Find first batch of blobs
                for blob in blobs:
                    blob.drawRect(color=Color.BLUE, width=3, alpha=225)
                    # bounding_box = tuple(blob.boundingBox())
                    # print("Area: {0}".format(blob.area()))

                    track_set = track_image.track(method='mftrack', img=track_image, bb=blob.boundingBox())
                    if track_set:
                        vehicle_track_set_list.append(VehicleTrackSet(track_set))
                        track_set.drawBB(color=(255, 0, 0))
                        track_set.drawPath()
                        track_image.show()

            else:
                for blob in blobs:
                    blob.drawRect(color=Color.BLUE, width=3, alpha=225)
                    # print("Blob Coordinate: ({0}, {1}).".format(blob.x, blob.y))
                    update_track_set(track_image, previous_track_image, blob.boundingBox())

            # Save current image
            previous_track_image = track_image

            # time.sleep(0.1)
        else:
            camera.release()
            break
Example #3
def filter_to_point(filter, imgname):
    img = Image(imgname)

    zones = img.colorDistance(filter).invert()
    blobs = zones.binarize(230)

    blobs.findBlobs().draw(color=Color.BLACK, width=20)
    blobs.save('blobs.bmp')
    blobs_img = Image('blobs.bmp').invert()

    points = blobs_img.findBlobs(minsize=100).coordinates()
    return list(points[0])
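A minimal usage sketch for filter_to_point(); Color.RED and 'scene.png' are placeholder assumptions, not values from the original snippet.

from SimpleCV import Color

point = filter_to_point(Color.RED, 'scene.png')  # [x, y] of the first large blob found
print "Marker found near", point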
Example #4
def connectToServerAndHandleConnection():
    
    HOST = 'localhost'
    PORT = 9898
    
    while True:
        try:
            
            sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
            sock.connect((HOST,PORT))
        
            img_str = sock.recv(100000)
            
            nparr = np.fromstring(img_str, np.uint8)
            img_np = cv2.imdecode(nparr, cv2.CV_LOAD_IMAGE_COLOR) # cv2.IMREAD_COLOR in OpenCV 3.1
            
            img_ipl = cv.CreateImageHeader((img_np.shape[1], img_np.shape[0]), cv.IPL_DEPTH_8U, 3)
            cv.SetData(img_ipl, img_np.tostring(), img_np.dtype.itemsize * 3 * img_np.shape[1])
            
            image = Image(img_ipl)
            barcodes = image.findBarcode()
            stringOut = '[]\n'
            if barcodes is not None:
                stringOut = ''
                for barcode in barcodes:
                    stringOut += str([barcode.x,barcode.y,int(barcode.length()), int(barcode.width()), barcode.data]) + ';'
                stringOut = stringOut[:-1]
                stringOut += '\n'
            sock.send(stringOut)
            
        except:
            continue
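connectToServerAndHandleConnection() expects a peer on localhost:9898 that sends one raw JPEG-encoded frame per connection and reads back either '[]' or a semicolon-separated list of [x, y, length, width, data] entries. Below is a hedged sketch of such a test peer; the frame filename is a placeholder and this helper is not part of the original project.

import socket
import cv2

def serve_one_frame(path='frame.jpg', host='localhost', port=9898):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen(1)
    conn, _ = srv.accept()
    ok, buf = cv2.imencode('.jpg', cv2.imread(path))
    conn.sendall(buf.tostring())       # raw JPEG bytes, as the client expects
    print conn.recv(4096).strip()      # '[]' or '[x, y, length, width, data];...'
    conn.close()
    srv.close()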
Example #5
def test():
    f = Features(**PARAMS)
    minx,maxx = 0,0
    miny,maxy = 0,0
    while True:
        path = 'test/tmp.png'
        screenshot(path, region=REGION)
        im = Image(path)
        f.set_image(im)
        blobs = f.extract_blobs()
        if not blobs[0]:
            break
        dl = f.small.dl()
        bottom_right_corner = blobs[0].bottomRightCorner()
        dl.circle(bottom_right_corner, 5, Color.RED)
        if blobs[1]:
            top_right = blobs[1].topRightCorner()
            x = bottom_right_corner[0] - top_right[0]
            y = bottom_right_corner[1] - top_right[1]
            if x < minx:
                minx = x
            elif x > maxx:
                maxx = x
            if y < miny:
                miny = y
            elif y > maxy:
                maxy = y
            dl.circle(top_right, 5, Color.RED)
        f.small.show()
    print 'minx, maxx', minx, maxx
    print 'miny, maxy', miny, maxy
Example #6
def feature_extractor():

    # Open the file that will receive the features extracted from the image files
    file = "image_features_train_1.arff"
    text_file = open(file, "a")
    trainPath = "C:/Python27/ARP/Dataset/train2"
    dirList = os.listdir(trainPath)
    edge = EdgeHistogramFeatureExtractor(1)
    for dirName in dirList:
        fileList = os.listdir(trainPath + '/' + dirName)
        maxsize = len(fileList)
        for i in range(0, maxsize):
            #print fileList[i]
            filePATH = (trainPath + '/' + dirName + '/' + fileList[i])
            img = Image(filePATH)
            #Feature - Edge histogram
            edge_vecs = edge.extract(img)
            edge_fields = edge.getFieldNames()
            for y in range(0, edge.getNumFields()):
                #print edge_fields[y], '=', edge_vecs[y]
                text_file.write(str(edge_vecs[y]) + ",")
            #Feature - Hue
            #hue = HueHistogramFeatureExtractor(10)
            #hue_vecs = hue.extract(img)
            #hue_fields = hue.getFieldNames()
            #for i in range(0, hue.getNumFields()):
            #print hue_fields[i], '=', hue_vecs[i]
            #text_file.write(str(hue_vecs[i]) + ",")
            text_file.write(dirName + "\n")
    text_file.close()
Example #7
def get_faces(image_path, api_mode=False, rescale_face_crop=0):
    """
    Return a list of cropped faces given an image path

    :param image_path: Path to image
    :type image_path: str
    :param api_mode: If api_mode is True get_faces returns a list of found HaarFeatures
    :type api_mode: bool
    :param rescale_face_crop: optional factor by which each face's bounding box is scaled before cropping
    :type rescale_face_crop: float
    :returns: list of images
    """

    original_image = Image(image_path)
    faces = original_image.findHaarFeatures(segment_face)

    if api_mode:
        return faces
    else:
        if rescale_face_crop:
            return [
                original_image.crop(
                    scale_bounding_box(face.boundingBox(), rescale_face_crop))
                for face in faces
            ]
        else:
            return [face.crop() for face in faces]
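A minimal usage sketch for get_faces(); the input filename and the 1.5 rescale factor are assumptions.

faces = get_faces("group_photo.jpg", rescale_face_crop=1.5)
for i, face in enumerate(faces):
    face.save("face_%d.jpg" % i)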
Example #8
def line_blobs(id):
    cur = g.db.execute(
        'SELECT id, x, y, width, height FROM blobs WHERE image=?', [id])
    blobs = cur.fetchall()
    entries = []
    img = Image("static\\img\\%d.jpg" % id)
    for i, entry in enumerate(blobs):
        blob = img.crop(entry[1], entry[2], entry[3], entry[4])
        if blob and 'SK_MODEL' in app.config:
            if blob.height > blob.width:
                blob = blob.resize(h=app.config['PATCH_SIZE'])
            else:
                blob = blob.resize(w=app.config['PATCH_SIZE'])
            blob = blob.embiggen(
                (app.config['PATCH_SIZE'], app.config['PATCH_SIZE']))
            np_img = blob.getGrayNumpy().transpose().reshape(-1)
            pred = labels.inverse_transform(sk_model.predict(np_img))[0]
            if app.config['DEBUG']:
                blob.save("tmp\\pic%d %s.jpg" % (i, pred))
            entries.append(
                dict(x=entry[1],
                     y=entry[2],
                     width=entry[3],
                     height=entry[4],
                     pred=pred))
        else:
            entries.append(
                dict(x=entry[1], y=entry[2], width=entry[3], height=entry[4]))
    return jsonify(blobs=entries)
Example #9
def loadData(paths, classes):
    class_dict = generateDictOfClasses(classes)
    all_data = None

    for i in range(len(paths)):
        path = paths[i]
        print path

        for img in os.listdir(path):
            m = re.search('.*Thumbs\.(db)',
                          img)  # in windows XP, this is a problem

            if (m is None):
                img_path = path + "/" + img
                img_data = thresholdOp(Image(img_path))
                flattened = img_data.getNumpy()[:, :,
                                                1].flatten()  # 25x20 (wxh)
                flattened[flattened == 255] = 1  # set every '255' to '1'

                if all_data is None:
                    all_data = ClassificationDataSet(len(flattened),
                                                     nb_classes=len(classes),
                                                     class_labels=classes)

                all_data.addSample(
                    flattened, [class_dict[classes[i]]])  # [data[1],data[2]]

    return all_data
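A hedged sketch of how the resulting PyBrain ClassificationDataSet might be consumed; the directory names and class labels below are assumptions.

paths = ['dataset/circles', 'dataset/squares']
classes = ['circle', 'square']

all_data = loadData(paths, classes)
all_data._convertToOneOfMany()      # one-hot targets for a softmax output layer
print all_data.calculateStatistics()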
Example #10
def test():
    f = Features(**PARAMS)
    for i in range(300, 400):
        path = 'test/' + str(i) + '.png'
        im = Image(path)
        f.set_image(im)
        f.extract_blobs()
Example #11
    def find(self):
        new_file_path = self.nfn('dot-blobs')
        img = Image(self.img_path)
        new_img = img.colorDistance((160, 255, 160)).invert().binarize(
            (200, 200, 200)).invert().erode(1)
        for blob in new_img.findBlobs():
            print str(blob) + " --> " + str(
                self.chance_blob_is_an_ellipse(blob))

        dots = sorted(new_img.findBlobs()[-5:],
                      key=lambda blob: blob.centroid()[1])
        for blob in dots:
            blob.draw()
            new_img.dl().circle(blob.centroid(), 5, Color.RED)

        centroids = map(lambda blob: blob.centroid(), dots)

        bottom_screws = sorted(centroids[2:4],
                               key=lambda centroid: centroid[0])

        shoe_measurements = ShoeMeasurements(self.shoe.left_or_right,
                                             centroids[0], centroids[1],
                                             bottom_screws[0],
                                             bottom_screws[1], centroids[4])
        new_img = shoe_measurements.draw_on_img(new_img)
        new_img.save(new_file_path)
        return (new_file_path, shoe_measurements)
Example #12
    def detect_digit(self, digit_image):
        """
        Which digit is on the image ?
        """

        detected_digits = []

        # We don't recognize all digits yet (no template for 2)
        for digit in [0, 1, 3, 4, 5, 6, 7, 8, 9]:
            template_image = Image("./images/digits/" + str(digit) +
                                   ".png").resize(w=digit_image.width,
                                                  h=digit_image.height)

            diff_image = digit_image - template_image

            diff_index = diff_image.getNumpy().mean()

            detected_digits.append((digit, diff_index))

        sorted_array = sorted(detected_digits, key=itemgetter(1))

        detected_digit = sorted_array[0][0]

        logging.debug("Detected digit " + str(detected_digit))

        return detected_digit
Example #13
    def getBackground(self):
        """
        **SUMMARY**

        Get Background of the Image. For more info read 
        http://opencvpython.blogspot.in/2012/07/background-extraction-using-running.html

        **PARAMETERS**
        No Parameters

        **RETURNS**
        
        Image - SimpleCV.ImageClass.Image

        **EXAMPLE**

        >>> while (some_condition):
            ... img1 = cam.getImage()
            ... ts = img1.track("camshift", ts1, img, bb)
            ... img = img1
        >>> ts.getBackground().show()
        """
        from SimpleCV import Image
        imgs = self.trackImages(cv2_numpy=True)
        f = imgs[0]
        avg = np.float32(f)
        for img in imgs[1:]:
            f = img
            cv2.accumulateWeighted(f, avg, 0.01)
            res = cv2.convertScaleAbs(avg)
        return Image(res, cv2image=True)
Example #14
def center(filename):
    image = Image(filename)
    blobs = image.findBlobs()
    box = blobs[0].mBoundingBox
    x = box[0] + box[2] / 2.
    y = box[1] + box[3] / 2.
    return [x, y]
Example #15
def ProcessPlateBarcode(fileName):
    from SimpleCV import Image
    import pdb

    timeStamp = ProcessTimeStamp(fileName)
    [fileNameNoExt, fileExtension] = ProcessFileNameExtension(fileName)

    # 	pdb.set_trace()

    img = Image(fileName)

    barcode = img.findBarcode()

    if barcode is not None:
        if len(barcode[0].data) >= 2:
            plateID = barcode[0].data[0:-1]
            checkSum = barcode[0].data[-1]
            calculatedChecksum = GenerateCheckSum(plateID)

            if checkSum != calculatedChecksum:
                print "Error in barcode check sum. File: " + fileName
        else:
            plateID = 'UNKNOWN'
    else:
        plateID = 'UNKNOWN'

    return [plateID, timeStamp, fileExtension]
Example #16
def send_email(percentage):
    import smtplib
    from email.MIMEMultipart import MIMEMultipart
    from email.MIMEImage import MIMEImage
    from email.MIMEText import MIMEText

    # Prepare actual message
    msg = MIMEMultipart()
    msg['From'] = "*****@*****.**"  # change to your mail
    msg['To'] = "*****@*****.**"  # change to your mail
    msg['Subject'] = "RPi Camera Alarm!"

    imgcv = Image("image.jpg")
    imgcv.save("imagesend.jpg",
               quality=50)  # reducing quality of the image for smaller size

    img1 = MIMEImage(open("imagesend.jpg", "rb").read(), _subtype="jpg")
    img1.add_header('Content-Disposition', 'attachment; filename="image.jpg"')
    msg.attach(img1)

    part = MIMEText('text', "plain")
    part.set_payload(("Raspberry Pi camera alarm activated with level {:f}"
                      ).format(percentage))
    msg.attach(part)

    try:
        server = smtplib.SMTP("mail.htnet.hr",
                              25)  #change to your SMTP provider
        server.ehlo()
        server.starttls()
        server.sendmail(msg['From'], msg['To'], msg.as_string())
        server.quit()
        print 'Successfully sent the mail'
    except smtplib.SMTPException as e:
        print(e)
Example #17
 def getSensors(self):
     screenshot(PATH, REGION)
     im = Image(PATH)
     f.set_image(im)
     sensors = asarray(f.extract())
     self.is_alive = sensors[2]
     return sensors
Example #18
 def update(self, dt):
     self.img = GameInstance().cam.getImage().flipHorizontal()
     hsv = self.img.toHSV()
     mask = cv2.inRange(hsv.getNumpy(),
                        GameInstance().lower,
                        GameInstance().upper)
     img2 = Image(source=mask)
     img2 = img2.erode(1)
     img2 = img2.dilate(2)
     blops = img2.findBlobs()
     if blops:
         largest = blops[-1]
         x, y = largest.centroid()
         y = self.size[1] - y
         self.movement_filter(numpy.array([x, y]))
         self.blob_coor[0] = x
         self.blob_coor[1] = y
         if self.last_pos[1] is not None:
             with self.game.canvas:
                 kvColor(1, 1, 0)
                 d = 10.
                 # Ellipse(pos=(x - d / 2, self.blob_coor[1] - d / 2), size=(d, d))
                 Line(points=(self.last_pos[0][0], self.last_pos[0][1],
                              self.last_pos[1][0], self.last_pos[1][1]))
                 self.last_pos[0] = self.last_pos[1]
Example #19
def check_image(image_path):
    #Find file by path and import.  File currently resides in same directory.
    image = Image(image_path)
    # grey = image.grayscale()

    instruction = "go"
    array_bounds_possible_widths = [image.width / 4, image.width / 4 * 3]
    shapes_pic_attributes = image.size()

    shapes_pic_size = shapes_pic_attributes[0] * shapes_pic_attributes[1]
    # dist = img.colorDistance(SimpleCV.Color.Black).dilate(2)
    blobs = image.findBlobs()

    # Check whether the blob sits in the middle half of the frame and is tall and low enough to be in the way
    for blob in blobs[:-1]:
        # print blob.contains()

        if (blob.coordinates()[0] > array_bounds_possible_widths[0]
                and blob.coordinates()[0] < array_bounds_possible_widths[1]
            ) and (blob.height() > image.height / 5
                   and blob.coordinates()[1] > image.height / 5 * 2):
            # print grey.height
            print blob.coordinates()[1]
            print "Blob is in the way!!"
            blob.draw(color=(255, 0, 0))
            instruction = "stop"

    # Display the blob until you click on the left side of the picture.
    display = SimpleCV.Display()
    while display.isNotDone():
        image.show()
        if display.mouseLeft:
            break

    return instruction
Example #20
 def scale(self, img_path, scale):
     new_file_path = self.nfn('resized')
     img = Image(img_path)
     img = img.scale(scale)
     img.save(new_file_path)
     self.transformations.append(new_file_path)
     return new_file_path
Example #21
def getimage(x,y):
    """
    Fetch a Baidu Maps traffic tile image for tile (x, y)
    """
    nowtime=time.time()
    url='http://its.map.baidu.com:8002/traffic/TrafficTileService?time={0}&level=17&x={1}&y={2}'.format(nowtime,x,y)
    try:
        try:
            img=Image(url)
        except:
            # retry once if the first download attempt fails
            img=Image(url)
        insertdata(img,x,y,nowtime)
    except Exception,e:
        print e
        with open('./log/getynpic.log','a') as logfile:
            logfile.write('{0},{1}'.format(x,y))
            logfile.write('\n')
Example #22
def imageDifference():
    if not os.path.isfile("image.jpg"):
        subprocess.call("raspistill -n -t 1 -w 640 -h 480 -e jpg -o image.jpg",
                        shell=True)
        time.sleep(0.5)
    img = Image("image.jpg")
    time.sleep(0.2)
    subprocess.call("raspistill -n -t 1 -w 640 -h 480 -e jpg -o image.jpg",
                    shell=True)
    img2 = Image("image.jpg")

    diffimg = img2 - img

    matrix = diffimg.getNumpy()
    flat = matrix.flatten()
    mean = matrix.mean()
    return mean
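A hedged sketch tying imageDifference() to send_email() from Example #16; the threshold value and the pairing itself are assumptions rather than part of either original snippet.

DIFF_THRESHOLD = 10.0   # assumed mean-difference level that counts as motion

level = imageDifference()
if level > DIFF_THRESHOLD:
    send_email(level)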
Example #23
 def rotate(self, img_path, shoe_measurements):
     img = Image(img_path)
     new_file_path = self.nfn('rotated')
     img = img.rotate(shoe_measurements.toe_heel_angle(),
                      point=shoe_measurements.cleat_length_intersection())
     self.transformations.append(new_file_path)
     img.save(new_file_path)
     return new_file_path
Example #24
    def getSync(self):
        ''' Return a SimpleCV-compatible synced map with the dimensions of the
        depth map and colours from the colour map. Indexes here match indexes
        in the vertex map '''

        sync = ds.getSyncMap()
        sync = sync[:, :, ::-1]
        return Image(sync.transpose([1, 0, 2]))
Example #25
def unwarp(img):
    # Remap donut to 360 format
    print xmap
    print ymap
    img = Image(img)
    output = cv2.remap(img.getNumpyCv2(), xmap, ymap, cv2.INTER_LINEAR)
    # Return unwarped image
    return output
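unwarp() relies on module-level xmap/ymap remap tables that the snippet does not show. Below is a hedged sketch of how such polar "donut" unwarp tables could be built; the centre, radii and output size are assumed values.

import numpy as np

def build_unwarp_maps(cx=320.0, cy=240.0, r_in=50.0, r_out=230.0,
                      out_w=720, out_h=180):
    # For each destination pixel (x, y), store the source pixel on the donut
    # as float32 maps usable by cv2.remap().
    xmap = np.zeros((out_h, out_w), np.float32)
    ymap = np.zeros((out_h, out_w), np.float32)
    for y in range(out_h):
        r = r_in + (float(y) / out_h) * (r_out - r_in)
        for x in range(out_w):
            theta = (float(x) / out_w) * 2.0 * np.pi
            xmap[y, x] = cx + r * np.sin(theta)
            ymap[y, x] = cy + r * np.cos(theta)
    return xmap, ymap

xmap, ymap = build_unwarp_maps()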
Example #26
def findFace(imgPath):
	img = Image(imgPath)
	faces = img.findHaarFeatures("face.xml")
	if faces is not None:
		#print("find faces!")
		return True
	else:
		#print("Don't find andy faces!")
		return False
Example #27
def images_to_csv_with_label(csv_file, images, label, width, height):
    csv_file.write("label," + ",".join(
        ["pixel" + str(i) for i in range(0,
                                         int(width) * int(height))]) + "\n")
    for i, nameFile in enumerate(images):
        print(nameFile)
        csv_file.write(label + "," + ",".join(
            str(x) for x in misc.img_to_1d_gray(
                Image(nameFile).resize(width, height))) + "\n")
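A hedged usage sketch for images_to_csv_with_label(); the directory, label and 28x28 target size are assumptions, and misc.img_to_1d_gray comes from the surrounding project.

import os

images = [os.path.join('digits/3', f) for f in os.listdir('digits/3')]
with open('train_3.csv', 'w') as csv_file:
    images_to_csv_with_label(csv_file, images, '3', 28, 28)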
Example #28
    def trataImagen(self):

        img = Image(self.rutaImagenReducida)
        result = img.colorDistance(
            (self.ajustes.r, self.ajustes.g, self.ajustes.b))
        result.save(self.rutaImagenTratada_Fase1)
        result = result.invert()
        result = result.binarize(float(self.ajustes.umbralBinarizado)).invert()
        result.save(self.rutaImagenTratada_Fase2)
Example #29
 def getSimpleCVImage(self):
     """Grabs a frame from the camera and returns a SimpleCV image object."""
     img = np.empty((self.vRes * self.hRes * 3), dtype=np.uint8)
     self.picam.capture(img, 'bgr', use_video_port=True)
     img = img.reshape((self.vRes, self.hRes, 3))
     img = Image(img, colorSpace=ColorSpace.RGB)
     img = img.rotate90()
     img = img.flipVertical()
     return img
Example #30
    def chance_blob_is_an_ellipse(self, blob):
        # Skip blobs that do not have their centroid within the blob.
        if (blob.distanceFrom(blob.centroid()) > blob.radius()):
            return 0.

        ebf = EllipseBestFit(blob.centroid(), blob.contour())
        img = Image(self.img_path)
        ebf.show_best_fit_model(img)
        return ebf.chance_is_elipse()