    def testColorImage(self):
        image = ip.Image()

        image.open(COLOR_IMAGE_PATH)

        self.assertIsNotNone(image._image)
        self.assertEqual(COLOR_IMAGE_HEIGHT, image._height)
        self.assertEqual(COLOR_IMAGE_WIDTH, image._width)
    def testGrayscaleImage(self):
        image = ip.Image(GRAYSCALE_IMAGE_PATH, grayscale=True)

        copy = image.copy()

        self.assertNotEqual(id(image._image), id(copy._image))
        self.assertEqual(image._height, copy._height)
        self.assertEqual(image._width, copy._width)
    def testWhiteImage(self):
        HEIGHT = 200
        WIDTH = 300
        image = ip.Image(height=HEIGHT, width=WIDTH, grayscale=True)

        self.assertIsNotNone(image._image)
        self.assertEqual(HEIGHT, image._height)
        self.assertEqual(WIDTH, image._width)
    def testColorImage(self):
        image = ip.Image(COLOR_IMAGE_PATH)

        copy = image.copy()

        self.assertNotEqual(id(image._image), id(copy._image))
        self.assertEqual(image._height, copy._height)
        self.assertEqual(image._width, copy._width)
Example No. 5
def mouseImage(event, x, y, flags, param):
    """Handles incoming mouse input to the Image window."""
    
    if event==cv.CV_EVENT_LBUTTONDOWN:  #Clicked the left button
        print "x, y are", x, y
        (b,g,r) = D.image[y,x]
        print "r,g,b is", int(r), int(g), int(b)
        (h,s,v) = D.hsv[y,x]
        print "h,s,v is", int(h), int(s), int(v)
        D.down_coord = (x,y)
        D.mouse_down = True
        
    elif event==cv.CV_EVENT_LBUTTONUP:  #Let go of the left button
        print "x, y are", x, y
        (b,g,r) = D.image[y,x]
        print "r,g,b is", int(r), int(g), int(b)
        (h,s,v)  = D.hsv[y,x]
        print "h,s,v is", int(h), int(s), int(v)
        D.up_coord = (x,y)
        D.mouse_down = False

        if D.mode == "clear":
            D.sections = []
        else:      #Start, add, or subtract -- put lower coordinates first
            x0, y0, x1, y1 = D.down_coord[0], D.down_coord[1], D.up_coord[0], D.up_coord[1]

            if x0 > x1:
                x0, x1 = x1, x0
            if y0 > y1:
                y0, y1 = y1, y0
            
            if D.mode == "start":
                D.sections = []
            mode_dict = {"start":'a', "add":'a', "subtract":'s'}
            D.sections.append([mode_dict[D.mode], (x0, y0), (x1, y1)])
            ImageProcessing.process_section(D)


    elif event == cv.CV_EVENT_RBUTTONDOWN:                      #Right click
        D.target_coord = (x, y)
        ImageProcessing.target_coord(D)


    elif D.mouse_down and event==cv.CV_EVENT_MOUSEMOVE:      #Mouse just moved
        D.up_coord = (x,y)
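
A callback like this only fires once it has been registered with the window; a minimal sketch of the wiring, assuming the legacy cv API used above and the 'Image' window this code shows elsewhere:

    cv.NamedWindow('Image')
    cv.SetMouseCallback('Image', mouseImage, None)  # route mouse events to the handler above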
    def testColorImage(self):
        image = ip.Image(COLOR_IMAGE_PATH)
        image[0, 0, 0] = 99
        image[0, 0, 1] = 98
        image[0, 0, 2] = 97

        self.assertEqual(99, image[0, 0, 0])
        self.assertEqual(98, image[0, 0, 1])
        self.assertEqual(97, image[0, 0, 2])
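
A minimal sketch of the indexing this test exercises, assuming ip.Image wraps a NumPy array in self._image (our reading of the tests, not a confirmed implementation):

    def __getitem__(self, key):
        # image[row, col, channel] reads straight through to the array
        return self._image[key]

    def __setitem__(self, key, value):
        self._image[key] = value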
Example No. 8
    def processLabeledImages(self):
        image = self.image_processing_list[0]
        boundaries = ImageProcessing.GetLineBounds(image.imageArray)
        line_images_array = []
        for i in range(len(boundaries)):
            x, y, w, h = boundaries[i]
            cutImage = image.cutImage(image.imageArray, x, y, x + w, y + h)
            line_images_array.append(cutImage)
        Main.userSetLabel(line_images_array, self)
Example No. 9
    def makeHistogram(self):
        if not os.path.exists("key"):
            os.makedirs("key")
        if not os.path.exists("result"):
            os.makedirs("result")
        image = iP.get_image(self.file_path)
        histogram(image)
        histogram_pixmap = QtGui.QPixmap('result/histogram.png').scaledToWidth(320)
        self.label_histogram.setPixmap(histogram_pixmap)
Example No. 10
    def detect_bbox(self, img):
        '''
        [note]   : Detect bounding boxes around characters in the image
        [input]  : img -> image in which to detect bounding boxes
        [return] : array of bounding boxes
        '''
        # Edge extraction with the Canny method
        img_canny = ip.canny(img, min_threshold=180, max_threshold=255)
        # Get the dilation/erosion kernel (3x3 for now)
        kernel = ip.get_kernel((3, 3), molphology="cross")
        # Dilate the Canny edges.
        # Too much dilation merges characters with their surroundings and lowers recognition accuracy;
        # too little and findContours with RETR_EXTERNAL cannot enclose the character areas cleanly.
        dilate_canny = ip.dilate(img_canny, kernel, iterations=3)
        _, contours, _ = ip.find_contours(dilate_canny,
                                          retr="external",
                                          chain="simple")

        bboxes = []
        cnt = 0

        for contour in contours:
            [x, y, w, h] = ip.get_bounding_rect(contour)

            if w < 10 or h < 10:
                continue

            if w > self.width // 2 or h > self.height // 2:
                continue

            if w > h * 2:
                continue

            bboxes.append({
                "id": cnt,
                "min_x": x,
                "min_y": y,
                "max_x": x + w,
                "max_y": y + h
            })
            cnt += 1

        return np.array(bboxes)
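
The three-value unpack from ip.find_contours mirrors OpenCV 3's findContours signature; OpenCV 4 returns only (contours, hierarchy). A version-agnostic sketch using cv2 directly, assuming the ip.* wrappers map onto these calls:

    import cv2

    edges = cv2.Canny(img, 180, 255)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(edges, kernel, iterations=3)
    # contours is second-to-last in both OpenCV 3 (image, contours, hierarchy)
    # and OpenCV 4 (contours, hierarchy)
    contours = cv2.findContours(dilated, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]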
Example No. 11
    def setupUi(self, MainWindow):
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(830, 630)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))

        self.label_image = QtGui.QLabel(self.centralwidget)
        self.label_image.setGeometry(QtCore.QRect(10, 0, 480, 480))
        self.label_image.setMinimumSize(QtCore.QSize(480, 480))
        self.label_image.setText(_fromUtf8(""))
        self.label_image.setObjectName(_fromUtf8("label_image"))
        if self.file_path != "":
            image = iP.get_image(self.file_path)
            pixmap = self.parse_image(image, 500)
            self.label_image.setPixmap(pixmap)

        self.button_encrypt = QtGui.QPushButton(self.centralwidget)
        self.button_encrypt.setGeometry(QtCore.QRect(300, 540, 100, 50))
        self.button_encrypt.setObjectName(_fromUtf8("button_encrypt"))
        self.button_encrypt.clicked.connect(self.encrypt)

        self.button_decrypt = QtGui.QPushButton(self.centralwidget)
        self.button_decrypt.setGeometry(QtCore.QRect(500, 540, 100, 50))

        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.button_decrypt.sizePolicy().hasHeightForWidth())

        self.button_decrypt.setSizePolicy(sizePolicy)
        self.button_decrypt.setObjectName(_fromUtf8("button_decrypt"))
        self.button_decrypt.clicked.connect(self.decrypt)

        self.button_chooseFile = QtGui.QPushButton(self.centralwidget)
        self.button_chooseFile.setGeometry(QtCore.QRect(100, 540, 100, 50))
        self.button_chooseFile.setObjectName(_fromUtf8("button_chooseFile"))
        self.button_chooseFile.clicked.connect(self.open_file)

        self.label_histogram = QtGui.QLabel(self.centralwidget)
        self.label_histogram.setGeometry(QtCore.QRect(500, 200, 320, 240))
        self.label_histogram.setMinimumSize(QtCore.QSize(320, 240))
        self.label_histogram.setText(_fromUtf8(""))
        self.label_histogram.setObjectName(_fromUtf8("label_histogram"))

        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 830, 21))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
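
The _fromUtf8 helper used throughout setupUi is the standard compatibility shim emitted by pyuic4; for reference:

    try:
        _fromUtf8 = QtCore.QString.fromUtf8
    except AttributeError:
        def _fromUtf8(s):
            return s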
Example No. 12
    def test_add_alpha(self):
        input_image = cv2.imread("test_1_crop_circle_0.png", 1)
        result_image = cv2.imread("test_1_add_alpha_0.png", -1)

        obj = ip.ImageProcessing()

        result = obj.add_alpha(input_image)

        boolean = np.all(result == result_image)

        self.assertTrue(boolean)
    def test_nonanimated_image(self):
        filename = "test_1"

        obj = ip.ImageProcessing()
        result = obj.image_processing(filename)

        comparable = cv2.imread("test_1_unanimated_image_0.png", -1)

        state = np.all(comparable == result[0])

        self.assertTrue(state)
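
Both tests above compare images with np.all(result == expected), which silently misreports when cv2.imread returns None (file missing) or when the two arrays merely broadcast; a safer shared helper (a sketch; the name images_equal is ours):

    def images_equal(a, b):
        # None-safe, shape-checked equality
        return a is not None and b is not None and np.array_equal(a, b)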
Example No. 14
    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, rawframe) = self.stream.read()
        self.frame = None
        self.monoFrame = None
        self.stopped = False
        fwidth = int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
        fheight = int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.preprocessor = ImageProcessing.VideoPreprocessor(fheight, fwidth)
        self.preprocessor.findSideToCrop()
        self.preprocessor.findCropPoints()
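
Classes shaped like this usually pair the constructor with a threaded read loop; a sketch of the assumed companion methods (start/update/stop are our names, following the common imutils-style pattern, not taken from this snippet):

    from threading import Thread

    def start(self):
        t = Thread(target=self.update)
        t.daemon = True
        t.start()
        return self

    def update(self):
        while not self.stopped:
            (self.grabbed, rawframe) = self.stream.read()
            # cropping via self.preprocessor would be applied here
            if self.grabbed:
                self.frame = rawframe

    def stop(self):
        self.stopped = True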
Example No. 15
    def test_get_size(self):
        obj = ip.ImageProcessing()

        for i in self.frame_count:
            with self.subTest(i=i):
                image = cv2.imread("test_2_frame_{}.png".format(i), 1)
                dimensions = obj.get_size(image)
                self.assertEqual(dimensions, (212, 314))
Example No. 16
def _getIconToPaint(configTable, availableSize):
    prcSize = configTable.get_or("sizePercent", 0.2)
    iconSize = QSize(int(availableSize.width() * prcSize),
                     int(availableSize.height() * prcSize))
    color = QtGui.QColor(configTable.get_or("color", "black"))
    if not color.isValid():
        color = Qt.black
    opacity = configTable.get_or("opacity", 0.5)

    pixmap = ImageProcessing.coloredPixmap(configTable.get("path"), iconSize,
                                           color, opacity)
    return pixmap
Example No. 17
    def test_trim_square(self):
        input_image = cv2.imread("test_1_pre_trim_square_0.png", 1)
        result_image = cv2.imread("test_1_trim_square_0.png", 1)

        obj = ip.ImageProcessing()

        center = 240, 320
        result = obj.trim_square(input_image, center, 240)

        boolean = np.all(result == result_image)

        self.assertTrue(boolean)
Example No. 18
    def __init__(self, path):
        image = iP.get_image(path)
        self.main_image = image
        self.image = image
        self.height = image.shape[0]
        self.width = image.shape[1]
        self.vector_height = []
        self.vector_width = []
        self.modulo_row = 0
        self.modulo_column = 0
        self.rotated_height_vector = []
        self.rotated_width_vector = []
Example No. 19
    def test_find_min(self):
        x = [212, 314, 0, -1]
        y = [314, 212, 0, -2]
        result = [0, 1, 1, 1]

        obj = ip.ImageProcessing()

        for i in range(len(result)):
            self.assertEqual(obj.find_min(x[i], y[i]), result[i])
Example No. 20
def performOCR(inputName):
    # Check if training database has been made before. If it was not, make a new one.
    csv_training_db = "Training_fonts.csv"
    if not os.path.isfile(csv_training_db):
        makeTrainingDataSetFromImage("./Train_image_bulks", "Training_fonts",
                                     "w")

    # Set up the model and perform training loop.
    column_names = [
        'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11',
        'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20', 'f21',
        'f22', 'f23', 'f24', 'f25', 'f26', 'f27', 'f28', 'f29', 'f30', 'f31',
        'f32', 'character'
    ]
    train_dataset = createTrainingDataSet(csv_training_db, column_names, 16,
                                          csv_training_db)
    train_dataset = train_dataset.map(pack_features_vector)
    model = createEmptyModel(32)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
    global_step = tf.train.get_or_create_global_step()
    doTrainingLoop(200, optimizer, train_dataset, global_step, model)

    # Extract features from the input image.
    class_names = [
        " ", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
        "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "a",
        "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o",
        "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "0", "1", "2",
        "3", "4", "5", "6", "7", "8", "9", "*", "/", "+", "-", "#", ".", ",",
        "(", ")", ":", "=", "@"
    ]
    processed = ImageProcessing.processImage(inputName)
    features = []
    for line in processed:
        line_f = []
        for char in line:
            if char.size != 0:
                line_f.append(GetFeatures.getFeatures(char))
        if line_f:
            features.append(line_f)

    with open("text_output.txt", "w") as f:
        for line_features in features:
            predict_dataset = tf.convert_to_tensor(line_features)
            predictions = model(predict_dataset)
            line = ""
            for logits in predictions:
                class_idx = tf.argmax(logits).numpy()
                p = tf.nn.softmax(logits)[class_idx]  # prediction confidence (currently unused)
                name = class_names[class_idx]
                line += name
            f.write(line + "\n")
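
The pack_features_vector mapped over the dataset follows the standard TensorFlow CSV-dataset pattern; a sketch, assuming createTrainingDataSet yields (dict-of-feature-columns, label) batches the way tf.data.experimental.make_csv_dataset does:

    def pack_features_vector(features, labels):
        # stack the 32 per-column feature tensors into one (batch, 32) matrix
        features = tf.stack(list(features.values()), axis=1)
        return features, labels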
Example No. 21
    def blockImage(self):
        """
        Uses the perPoint data to build a convex hull and applies the resulting
        logical mask to each slice.
        """
        self._logicSlice = ImageProcessing.LogicRegion(self._denoise_volume[:, :, self._denoise_volume.shape[2] // 2], self._convex_xy)
        self._block_volume = np.empty_like(self._denoise_volume)
        if not self._state:
            for i in range(self._denoise_volume.shape[2]):
                self._block_volume[:, :, i] = self._denoise_volume[:, :, i] * self._logicSlice
        else:
            for i in range(self._start, self._end + 1):
                self._block_volume[:, :, i] = self._denoise_volume[:, :, i] * self._logicSlice
Example No. 22
    def initCamera(self):
        self.ci = Camera.CameraInterface()  # Get the camera interface for the UI
        self.ci.openCamera()
        self.ci.setAttribute()
        firstFrame = self.ci.getFirstFrame()
        ip = ImageProcessing.ImageProcessing(firstFrame)
        if ip.doHoughTrans():
            self.HOUGH_LINE_Y = ip.C
        else:
            self.HOUGH_LINE_Y = self.IMG_HEIGHT/2

        self.ci.setRoi(self.ROIRANGE, self.HOUGH_LINE_Y - self.ROIRANGE/2)
        self.ci.startCapture()
Example No. 23
    def test_image_processing(self):
        result_image = cv2.imread("test_1_image_processing_0.png", -1)

        obj = ip.ImageProcessing()

        unanimated_image_result = cv2.imread("test_1_generate_circle_0.png", -1)
        obj.generate_circle = MagicMock(return_value=unanimated_image_result)

        result = obj.image_processing("test_1")

        boolean = np.all(result == result_image)

        self.assertTrue(boolean)
Example No. 24
    def test_add_alpha(self):
        for i in self.frame_count:
            with self.subTest(i=i):
                input_image = cv2.imread("test_2_crop_circle_{}.png".format(i), 1)
                result_image = cv2.imread("test_2_add_alpha_{}.png".format(i), -1)

                obj = ip.ImageProcessing()

                result = obj.add_alpha(input_image)

                boolean = np.all(result == result_image)

                self.assertTrue(boolean)
Example No. 25
def handle_image_data(data):
    """Handles incoming RGB image data from the Kinect."""
    
    #Get the incoming RGB image from the Kinect
    D.image = D.bridge.imgmsg_to_cv(data, "bgr8")

    if not D.created_images:
        #Initialize the additional images we need for processing
        ImageProcessing.initialize(D)
        D.created_images = True

    # Recalculate threshold image
    ImageProcessing.threshold_image(D)

    # Recalculate blob in main image
    ImageProcessing.find_biggest_region(D)

    # Check on the display of dragged section
    ImageProcessing.mouse_section(D)

    #Display target circle
    #ImageProcessing.target_coord(D)
    
    #Display info box on image
    ImageProcessing.draw_on_image(D)
    
    #Handle incoming key presses
    key_press = cv.WaitKey(5) & 255
    if key_press != 255:  #Handle only if it's a real key
        check_key_press(D, key_press)  #(255 = "no key pressed")

    #Update the displays:
    #Show main image in the image window
    cv.ShowImage('Image', D.image)

    #Show threshold image in the threshold window
    currentThreshold = getattr(D, D.current_threshold)
    cv.ShowImage('Threshold', currentThreshold)
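
A handler like this is driven by a ROS image subscription; a minimal sketch of the assumed wiring (the topic name is illustrative, the bridge matches D.bridge above):

    import rospy
    from sensor_msgs.msg import Image
    from cv_bridge import CvBridge

    rospy.init_node('image_processor')
    D.bridge = CvBridge()
    rospy.Subscriber('/camera/rgb/image_color', Image, handle_image_data)
    rospy.spin()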
Example No. 27
    def test_trim_square(self):
        for i in self.frame_count:
            with self.subTest(i=i):
                input_image = cv2.imread("test_2_pre_trim_square_{}.png".format(i), 1)
                result_image = cv2.imread("test_2_trim_square_{}.png".format(i), 1)

                obj = ip.ImageProcessing()

                center = 106, 157
                result = obj.trim_square(input_image, center, 106)

                boolean = np.all(result == result_image)

                self.assertTrue(boolean)
Example No. 28
def process_image(image):
    global time_last_email_sent

    faces = ImageProcessing.detect_faces(image)
    print("Processed image.")
    if len(faces) > 0:
        current = time.time()
        if current - time_last_email_sent < email_timeout:
            print("Email timeout in effect %.1f." %
                  (current - time_last_email_sent))
        else:
            print("Sending email.")
            send_email(image)
            time_last_email_sent = current
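
process_image relies on two module-level names for its rate limiting; a minimal sketch of the assumed setup (the timeout value is illustrative):

    import time

    email_timeout = 300.0        # seconds to wait between alert emails
    time_last_email_sent = 0.0   # epoch time of the last email sent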
    def test_animated_image(self):
        filename = "test_2"

        obj = ip.ImageProcessing()
        result = obj.image_processing(filename)

        for i, res in enumerate(result):
            with self.subTest(i=i):
                comparable = cv2.imread(
                    "test_2_animated_image_{}.png".format(i), -1)

                state = np.all(comparable == res)

                self.assertTrue(state)
Example No. 30
    def displayEdgeImg(self, oriImg):  # Handling of the edge display area
        if self.isMeasuring == 0:  # System is open but no measurement has been selected
            # The edge display area also shows the original image
            img = q2n.gray2qimage(oriImg)
            img = img.scaled(self.IMG_WIDTH/2, self.ROIRANGE/2, Qt.KeepAspectRatio)
            self.label_canny.setPixmap(QPixmap.fromImage(img))  # Write the image into the original-image display area
            return

        elif self.isMeasuring == 2:  # Demonstration only: display the edge image without computing on it
            ip = ImageProcessing.ImageProcessing(oriImg)
            img = cv2.resize(ip.cannyImg, (self.IMG_WIDTH / 2, self.ROIRANGE / 2), interpolation=cv2.INTER_NEAREST)
            img = q2n.gray2qimage(img)  # This display magnifies the edges; the data actually processed is unchanged
            self.label_canny.setPixmap(QPixmap.fromImage(img))  # Write the image into the edge display area
            return

        # The self.isMeasuring == 1 case: after the auto-measure button is clicked
        ip = ImageProcessing.ImageProcessing(oriImg)
        topCurve, bottomCurve = dp.getEdgeList(ip.cannyImg)
        # waveHeightByPeak, waveLengthByPeak = dp.getWaveParaByPeak(topCurve, bottomCurve)
        waveHeightBySin, waveLengthBySin = dp.getWaveParaBySin(topCurve, bottomCurve)

        # Display the wire's wave height
        waveShow = "%0.1f" % (waveHeightBySin * 6.6188 - 7.356)
        waveShow += "um"
        self.label_wave_height.setText(QString(str(waveShow)))
        img = cv2.resize(ip.cannyImg, (self.IMG_WIDTH / 2, self.ROIRANGE / 2), interpolation=cv2.INTER_NEAREST)
        img = q2n.gray2qimage(img)  # This display magnifies the edges; the data actually processed is unchanged
        self.label_canny.setPixmap(QPixmap.fromImage(img))  # Write the image into the edge display area

        self.cntImgMeasuring += 1
        self.progressBar.setValue(self.cntImgMeasuring)  # Update the progress bar
        self.waveHeightList.append(waveHeightBySin)
        self.waveLengthList.append(waveLengthBySin)  # Counters and data recording

        if self.cntImgMeasuring > self.rotatePeriod:  # Measurement time exceeds the set value
            self.completeMeasure()  # Measurement complete: reset and tidy up
Example No. 31
class TestImageProc(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass

    def setUp(self):
        self.ImageProc = ImageProcessing(False)

    def test_get_contours_no_diff(self):
        array = np.zeros([100, 100], dtype=np.uint8)
        contours = self.ImageProc.get_contours_of_moved_objects(array)
        print(len(contours))
        self.assertEqual(len(contours), 0)

    def test_get_contours_diff(self):
        contours = self.ImageProc.get_contours_of_moved_objects(
            np.zeros([100, 100], dtype=np.uint8))
        img = np.zeros([100, 100], dtype=np.uint8)
        img[:] = 255
        contours = self.ImageProc.get_contours_of_moved_objects(img)
        self.assertGreater(len(contours), 0)

    def test_get_median(self):
        self.ImageProc.get_contours_of_moved_objects(
            np.zeros([100, 100], dtype=np.uint8))
        self.ImageProc.get_contours_of_moved_objects(
            np.zeros([100, 100], dtype=np.uint8))
        img = np.zeros([100, 100], dtype=np.uint8)
        img[:] = 255
        self.ImageProc.get_contours_of_moved_objects(img)
        old_image = self.ImageProc.get_median()
        self.assertEqual(old_image[0][0], 0)

    @classmethod
    def tearDownClass(cls):
        pass
Example No. 32
    def test_image_processing(self):
        for i in self.frame_count:
            with self.subTest(i=i):
                result_image = cv2.imread("test_2_image_processing_{}.png".format(i), -1)

                obj = ip.ImageProcessing()

                unanimated_image_result = cv2.imread("test_2_generate_circle_{}.png".format(i), -1)
                obj.generate_circle = MagicMock(return_value=unanimated_image_result)

                result = obj.image_processing("test_2")

                boolean = np.all(result == result_image)

                self.assertTrue(boolean)
Example No. 33
    correct_circles = (('OnWater1.jpg', ((520, 312, 100),)),
                       ('OnDeck.jpg', ((731, 416, 106),)),
                       ('TwoBalls.jpg', ((519, 308, 106), (1534, 312, 106))))
    circle_comparisons = []
    num_balls = 0
    for file_name, measured_circles in correct_circles:
        # Keep track of the number of balls measured
        num_balls += len(measured_circles)
        
        image = InputOutput.get_image(file_name)
        InputOutput.display_image(image,"Starting Image")

        image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) # convert to HSV for thresholding
        image_hsv = cv2.blur(image_hsv, (9,9)) # blur to reduce noise
 
        thresh = ImageProcessing.thresholdRed(image_hsv)
        InputOutput.display_image(thresh,"Thresholded")
        thresh = ImageProcessing.removeNoise(thresh)    
    
        #calc_circles = ImageProcessing.houghTransform(thresh)
        calc_circles = ImageProcessing.blob_circle_detection(thresh)
        if calc_circles is None:
            calc_circles = []
        #calc_circles = ImageProcessing.get_blob_centroid(image, thresh, 5)
         
        InputOutput.display_image_with_circles(image,"Result!",calc_circles)
        
        # Evaluate success by comparing each circle position with the most
        # similar red ball's measured circle.

        # Print basic file facts
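
The evaluation step is elided here; a hypothetical sketch of the comparison it describes (closest_match_error is our helper, not from the original):

    def closest_match_error(calc_circle, measured_circles):
        # Euclidean center distance to the most similar measured circle
        cx, cy, _ = calc_circle
        return min(((cx - mx) ** 2 + (cy - my) ** 2) ** 0.5
                   for (mx, my, _r) in measured_circles)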
Example No. 34
import numpy as np
import cv2

import ImageProcessing

handImg = cv2.imread("../data/hand.png")
eyeImg = cv2.imread("../data/eye.png")
maskImg = cv2.imread("../data/mask.png")

# convert the color image to grayscale
mask = np.mean(maskImg, axis=2)

eyeImg = ImageProcessing.colorTransfer(handImg, eyeImg, mask)
blendedImg = ImageProcessing.blendImages(eyeImg, handImg, mask)

cv2.imwrite("../eyeHandBlend.jpg", blendedImg)
Example No. 35
    if shapes2D is not None:
        for shape2D in shapes2D:
            #3D model parameter initialization
            modelParams = projectionModel.getInitialParameters(mean3DShape[:, idxs3D], shape2D[:, idxs2D])

            #3D model parameter optimization
            modelParams = NonLinearLeastSquares.GaussNewton(modelParams, projectionModel.residual, projectionModel.jacobian, ([mean3DShape[:, idxs3D], blendshapes[:, :, idxs3D]], shape2D[:, idxs2D]), verbose=0)

            #rendering the model to an image
            shape3D = utils.getShape3D(mean3DShape, blendshapes, modelParams)
            renderedImg = renderer.render(shape3D)

            #blending of the rendered face with the image
            mask = np.copy(renderedImg[:, :, 0])
            renderedImg = ImageProcessing.colorTransfer(cameraImg, renderedImg, mask)
            cameraImg = ImageProcessing.blendImages(renderedImg, cameraImg, mask)
       

            #drawing of the mesh and keypoints
            if drawOverlay:
                drawPoints(cameraImg, shape2D.T)
                drawProjectedShape(cameraImg, [mean3DShape, blendshapes], projectionModel, mesh, modelParams, lockedTranslation)

    if writer is not None:
        writer.write(cameraImg)

    cv2.imshow('image', cameraImg)
    key = cv2.waitKey(1)

    if key == 27: