コード例 #1
0
ファイル: turn.py プロジェクト: renj/TrajMap
def get_G(x0, x1, y0, y1, width, height, side, data):
    """Rasterize each trajectory onto a grid and build an 8-connected
    pixel-adjacency graph plus a sparse pixel-by-trip incidence matrix.

    :param x0, x1, y0, y1: bounding box of the data (min/max x, min/max y)
    :param width, height: raster dimensions in cells
    :param side: edge length of one raster cell (data units per cell)
    :param data: DataFrame with a `tid` trip-id column; x and y are read
        from columns 1 and 2 of each row (see the pairwise loop below)
    :returns: (sag, G) where `sag` is a scipy.sparse CSC matrix with a 1
        at (pixel_index, tid) for every pixel touched by that trip, and
        `G` is a networkx Graph whose nodes are flattened pixel indices
        and whose edges connect 8-neighbouring lit pixels.
    """
    G = nx.Graph()
    min_x, max_x, min_y, max_y = x0, x1, y0, y1
    col = []
    row = []
    for tid in data.tid.unique():
        # Fresh blank 16-bit single-channel raster for this trip.
        themap = cv.CreateMat(height, width, cv.CV_16UC1)
        cv.SetZero(themap)
        # NOTE(review): this loop rebinds x0/y0/x1/y1, shadowing the
        # bounding-box parameters; min_x/min_y were saved above for that
        # reason.
        for p in trajmap.pairwise(data[data.tid == tid].values):
            x0, y0, x1, y1 = p[0][1], p[0][2], p[1][1], p[1][2]
            # The y axis is flipped: raster row 0 corresponds to max_y.
            oy = height - int((y0 - min_y) / side)
            ox = int((x0 - min_x) / side)
            dy = height - int((y1 - min_y) / side)
            dx = int((x1 - min_x) / side)
            cv.Line(themap, (ox, oy), (dx, dy), (32), 1, cv.CV_AA)
        node_set = set()
        # Walk the lit pixels; edges are only added toward pixels already
        # seen in node_set, which still covers every adjacent pair once.
        for y, x in zip(*np.matrix(themap).nonzero()):
            node_set.add((x, y))
            # Flattened pixel id; (height - y) undoes the flip above.
            a = x + (height - y) * width
            for _x, _y in [(x - 1, y), (x, y - 1), (x - 1, y - 1), (x + 1, y),
                           (x, y + 1), (x + 1, y + 1), (x - 1, y + 1),
                           (x + 1, y - 1)]:
                if (_x, _y) in node_set:
                    _a = _x + (height - _y) * width
                    G.add_edge(a, _a)
        # Record (pixel index, trip id) pairs for the incidence matrix.
        for tup in zip(*np.matrix(themap).nonzero()):
            row.append(tup[1] + (height - tup[0]) * width)
            col.append(tid)
    sag = scipy.sparse.csc_matrix(([1] * len(row), (row, col)),
                                  shape=(max(row) + 1, max(col) + 1))
    return sag, G
コード例 #2
0
def on_trackbar(position):
    """Trackbar callback: redraw the contour tree at the selected level."""

    # Fresh 3-channel canvas for this redraw.
    contours_image = cv.CreateImage((_SIZE, _SIZE), 8, 3)

    # Slider position 0..6 maps to display level -3..+3.
    levels = position - 3

    selected = contours
    if levels <= 0:
        # Non-positive level: skip three siblings ahead so the nearest
        # face is drawn, which looks more funny.
        selected = contours.h_next().h_next().h_next()

    # Clear the canvas, then draw external contours in red and internal
    # ones in green, and show the result.
    cv.SetZero(contours_image)
    cv.DrawContours(contours_image, selected, _red, _green, levels, 3,
                    cv.CV_AA, (0, 0))
    cv.ShowImage("contours", contours_image)
コード例 #3
0
ファイル: views.py プロジェクト: sardine2/RUNMAP
def url_jpg_contours(url):
    """Fetch a JPEG from `url`, run Canny edge detection and contour
    extraction, and return the contour points divided by a fixed scale
    as [lng_offset, lat_offset] pairs."""
    threshold = 100
    raw = urllib2.urlopen(url).read()
    header = cv.CreateMatHeader(1, len(raw), cv.CV_8UC1)
    cv.SetData(header, raw, len(raw))
    im = cv.DecodeImage(header, cv.CV_LOAD_IMAGE_COLOR)

    col_edge = cv.CreateImage((im.width, im.height), 8, 3)
    gray_im = cv.CreateImage((im.width, im.height), 8, 1)
    edge_im = cv.CreateImage((im.width, im.height), 8, 1)

    # Grayscale, then Canny with the high threshold at 3x the low one.
    cv.CvtColor(im, gray_im, cv.CV_BGR2GRAY)
    cv.Canny(gray_im, edge_im, threshold, threshold * 3, 3)
    cv.SetZero(col_edge)
    cv.Copy(im, col_edge, edge_im)  # keep original colours at edge pixels

    edge_array = np.asarray(edge_im[:])
    ret, edge_array = cv2.threshold(edge_array, 127, 255,
                                    cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(edge_array, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)

    scale = 10000.0
    # Flatten every contour point into a scaled [lng, lat] pair.
    return [[pt[0] / scale, pt[1] / scale]
            for contour in contours
            for inner in contour
            for pt in inner]
コード例 #4
0
def on_trackbar(position):
    """Trackbar callback: redraw the contour tree at the chosen level."""

    # Destination 3-channel canvas for the contours.
    canvas = cv.CreateImage((_SIZE, _SIZE), 8, 3)

    # Translate the slider position into the display level.
    levels = position - 3

    chosen = contours
    if levels <= 0:
        # Zero or negative level: jump to the nearest face so the
        # result looks more funny.
        chosen = contours.h_next().h_next().h_next()

    cv.SetZero(canvas)  # start from a blank image

    # External contours in red, internal ones in green.
    cv.DrawContours(
        canvas,               # dest image
        chosen,               # input contours
        _red,                 # color of external contour
        _green,               # color of internal contour
        levels,               # maxlevel of contours to draw
        _contour_thickness,
        cv.CV_AA,             # line type
        (0, 0))               # offset

    cv.ShowImage("contours", canvas)
コード例 #5
0
ファイル: ImageMixer.py プロジェクト: perchrn/TaktPlayer
    def __init__(self, internalResolutionX, internalResolutionY, numCameras, configHolder):
        """Set up the mixer canvas: one big preview region plus a grid of
        small per-camera regions laid out column by column on the right.

        :param internalResolutionX: full canvas width in pixels
        :param internalResolutionY: full canvas height in pixels
        :param numCameras: number of camera feeds to reserve mini slots for
        :param configHolder: config object; only getVideoDir() is used here
        """
        self._internalResolutionX =  internalResolutionX
        self._internalResolutionY =  internalResolutionY
        self._videoDir = configHolder.getVideoDir()

        self._selectedCameraId = 0
        self._currentNumCameras = numCameras

        # Mini previews are 1/5 of the canvas in each dimension.
        # NOTE(review): these divisions rely on Python 2 integer division
        # to yield ints -- confirm the interpreter version.
        self._miniSizeX = self._internalResolutionX / 5
        self._miniSizeY = self._internalResolutionY / 5
        self._numMiniRows = int(self._internalResolutionY / self._miniSizeY)
        self._numMiniColumns = 1 + int(numCameras / self._numMiniRows)
        self._maxImages = self._numMiniColumns * self._numMiniRows
        self._miniAreaWidth = self._numMiniColumns * self._miniSizeX
        # Whatever width remains left of the mini columns becomes the big
        # view, scaled to preserve aspect ratio and centred vertically.
        self._bigAreaWidth = self._internalResolutionX - self._miniAreaWidth
        self._bigAreaHeight = int((float(self._bigAreaWidth) / self._internalResolutionX) * self._internalResolutionY)
        self._bigAreaTop = int((self._internalResolutionY - self._bigAreaHeight) / 2)

        self._mixMat = createMat(self._internalResolutionX, self._internalResolutionY)
        self._convertedMat = createMat(self._internalResolutionX, self._internalResolutionY)
        cv.SetZero(self._mixMat)

        # Sub-rects share memory with _mixMat: drawing into a region
        # updates the composite image directly.
        self._bigRegion = cv.GetSubRect(self._mixMat, (0, self._bigAreaTop, self._bigAreaWidth, self._bigAreaHeight))
        self._smallImageAreaList = []
        self._cameraBaseFileNameList = []
        for i in range(self._maxImages):
            # Fill slots column by column, _numMiniRows per column.
            columnId = int(i / self._numMiniRows)
            xpos = self._bigAreaWidth + (columnId * self._miniSizeX)
            ypos = int(i % self._numMiniRows) * self._miniSizeY
            smallRegion = cv.GetSubRect(self._mixMat, (xpos, ypos, self._miniSizeX, self._miniSizeY))
            self._smallImageAreaList.append(smallRegion)
            self._cameraBaseFileNameList.append("cam" + str(i) + "_")

        self._debugCounter = 0
コード例 #6
0
def convertto():
    """Load the selected image, apply a linear transform
    (dst = src * 2.2 + 50.0) and display both versions."""
    source = cv.LoadImageM(getpath(), cv.CV_LOAD_IMAGE_COLOR)
    result = cv.CreateMat(source.rows, source.cols, source.type)
    cv.SetZero(result)
    # Linear brightness/contrast adjustment, saturating to the mat range.
    cv.ConvertScale(source, result, 2.2, 50.0)
    display(source, "Source")
    display(result, "Destination")
    cv.WaitKey(0)
コード例 #7
0
    def cal_fromcorners(self, good):
        """
        Run monocular camera calibration from detected chessboard corners.

        :param good: Good corner positions and boards
        :type good: [(corners, ChessboardInfo)]

        Fills in self.intrinsics, self.distortion, self.R, self.P and the
        undistortion map images self.mapx / self.mapy, then finishes by
        applying alpha = 0 via self.set_alpha.
        """
        boards = [b for (_, b) in good]

        # Pack image points, object points and per-view point counts into
        # the matrix layouts that cv.CalibrateCamera2 expects.
        ipts = self.mk_image_points(good)
        opts = self.mk_object_points(boards)
        npts = self.mk_point_counts(boards)

        intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
        # Distortion vector size depends on the chosen distortion model.
        if self.calib_flags & cv2.CALIB_RATIONAL_MODEL:
            distortion = cv.CreateMat(8, 1, cv.CV_64FC1)  # rational polynomial
        else:
            distortion = cv.CreateMat(5, 1, cv.CV_64FC1)  # plumb bob
        cv.SetZero(intrinsics)
        cv.SetZero(distortion)
        # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio
        intrinsics[0, 0] = 1.0
        intrinsics[1, 1] = 1.0
        cv.CalibrateCamera2(opts,
                            ipts,
                            npts,
                            self.size,
                            intrinsics,
                            distortion,
                            cv.CreateMat(len(good), 3, cv.CV_32FC1),  # rotation vectors (discarded)
                            cv.CreateMat(len(good), 3, cv.CV_32FC1),  # translation vectors (discarded)
                            flags=self.calib_flags)
        self.intrinsics = intrinsics
        self.distortion = distortion

        # R is identity matrix for monocular calibration
        self.R = cv.CreateMat(3, 3, cv.CV_64FC1)
        cv.SetIdentity(self.R)
        self.P = cv.CreateMat(3, 4, cv.CV_64FC1)
        cv.SetZero(self.P)

        # Undistortion maps are filled later (set_alpha recomputes them).
        self.mapx = cv.CreateImage(self.size, cv.IPL_DEPTH_32F, 1)
        self.mapy = cv.CreateImage(self.size, cv.IPL_DEPTH_32F, 1)
        self.set_alpha(0.0)
コード例 #8
0
def medianfiltering():
    """Display the global image path `k` next to a 9x9 median-filtered copy."""
    source = cv.LoadImageM(k, cv.CV_LOAD_IMAGE_COLOR)
    filtered = cv.CreateImage((source.width, source.height), 8, source.channels)
    cv.SetZero(filtered)
    cv.NamedWindow("Median Filtering", 1)
    cv.NamedWindow("After Filtering", 1)
    # 9x9 median filter: good at removing salt-and-pepper noise.
    cv.Smooth(source, filtered, cv.CV_MEDIAN, 9, 9)
    cv.ShowImage("Median Filtering", source)
    cv.ShowImage("After Filtering", filtered)
    cv.WaitKey(0)
コード例 #9
0
def on_trackbar(position):
    """Canny-threshold trackbar callback (operates on the module-level
    gray/edge/im/col_edge images)."""
    # Pre-blur, then invert into the edge buffer.
    cv.Smooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.Not(gray, edge)

    # Edge detection on the grayscale image; high threshold is 3x low.
    low = position
    cv.Canny(gray, edge, low, low * 3, 3)

    # Black out the colour buffer, then copy the original image through
    # the edge mask so only edge pixels keep their colour.
    cv.SetZero(col_edge)
    cv.Copy(im, col_edge, edge)

    cv.ShowImage(win_name, col_edge)
コード例 #10
0
def drawrandline():
    """Animate 100 random coloured lines converging on (300, 200), stamp
    a greeting, then wait for a key press and close the window."""
    rng = Random()
    canvas = cv.CreateImage((700, 1000), 8, 3)
    cv.SetZero(canvas)
    cv.NamedWindow("RandomViewer", 1)
    for _ in range(100):
        start = (rng.randrange(0, 700), rng.randrange(0, 1000))
        colour = (rng.randrange(0, 256), rng.randrange(0, 256),
                  rng.randrange(0, 256))
        cv.Line(canvas, start, (300, 200), colour, 1, 8, 0)
        cv.ShowImage("RandomViewer", canvas)
        cv.WaitKey(5)
    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 5, 10, 0, 1, 8)
    cv.PutText(canvas, "Hello OpenCV", (100, 200), font, (255, 255, 255))
    cv.ShowImage("RandomViewer", canvas)
    cv.WaitKey(0)
    cv.DestroyWindow("RandomViewer")
コード例 #11
0
    def on_trackbar(self, position):
        """Trackbar callback: run Canny at `position`, show the colour
        edges, then hand off to process_image."""
        src, buf = self.source_image, self.edge

        # Blur and invert into the edge buffer.
        cv.Smooth(src, buf, cv.CV_BLUR, 3, 3, 0)
        cv.Not(src, buf)

        # Edge detector on the grayscale source; high threshold = 3x low.
        cv.Canny(src, buf, position, position * 3, 3)

        # Blank the colour buffer and copy source pixels through the
        # edge mask so only edges remain visible.
        cv.SetZero(self.col_edge)
        cv.Copy(self.source_color, self.col_edge, buf)

        cv.ShowImage(win_name, self.col_edge)
        self.process_image(position)
コード例 #12
0
def on_contour(position):
    """Trackbar callback: redraw the contour tree at the chosen level."""
    # Slider position maps to display level position - 3.
    level = position - 3

    tree = contours
    if level <= 0:
        # Zero or negative value: jump to the nearest face so the
        # result looks more funny.
        tree = contours.h_next().h_next().h_next()

    # Blank the canvas, then draw external contours in white and
    # internal ones in green.
    cv.SetZero(contours_image)
    cv.DrawContours(contours_image, tree, _white, _green, level, 1,
                    cv.CV_AA, (0, 0))
    cv.ShowImage("contours", contours_image)
コード例 #13
0
# three windows that will open upon execution
cv.NamedWindow("Real",0)

# blank lists to store coordinates of blue blob
blue   = []


while(1):
	# captures feed from video in color
	color_image = cv.QueryFrame(capture)
	
	# scratch image for drawing overlays
	# NOTE(review): `frame` is not defined in this snippet -- presumably
	# set up earlier; confirm its size matches color_image
	imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3)
	
	# blank the scratch image, mirror the frame and smooth it
	cv.SetZero(imdraw)
	cv.Flip(color_image,color_image, 1)
	cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
	# binary mask of blue-ish pixels (helper defined elsewhere)
	imgbluethresh = getthresholdedimg(color_image)
	cv.Erode(imgbluethresh, imgbluethresh, None,  3)
	cv.Dilate(imgbluethresh, imgbluethresh, None, 10)
	# keep a copy, since FindContours modifies its input image
	img2 = cv.CloneImage(imgbluethresh)
	# contour extraction on the thresholded mask
	storage = cv.CreateMemStorage(0)
	contour = cv.FindContours(imgbluethresh, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
	
	# blank list into which points for bounding rectangles around blobs are appended
	points = []	
コード例 #14
0
def procImg(img, sideName, dispFlag):
    """Find the largest blue blob in `img`, draw its bounding box on the
    image in place, and return the box centroid.

    :param img: colour input frame (drawn on in place)
    :param sideName: label used in log messages and debug window titles
    :param dispFlag: when true, print each contour area and show small
        debug windows for the threshold and HSV images
    :returns: (centroidx, centroidy); (0, 0) means nothing was detected
    """
    # Scratch images of the same size as the input.
    imdraw = cv.CreateImage(cv.GetSize(img), 8, 3)
    imgSmooth = cv.CreateImage(cv.GetSize(img), 8, 3)

    cv.SetZero(imdraw)
    cv.Smooth(img, imgSmooth, cv.CV_GAUSSIAN, 3, 0)  # Gaussian filter the image
    imgbluethresh = getthresholdedimg(
        imgSmooth)  # get a colour-thresholded binary image
    # Clean up the mask: shrink noise, then grow the surviving blobs.
    cv.Erode(imgbluethresh, imgbluethresh, None, 3)
    cv.Dilate(imgbluethresh, imgbluethresh, None, 10)
    storage = cv.CreateMemStorage(0)
    contour = cv.FindContours(imgbluethresh, storage, cv.CV_RETR_CCOMP,
                              cv.CV_CHAIN_APPROX_SIMPLE)

    centroidx = 0
    centroidy = 0
    prevArea = 0
    pt1 = (0, 0)
    pt2 = (0, 0)

    while contour:
        # Bounding rectangle of each collection of contiguous points.
        bound_rect = cv.BoundingRect(list(contour))
        contour = contour.h_next()

        area = bound_rect[2] * bound_rect[3]

        if dispFlag:
            print("Area= " + str(area))

        # Keep the largest rectangle above the 3000 px noise floor.
        # BUG FIX: prevArea was never updated, so the *last* qualifying
        # contour (not the largest) used to win.
        if (area > prevArea and area > 3000):
            prevArea = area
            pt1 = (bound_rect[0], bound_rect[1])
            pt2 = (bound_rect[0] + bound_rect[2],
                   bound_rect[1] + bound_rect[3])

    # Draw bounding rectangle on the original image.
    cv.Rectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3)

    # Centroid of the bounding rectangle.
    centroidx = cv.Round((pt1[0] + pt2[0]) / 2)
    centroidy = cv.Round((pt1[1] + pt2[1]) / 2)

    if (centroidx == 0 or centroidy == 0):
        print("no blimp detected from " + sideName)
    else:
        print(sideName + " centroid x:" + str(centroidx))
        print(sideName + " centroid y:" + str(centroidy))

    print("")

    if dispFlag:
        # Quarter-size debug views of the threshold mask and HSV image.
        small_thresh = cv.CreateImage(
            (int(0.25 * cv.GetSize(imgbluethresh)[0]),
             int(0.25 * cv.GetSize(imgbluethresh)[1])), 8, 1)
        cv.Resize(imgbluethresh, small_thresh)
        cv.ShowImage(sideName + "_threshold", small_thresh)
        cv.WaitKey(100)

        # NOTE(review): imghsv is not defined in this function -- it is
        # presumably a module-level image set elsewhere; confirm.
        small_hsv = cv.CreateImage((int(
            0.25 * cv.GetSize(imghsv)[0]), int(0.25 * cv.GetSize(imghsv)[1])),
                                   8, 3)
        cv.Resize(imghsv, small_hsv)
        cv.ShowImage(sideName + "_hsv", small_hsv)
        cv.WaitKey(100)

    return (centroidx, centroidy)
コード例 #15
0
import cv2
from cv2 import cv
from database import *

# Load the user-selected image and prepare a zeroed destination image
# with the same size and channel count.
src_image = cv.LoadImageM(getpath(), cv.CV_LOAD_IMAGE_COLOR)
dst_image = cv.CreateImage((src_image.width, src_image.height), 8,
                           src_image.channels)
cv.SetZero(dst_image)


#src_image=cv.LoadImageM("C:\\Users\\raj\\Desktop\\image processing and computer vision\\pictures\\s4.jpg")
def display(img, name):
    """Show `img` in a window titled `name`, creating the window first."""
    cv.NamedWindow(name, 1)
    cv.ShowImage(name, img)
コード例 #16
0
def url_jpg_contours():
    url = 'http://i12.tietuku.com/05ef0b29030fa46c.jpg'
    filedata = urllib2.urlopen(url).read()
    imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
    print imagefiledata  #<cvmat(type=42424000 8UC1 rows=1 cols=48230 step=48230 )>
    cv.SetData(imagefiledata, filedata, len(filedata))
    im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    col_edge = cv.CreateImage((im.width, im.height), 8, 3)

    # convert to grayscale
    gray_im = cv.CreateImage((im.width, im.height), 8, 1)
    edge_im = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, gray_im, cv.CV_BGR2GRAY)
    cv.Canny(gray_im, edge_im, position, position * 3, 3)
    cv.SetZero(col_edge)
    # copy edge points
    cv.Copy(im, col_edge, edge_im)
    #ret, edge_jpg = cv2.imencode('.jpg', edge_im, [int(cv.CV_IMWRITE_JPEG_QUALITY), 80])
    edge_im_array = np.asarray(edge_im[:])

    print type(edge_im_array)
    #edge_jpg_gray = cv2.cvtColor(edge_im_array,cv2.COLOR_BGR2GRAY)
    ret, edge_im_array = cv2.threshold(edge_im_array, 127, 255,
                                       cv2.THRESH_BINARY)
    print type(edge_im_array)
    contours, hierarchy = cv2.findContours(
        edge_im_array, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )  #压缩水平方向,垂直方向,对角线方向的元素,只保留该方向的终点坐标,例如一个矩形轮廓只需4个点来保存轮廓信息
    contours_img = cv2.cvtColor(edge_im_array, cv2.COLOR_GRAY2BGR)
    url_str_len_contours = str(len(contours))  #取轮廊数量
    str_len_contours = str(len(contours))  #取轮廊数量

    #数据处理

    first_contours = contours[0]  #第一条轨迹坐标集合,数据格式为numpy.ndarry

    first_contours_list = first_contours.tolist()
    #print contours                #输出所有轨迹坐标集合
    #print contours[-1]          #输出最后一条轨迹坐标,数据格式为numpy.ndarry
    #print contours[0][0].tolist()[0] #输出第一条轨迹起始点坐标[[375 241]]并转化成list格式[[375,241]] |**.tolist()[0] 可以省掉一个中括号输出[375,241]
    #print contours[0][0].tolist()[0][0] #输出第一条轨迹起始点坐标的X坐标值。
    #print contours[0][0].tolist()[0][1] #输出第一条轨迹起始点坐标的Y坐标值。

    #print [i[0][0] for i in contours]
    #print [i[0][0] for i in contours[0]]

    scale = 1  #不缩放
    contours_img = cv2.resize(contours_img, (0, 0), fx=scale, fy=scale)
    print "Url_jpg_contours_num:%s" % url_str_len_contours
    for cnt in contours:
        color = np.random.randint(0, 255, (3)).tolist()
        cv2.drawContours(contours_img, [cnt * scale], 0, color, 1)
    cv2.imshow("URL_canny_img", edge_im_array)
    cv2.imshow("URL_contours_img", contours_img)

    #轮廊清单转文本输出
    edge_im_array_pix = str(np.size(edge_im_array))
    contours_img_pix = str(np.size(contours_img))

    ss = open("Contours" + ".log", 'w')
    ss.write("edge_im_array_pix nums:" + "%s" % edge_im_array_pix + "\n")
    ss.write("contours_img_pix nums:" + "%s" % contours_img_pix + "\n")
    ss.write("_url_contours num:" + "%s" % str_len_contours + "\n")
    for ele in contours:
        ss.write("%s" % ele)
    ss.write("**" * 50 + "\n")
    ss.close()
    #return contours
    cv2.waitKey(0)
コード例 #17
0
    def process_image(self, slider_pos):
        """
        This function finds contours, draws them and their approximation by ellipses.

        :param slider_pos: trackbar value; used both as the Canny low
            threshold (high = 3x) and as the binarization threshold.

        Side effects: when intensity mode is off, updates self.edge and
        self.col_edge; always shows the result in the "Result" window.
        """
        use_this = self.source_image
        if self.intensity == False:
            # Blur and invert into the edge buffer before edge detection.
            cv.Smooth(self.source_image, self.edge, cv.CV_BLUR, 9, 9, 0)
            cv.Not(self.source_image, self.edge)

            # run the edge detector on the gray scale image
            cv.Canny(self.source_image, self.edge, slider_pos, slider_pos * 3,
                     3)

            # reset the colour buffer
            cv.SetZero(self.col_edge)

            # copy edge points through the edge mask
            cv.Copy(self.source_color, self.col_edge, self.edge)
            use_this = self.edge

        stor = cv.CreateMemStorage()

        # Create the destination images
        image02 = cv.CloneImage(use_this)
        cv.Zero(image02)
        image04 = cv.CreateImage(cv.GetSize(self.source_image),
                                 cv.IPL_DEPTH_8U, 3)
        cv.Zero(image04)

        # Threshold the source image; cv.FindContours needs a binary input.
        cv.Threshold(use_this, image02, slider_pos, 255, cv.CV_THRESH_BINARY)

        # Find all contours.
        cont = cv.FindContours(image02, stor, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_NONE, (0, 0))

        for c in contour_iterator(cont):
            # Number of points must be more than or equal to 6 for cv.FitEllipse2
            if len(c) >= 6:
                # Copy the contour into an array of (x,y)s
                PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)
                for (i, (x, y)) in enumerate(c):
                    PointArray2D32f[0, i] = (x, y)

                # Draw the current contour in gray
                gray = cv.CV_RGB(100, 100, 100)
                cv.DrawContours(image04, c, gray, gray, 0, 1, 8, (0, 0))

                # Fits ellipse to current contour.
                (center, size, angle) = cv.FitEllipse2(PointArray2D32f)

                # Convert ellipse data from float to integer representation.
                center = (cv.Round(center[0]), cv.Round(center[1]))
                size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))

                # Draw ellipse in random color
                color = cv.CV_RGB(random.randrange(256), random.randrange(256),
                                  random.randrange(256))
                cv.Ellipse(image04, center, size, angle, 0, 360, color, 2,
                           cv.CV_AA, 0)

        # Show image. HighGUI use.
        cv.ShowImage("Result", image04)
コード例 #18
0
    #f = open(os.path.splitext(files)[0]+".svg",'w')
    #f.write( startSvgTag + base64String + endSvgTag)
    #print 'Converted '+ files + ' to ' + os.path.splitext(files)[0]+".svg"

    #log code canny to file for progress
    # NOTE(review): stage marker presumably consumed by an external
    # progress watcher -- confirm the consumer.
    with open('C:\\3d-Model\\bin\\segmentation_files\\progress.txt',
              'w') as myFile:
        myFile.write("canny")

    #CONTOUR  MAKING CODE

    # create the image where we want to display results
    image = cv.CreateImage((_SIZE, _SIZE), 8, 1)

    # start with an empty image
    # NOTE(review): this _SIZE image is immediately replaced below by one
    # sized to the loaded picture, so this allocation and SetZero look
    # redundant.
    cv.SetZero(image)

    im = cv.LoadImage("C:\\3d-Model\\bin\\segmentation_files\\pic_seg.jpg",
                      cv.CV_LOAD_IMAGE_COLOR)
    image = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, image, cv.CV_BGR2GRAY)
    # binarize: pixels above the threshold become `colour` (255)
    threshold = 51
    colour = 255
    cv.Threshold(image, image, threshold, colour, cv.CV_THRESH_BINARY)

    # create the window for the contours
    cv.NamedWindow("contours", cv.CV_WINDOW_NORMAL)

    # create the trackbar, to enable the change of the displayed level
    cv.CreateTrackbar("levels+3", "contours", 3, 7, on_contour)
コード例 #19
0
# Convert the input image to grayscale and show it.
cv.CvtColor(image, img_grayscale, cv.CV_RGB2GRAY)
cv.ShowImage("converted-image", img_grayscale)

img_contour = cv.CreateImage(img_size, 8, 3)

# find the contours (FindContours modifies img_grayscale in place)
contours = cv.FindContours(img_grayscale, storage, cv.CV_RETR_TREE,
                           cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
for i in contours:
    print i
# simplify the contour polygons (Douglas-Peucker, accuracy 8, recursive)
contours = cv.ApproxPoly(contours, storage, cv.CV_POLY_APPROX_DP, 8, 1)

levels = 2

# first, clear the image where we will draw contours
cv.SetZero(img_contour)

# initialisation
_contours = contours

# draw contours in red and green
cv.DrawContours(
    img_contour,  #dest image
    _contours,  #input contours
    _red,  #color of external contour
    _green,  #color of internal contour
    levels,  #maxlevel of contours to draw
    _contour_thickness,
    cv.CV_AA,  #line type
    (0, 0))  #offset
コード例 #20
0
    x0 = [0.]
    #print silly(x0, goodcorners)
    # Minimize the reprojection error `silly` over the polynomial
    # coefficients (presumably scipy.optimize.fmin -- confirm import).
    print "initial error", silly(x0, goodcorners)
    xopt = fmin(silly, x0, args=(goodcorners, ))
    print "xopt", xopt
    print "final error", silly(xopt, goodcorners)

    d = 1.0  # - sum(xopt)
    # Build the polynomial: optimized coefficients, then constant d, then 0.
    poly = numpy.poly1d(list(xopt) + [d, 0.])
    print "final polynomial"
    print poly

    # Show each board with corners remapped through the fitted polynomial.
    for co in goodcorners:
        scrib = cv.CreateMat(480, 640, cv.CV_8UC3)
        cv.SetZero(scrib)
        cv.DrawChessboardCorners(scrib, (num_x_ints, num_y_ints),
                                 [xf(pt, poly) for pt in co], True)
        cv.ShowImage("snap", scrib)
        cv.WaitKey()

    sys.exit(0)

# Alternate path: show the detected corners on each captured image as-is.
for (i, (img, (ok, co))) in enumerate(zip(images, corners)):
    scrib = cv.CreateMat(img.rows, img.cols, cv.CV_8UC3)
    cv.CvtColor(img, scrib, cv.CV_GRAY2BGR)
    if ok:
        cv.DrawChessboardCorners(scrib, (num_x_ints, num_y_ints), co, True)
    cv.ShowImage("snap", scrib)
    cv.WaitKey()
コード例 #21
0
    def stereoCalibrate(self):
        """Run OpenCV stereo calibration and rectification from the
        chessboard corner points stored in self.pointsArray1/2.

        Produces the left/right rectification maps self.map1x/self.map1y
        and self.map2x/self.map2y plus the disparity-to-depth matrix
        self.Q.

        NOTE(review): rows, columns, num_pts and nimages are read as
        globals here, not instance attributes -- confirm they are defined
        at module level before this method is called.
        """
        print " rows " + str(rows)
        print "columns " + str(columns)
        print "points " + str(num_pts)
        #         nimages = 8
        #         num_pts = 5

        #       (CV_MAT_DEPTH(_imagePoints1->type) == CV_32F || CV_MAT_DEPTH(_imagePoints1->type) == CV_64F)
        #         && ((_imagePoints1->rows == pointsTotal && _imagePoints1->cols*cn == 2) ||
        #               (_imagePoints1->rows == 1 && _imagePoints1->cols == pointsTotal && cn == 2))

        #       CV_32FC1 == CV_32F, CV_32FC2 == CV_32FC(2) == CV_MAKETYPE(CV_32F, 2), and CV_MAKETYPE(depth, n) == ((x&7)<<3) + (n-1).
        #       This means that the constant type is formed from the depth, taking the lowest 3 bits, and the number of channels minus 1,
        #       taking the next log2(CV_CN_MAX) bits.
        # Object points (3D), image points for each camera (2D), and the
        # per-view point counts, in the layouts StereoCalibrate expects.
        opts = cv.CreateMat(nimages * num_pts, 3, cv.CV_32FC1)
        ipts1 = cv.CreateMat(nimages * num_pts, 2, cv.CV_32F)
        ipts2 = cv.CreateMat(nimages * num_pts, 2, cv.CV_32F)
        #         ipts1 = cv.CreateMat(nimages * num_pts, 2, cv.CV_32F)
        #         ipts2 = cv.CreateMat(nimages * num_pts, 2, cv.CV_32F)
        npts = cv.CreateMat(nimages, 1, cv.CV_32SC1)
        for i in range(0, nimages):
            npts[i, 0] = num_pts

        # Create first Intrinsic Camera Matrix and Distortion Matrix
        intrinsics1 = cv.CreateMat(3, 3, cv.CV_64FC1)
        distortion1 = cv.CreateMat(4, 1, cv.CV_64FC1)

        cv.SetZero(intrinsics1)
        cv.SetZero(distortion1)
        intrinsics1[0, 0] = 1.0
        intrinsics1[1, 1] = 1.0

        # Create second Intrinsic Camera Matrix and Distortion Matrix
        intrinsics2 = cv.CreateMat(3, 3, cv.CV_64FC1)
        distortion2 = cv.CreateMat(4, 1, cv.CV_64FC1)

        cv.SetZero(intrinsics2)
        cv.SetZero(distortion2)
        # focal lengths have 1/1 ratio
        intrinsics2[0, 0] = 1.0
        intrinsics2[1, 1] = 1.0

        # Outputs: rotation, translation, essential and fundamental mats.
        # CV_64F CV_32FC1
        R = cv.CreateMat(3, 3, cv.CV_64F)
        T = cv.CreateMat(3, 1, cv.CV_64F)
        E = cv.CreateMat(3, 3, cv.CV_64F)
        F = cv.CreateMat(3, 3, cv.CV_64F)


        #         print type(points)
        #         imagePoints1 = np.asarray(points, np.uint8 , 3)
        #         imagePoints2 = np.asarray(points2, np.uint8, 3)
        #         print "HERE " + str(type(imagePoints2))
        #         imagePoints11 = imagePoints1.T
        #         imagePoints22 = imagePoints2.T
        #         imagePoints111 = cv.fromarray(imagePoints11)
        #         imagePoints222 = cv.fromarray(imagePoints22)

        print np.shape(opts)
#         print np.shape(points)
        print np.shape(self.pointsArray1)
        print type(self.pointsArray2)

        # Fill the object points with a planar 10-unit grid per view and
        # copy the detected corner coordinates for both cameras.
        for k in range(0, nimages):
            for i in range(0, columns):
                for j in range(0, rows):
                    print (j * columns + i)
                    opts[k * num_pts + j * columns + i, 0] = j * 10
                    opts[k * num_pts + j * columns + i, 1] = i * 10
                    opts[k * num_pts + j * columns + i, 2] = 0
                    ipts1[k * num_pts + j * columns + i, 0] = self.pointsArray1[k, j * columns + i][0]
                    ipts1[k * num_pts + j * columns + i, 1] = self.pointsArray1[k, j * columns + i][1]
                    ipts2[k * num_pts + j * columns + i, 0] = self.pointsArray2[k, j * columns + i][0]
                    ipts2[k * num_pts + j * columns + i, 1] = self.pointsArray2[k, j * columns + i][1]




        #         print np.shape(imagePoints111)

        # cv.StereoCalibrate(objectPoints, imagePoints1, imagePoints2, pointCounts, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E=None, F=None, term_crit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), flags=CV_CALIB_FIX_INTRINSIC)
        cv.StereoCalibrate(opts, ipts1, ipts2 , npts, intrinsics1, distortion1, intrinsics2, distortion2, self.size, R, T, E, F, (cv.CV_TERMCRIT_ITER + cv.CV_TERMCRIT_EPS, 30, 1e-6), cv.CV_CALIB_FIX_INTRINSIC)

        # Rectification: R1/R2 rotate each camera, P1/P2 are the new
        # projection matrices, Q maps disparity to depth.
        size = self.size
        R1 = cv.CreateMat(3, 3, cv.CV_64F)
        R2 = cv.CreateMat(3, 3, cv.CV_64F)
        P1 = cv.CreateMat(3, 4, cv.CV_64F)
        P2 = cv.CreateMat(3, 4, cv.CV_64F)
        self.Q = cv.CreateMat(4, 4, cv.CV_64F)
        (roi1, roi2) = cv.StereoRectify(intrinsics1, intrinsics2, distortion1, distortion2, size, R , T, R1, R2, P1, P2, self.Q)

        [mat_w, mat_h] = self.size

        self.map1x = cv.CreateMat(mat_h, mat_w, cv.CV_32FC1)
        self.map2x = cv.CreateMat(mat_h, mat_w, cv.CV_32FC1)
        # Right maps
        self.map1y = cv.CreateMat(mat_h, mat_w, cv.CV_32FC1)
        self.map2y = cv.CreateMat(mat_h, mat_w, cv.CV_32FC1)
        # cv.InitUndistortMap(intrinsics, distortion, mapx, mapy)
        cv.InitUndistortRectifyMap(intrinsics1, distortion1, R1, P1, self.map1x, self.map1y)
        cv.InitUndistortRectifyMap(intrinsics2, distortion2, R2, P2, self.map2x, self.map2y)
コード例 #22
0
    def tangent(self, dx, dy, Mask=None, method="cv"):
        '''Calculate the gradient orientation of each pixel that is in Mask.

        :param dx: horizontal derivative image
        :param dy: vertical derivative image (same size as dx)
        :param Mask: optional mask; orientation is computed only where
            Mask > 0 (everywhere when Mask is None)
        :param method: "slow" = per-pixel Python loop,
            "cv" = vectorised via numpy (default),
            anything else = masked-copy variant of the per-pixel loop
            (requires Mask)
        :returns: IPL_DEPTH_32F image of orientations (atan2 scaled by
            self.constant; the "cv" path folds the result into [0, 360))
        '''
        tangent = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_8U, 1)
        divSize = cv.GetSize(dx)
        # NOTE: despite the name, this accumulator is 32-bit float.
        tangent16U = cv.CreateImage(divSize, cv.IPL_DEPTH_32F, 1)
        cv.SetZero(tangent16U)
        if method == "slow":
            for x in range(divSize[0]):
                for y in range(divSize[1]):
                    # BUG FIX: was `Mask == None`; identity comparison is
                    # required so an array-like mask is not compared
                    # elementwise.
                    if Mask is None:
                        tang = math.atan2(dy[y, x], dx[y, x]) * self.constant
                        tangent16U[y, x] = int(tang)
                    elif Mask[y, x] > 0:
                        tang = math.atan2(dy[y, x], dx[y, x]) * self.constant
                        tangent16U[y, x] = int(tang)
                    elif Mask[y, x] == 0:
                        tangent16U[y, x] = 0
        elif method == "cv":
            # Vectorised path: copy the masked derivatives into matrices,
            # compute arctan2 over flattened numpy arrays, shift the range
            # to [0, 360) degrees, and copy the result back into an image.

            (width, height) = cv.GetSize(dx)
            matdx = cv.CreateMat(height, width, cv.CV_16SC1)
            matdy = cv.CreateMat(height, width, cv.CV_16SC1)
            cv.SetZero(matdx)
            cv.SetZero(matdy)
            cv.Copy(dx, matdx, Mask)

            cv.Copy(dy, matdy, Mask)
            a = numpy.asarray(matdx)
            b = numpy.asarray(matdy)

            # Reshape to one dimension for the arctan2 call.
            ar = numpy.reshape(a, a.size)
            br = numpy.reshape(b, b.size)

            # Arc tangent with quadrant information, scaled by constant.
            c = numpy.arctan2(br, ar)

            z = numpy.multiply(c, self.constant)
            result = z.astype(numpy.int32)

            # Fold negative angles into [0, 360).
            result[result < 0] += 360
            tang = numpy.reshape(result, (height, width))
            tang = tang.astype(numpy.float32)
            mat = cv.fromarray(tang)
            cv.Copy(mat, tangent16U)
        else:
            # Masked-copy variant: zero out unmasked pixels first, then
            # run the per-pixel loop. Requires Mask to be provided.
            dxTemp = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_16S, 1)
            dyTemp = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_16S, 1)
            zero = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_16S, 1)
            # BUG FIX: CreateImage does not initialize pixel data, so the
            # additions below read uninitialized memory without this.
            cv.SetZero(zero)

            cv.Add(zero, dx, dxTemp, Mask)
            cv.Add(zero, dy, dyTemp, Mask)

            dx = dxTemp
            dy = dyTemp

            for x in range(divSize[0]):
                for y in range(divSize[1]):
                    if Mask[y, x] == 0:
                        tangent16U[y, x] = 0
                        continue
                    tang = math.atan2(dy[y, x], dx[y, x]) * self.constant
                    tangent16U[y, x] = int(tang)

        if self.visualize:
            # Equalised 8-bit view for debugging; closes on any key press.
            cv.ConvertScaleAbs(tangent16U, tangent)
            cv.EqualizeHist(tangent, tangent)
            while True:
                cv.NamedWindow("Tangent")
                cv.ShowImage("Tangent", tangent)
                c = cv.WaitKey(5)
                if c > 0:
                    break
        cv.DestroyAllWindows()

        return tangent16U
コード例 #23
0
ファイル: kde.py プロジェクト: yy2lyx/Trace
    def create_kde_with_trips(self, all_trips, cell_size, gaussian_blur):
        """Build a KDE-style intensity map from a collection of GPS trips.

        Rasterizes every trip's location sequence onto a grid of ``cell_size``
        meters per cell, accumulates all trips into a 16-bit intensity map,
        applies a Gaussian blur, and writes three artifacts under the
        configured output prefix:

        * ``bounding_boxes/bounding_box_1m.txt`` — data bounding box
          ("min_lat min_lon max_lat max_lon")
        * ``raw_data.png`` — all trip polylines drawn at full intensity
        * ``kde.png`` — the blurred aggregate intensity map (the KDE)

        Args:
            all_trips: non-empty sequence of trip objects; each trip exposes
                ``locations``, a sequence of objects with ``latitude`` and
                ``longitude`` attributes.
            cell_size: grid cell edge length in meters.
            gaussian_blur: Gaussian kernel size (pixels) for cv.Smooth.

        Returns:
            None. All results are written to disk.
        """
        # NOTE: the original code did `print("...") + str(x)`, which raises
        # TypeError in Python 3 (print returns None); fixed to concatenate
        # inside the call.
        print("cell size: " + str(cell_size))
        print("gaussian blur: " + str(gaussian_blur))
        conf = ConfigurationManager()
        prefix = conf.getProperty(Constants.output, Constants.prefix)

        sys.stdout.write("\nFinding bounding box... ")
        sys.stdout.flush()

        # Seed the bounding box from the first point of the first trip.
        min_lat = all_trips[0].locations[0].latitude
        max_lat = all_trips[0].locations[0].latitude
        min_lon = all_trips[0].locations[0].longitude
        max_lon = all_trips[0].locations[0].longitude

        # Find the map bounds; they are saved under temp/bounding_boxes/.
        for trip in all_trips:
            for location in trip.locations:
                min_lat = min(min_lat, location.latitude)
                max_lat = max(max_lat, location.latitude)
                min_lon = min(min_lon, location.longitude)
                max_lon = max(max_lon, location.longitude)

        print("done.")

        # Pad the bounding box slightly so edge points are not clipped.
        min_lat -= 0.0003
        max_lat += 0.0003
        min_lon -= 0.0005
        max_lon += 0.0005

        diff_lat = max_lat - min_lat
        diff_lon = max_lon - min_lon

        trip_file = open(prefix + "bounding_boxes/bounding_box_1m.txt", 'w')
        bound_str = str(min_lat) + " " + str(min_lon) + " " + str(
            max_lat) + " " + str(max_lon)
        trip_file.write(bound_str)
        trip_file.close()

        # Grid dimensions in pixels, one cell per cell_size meters.
        width = int(diff_lon * spatialfunclib.METERS_PER_DEGREE_LONGITUDE /
                    cell_size)
        height = int(diff_lat * spatialfunclib.METERS_PER_DEGREE_LATITUDE /
                     cell_size)
        yscale = height / diff_lat  # pixels per degree of latitude
        xscale = width / diff_lon  # pixels per degree of longitude

        # Aggregate intensity map for all traces (16-bit to avoid overflow
        # when many 8-bit per-trip rasters are summed).
        themap = cv.CreateMat(height, width, cv.CV_16UC1)
        cv.SetZero(themap)

        ## Build an aggregate intensity map from all the edges

        trip_counter = 1

        for trip in all_trips:

            if ((trip_counter % 10 == 0) or (trip_counter == len(all_trips))):
                sys.stdout.write("\rCreating histogram (trip " +
                                 str(trip_counter) + "/" +
                                 str(len(all_trips)) + ")... ")
                sys.stdout.flush()
            trip_counter += 1

            # Per-trip 8-bit raster, widened to 16-bit before accumulation.
            temp = cv.CreateMat(height, width, cv.CV_8UC1)
            cv.SetZero(temp)
            temp16 = cv.CreateMat(height, width, cv.CV_16UC1)
            cv.SetZero(temp16)

            for (orig, dest) in pairwise(trip.locations):
                # Image y-axis points down, so flip latitude into pixel rows.
                oy = height - int(yscale * (orig.latitude - min_lat))
                ox = int(xscale * (orig.longitude - min_lon))
                dy = height - int(yscale * (dest.latitude - min_lat))
                dx = int(xscale * (dest.longitude - min_lon))
                cv.Line(temp, (ox, oy), (dx, dy), (32), 1, cv.CV_AA)
            # cv.Line args: image, first point, second point, color,
            # thickness, line type, shift (fractional bits of coordinates).
            # Line type: 8 (8-connected), 4 (4-connected), CV_AA (anti-aliased).

            # Accumulate this trip into themap.
            cv.ConvertScale(temp, temp16, 1, 0)
            # ConvertScale(src, dst, scale, shift): converts one array into
            # another with an optional linear transform; used here simply to
            # copy the 8-bit raster into a 16-bit array.
            cv.Add(themap, temp16, themap)

        lines = cv.CreateMat(height, width, cv.CV_8U)
        cv.SetZero(lines)

        print("done.")

        trip_counter = 1

        for trip in all_trips:

            if ((trip_counter % 10 == 0) or (trip_counter == len(all_trips))):
                sys.stdout.write("\rCreating drawing (trip " +
                                 str(trip_counter) + "/" +
                                 str(len(all_trips)) + ")... ")
                sys.stdout.flush()
            trip_counter += 1

            # Draw every trip at full intensity for the raw-data visualization.
            for (orig, dest) in pairwise(trip.locations):
                oy = height - int(yscale * (orig.latitude - min_lat))
                ox = int(xscale * (orig.longitude - min_lon))
                dy = height - int(yscale * (dest.latitude - min_lat))
                dx = int(xscale * (dest.longitude - min_lon))
                cv.Line(lines, (ox, oy), (dx, dy), (255), 1, cv.CV_AA)

        # save the lines
        cv.SaveImage(prefix + "raw_data.png", lines)
        print("done.")
        sys.stdout.write("Smoothing... ")
        sys.stdout.flush()

        # Gaussian blur turns the accumulated histogram into the KDE surface.
        cv.Smooth(themap, themap, cv.CV_GAUSSIAN, gaussian_blur, gaussian_blur)
        cv.SaveImage(prefix + "kde.png", themap)

        print("done.")
        print("\nKDE generation complete.")
コード例 #24
0
ファイル: ImageMixer.py プロジェクト: perchrn/TaktPlayer
    def _wipeMix(self, wipeMode, wipeConfig, level, image1, image2, mixMat):
        """Combine image2 with image1 using a wipe-style transition.

        Args:
            wipeMode: one of WipeMode.Push / Noize / Zoom / Flip; any other
                mode falls through and returns image2 unchanged.
            wipeConfig: mode-specific parameter (push direction in [0,1),
                noise scale, (xMove, yMove) tuple for zoom, or flip rotation).
            level: transition progress in [0.0, 1.0].
            image1: outgoing image, or None when there is no background.
            image2: incoming image.
            mixMat: scratch image used (and possibly returned) for the result.

        Returns:
            The blended image (may be mixMat, image1, or image2 depending on
            the mode and inputs).
        """
        if wipeMode == WipeMode.Push:
            wipeDirection = wipeConfig
            # First half: position the slice of image2 being pushed in.
            # Direction quadrants: <0.25 left, <0.5 right, <0.75 up, else down.
            if wipeDirection < 0.25:
                wipePosX = int(self._internalResolutionX * level)
                sourceLeft = self._internalResolutionX - wipePosX
                sourceTop = 0
                sourceWidth = wipePosX
                sourceHeight = self._internalResolutionY
                destLeft = 0
                destTop = 0
            elif wipeDirection < 0.5:
                wipePosX = self._internalResolutionX - int(self._internalResolutionX * level)
                sourceLeft = 0
                sourceTop = 0
                sourceWidth = self._internalResolutionX - wipePosX
                sourceHeight = self._internalResolutionY
                destLeft = self._internalResolutionX - (self._internalResolutionX - wipePosX)
                destTop = 0
            elif wipeDirection < 0.75:
                wipePosY = int(self._internalResolutionY * level)
                sourceLeft = 0
                sourceTop = self._internalResolutionY - wipePosY
                sourceWidth = self._internalResolutionX
                sourceHeight = wipePosY
                destLeft = 0
                destTop = 0
            else:
                wipePosY = self._internalResolutionY - int(self._internalResolutionY * level)
                sourceLeft = 0
                sourceTop = 0
                sourceWidth = self._internalResolutionX
                sourceHeight = self._internalResolutionY - wipePosY
                destLeft = 0
                destTop = self._internalResolutionY - (self._internalResolutionY - wipePosY)
            destWidth = sourceWidth
            destHeight = sourceHeight
            src_region = cv.GetSubRect(image2, (sourceLeft, sourceTop, sourceWidth, sourceHeight))
            if image1 is None:
                # No background image: push image2 in over a black frame.
                # BUGFIX: the original computed dst_region here but never
                # copied into it, returning an all-black mixMat; copy the
                # incoming slice before returning (matches the Noize branch's
                # SetZero/Copy/return pattern).
                cv.SetZero(mixMat)
                dst_region = cv.GetSubRect(mixMat, (destLeft, destTop, destWidth, destHeight))
                cv.Copy(src_region, dst_region)
                return mixMat
            else:
                dst_region = cv.GetSubRect(mixMat, (destLeft, destTop, destWidth, destHeight))
            cv.Copy(src_region, dst_region)
            # Second half: position the remaining slice of image1 being
            # pushed out, using the same direction quadrants.
            if wipeDirection < 0.25:
                wipePosX = int(self._internalResolutionX * level)
                sourceLeft = wipePosX
                sourceTop = 0
                sourceWidth = self._internalResolutionX - wipePosX
                sourceHeight = self._internalResolutionY
                destLeft = wipePosX
                destTop = 0
            elif wipeDirection < 0.5:
                wipePosX = self._internalResolutionX - int(self._internalResolutionX * level)
                sourceLeft = 0
                sourceTop = 0
                sourceWidth = wipePosX
                sourceHeight = self._internalResolutionY
                destLeft = 0
                destTop = 0
            elif wipeDirection < 0.75:
                wipePosY = int(self._internalResolutionY * level)
                sourceLeft = 0
                sourceTop = wipePosY
                sourceWidth = self._internalResolutionX
                sourceHeight = self._internalResolutionY - wipePosY
                destLeft = 0
                destTop = wipePosY
            else:
                wipePosY = self._internalResolutionY - int(self._internalResolutionY * level)
                sourceLeft = 0
                sourceTop = 0
                sourceWidth = self._internalResolutionX
                sourceHeight = wipePosY
                destLeft = 0
                destTop = 0
            destWidth = sourceWidth
            destHeight = sourceHeight
            src_region = cv.GetSubRect(image1, (sourceLeft, sourceTop, sourceWidth, sourceHeight))
            dst_region = cv.GetSubRect(mixMat, (destLeft, destTop, destWidth, destHeight))
            cv.Copy(src_region, dst_region)
            return mixMat
        if wipeMode == WipeMode.Noize:
            # Reveal image2 through a noise mask that grows with `level`.
            scaleArg = wipeConfig
            noizeMask = getNoizeMask(level, self._internalResolutionX, self._internalResolutionY, 1.0 + (19.0 * scaleArg))
            if image1 is None:
                cv.SetZero(mixMat)
                cv.Copy(image2, mixMat, noizeMask)
                return mixMat
            cv.Copy(image2, image1, noizeMask)
            return image1
        if wipeMode == WipeMode.Zoom:
            # Scale image2 into a sub-rectangle that grows with `level`,
            # anchored according to (xMove, yMove).
            xMove, yMove = wipeConfig
            xSize = int(self._internalResolutionX * level)
            ySize = int(self._internalResolutionY * level)
            xPos = int((self._internalResolutionX - xSize) * xMove)
            yPos = int((self._internalResolutionY - ySize) * (1.0 - yMove))
            cv.SetZero(mixMat)
            dst_region = cv.GetSubRect(mixMat, (xPos, yPos, xSize, ySize))
            cv.Resize(image2, dst_region, cv.CV_INTER_CUBIC)
            if image1 is None:
                return mixMat
            # Mask out the zoomed region and paste it over image1.
            cv.SetZero(self._mixMixMask1)
            dst_region = cv.GetSubRect(self._mixMixMask1, (xPos, yPos, xSize, ySize))
            cv.Set(dst_region, 256)
            cv.Copy(mixMat, image1, self._mixMixMask1)
            return image1
        if wipeMode == WipeMode.Flip:
            # Affine "card flip": rotate the image corners about the center;
            # first half of the transition shows image2, second half image1.
            flipRotation = wipeConfig
            rotation = 1.0 - level
            srcPoints = ((0.0, 0.0), (0.0, self._internalResolutionY), (self._internalResolutionX, 0.0))
            destPoint1 = (0.0, 0.0)
            destPoint2 = (0.0, self._internalResolutionY)
            destPoint3 = (self._internalResolutionX, 0.0)
            if image1 is None:
                rotation = rotation / 2
            if rotation < 0.5:
                flipAngle = rotation / 2
            else:
                flipAngle = level / 2
            destPoint1 = rotatePoint(flipRotation, destPoint1[0], destPoint1[1], self._halfResolutionX, self._halfResolutionY, flipAngle)
            destPoint2 = rotatePoint(flipRotation, destPoint2[0], destPoint2[1], self._halfResolutionX, self._halfResolutionY, flipAngle)
            destPoint3 = rotatePoint(flipRotation, destPoint3[0], destPoint3[1], self._halfResolutionX, self._halfResolutionY, flipAngle)
            dstPoints = ((destPoint1[0], destPoint1[1]), (destPoint2[0], destPoint2[1]), (destPoint3[0], destPoint3[1]))
            zoomMatrix = cv.CreateMat(2, 3, cv.CV_32F)
            cv.GetAffineTransform(srcPoints, dstPoints, zoomMatrix)
            if rotation < 0.5:
                cv.WarpAffine(image2, mixMat, zoomMatrix)
            else:
                cv.WarpAffine(image1, mixMat, zoomMatrix)
            cv.Set(self._mixMixMask2, (255, 255, 255))
            cv.WarpAffine(self._mixMixMask2, self._mixMixMask1, zoomMatrix)
            return mixMat
        # Unknown mode: pass the incoming image through unchanged.
        return image2
コード例 #25
0
def _find_corr(matches,
               hom=False,
               data=None,
               MAX_PIXEL_DEVIATION=MAX_PIXEL_DEVIATION,
               FALLBACK_PIXEL_DEVIATIONS=FALLBACK_PIXEL_DEVIATIONS,
               rotation_filter_only=False,
               ROT_THRESHOLD_RADIANS=ROT_THRESHOLD_RADIANS):
    """Estimate geometric correspondence between query and database features.

    With ``hom=False``, fits a fundamental matrix via RANSAC (or skips the
    fit entirely when ``rotation_filter_only``), then drops inliers whose
    rotation delta exceeds ``ROT_THRESHOLD_RADIANS``.  With ``hom=True``,
    fits a homography via RANSAC, retrying with progressively looser
    ``FALLBACK_PIXEL_DEVIATIONS`` thresholds and finally LMedS until
    ``isHomographyGood`` accepts the result.

    Args:
        matches: iterable of dicts with 'query' and 'db' point entries
            ((x, y, ...) sequences; only the first two coords are used).
        hom: fit a homography instead of a fundamental matrix.
        data: optional dict; 'success' is set True/False on it as a side
            channel for the homography path.
        MAX_PIXEL_DEVIATION, FALLBACK_PIXEL_DEVIATIONS,
        ROT_THRESHOLD_RADIANS: thresholds, defaulting to module constants.
        rotation_filter_only: skip RANSAC and treat every match as an
            inlier before rotation filtering (fundamental-matrix path only).

    Returns:
        (F, inliers): the 3x3 cv matrix and a per-match inlier mask
        (empty list when there are too few matches).
    """
    # BUGFIX: the default was `data={}` — a mutable default shared across
    # all calls.  Use a None sentinel and create a fresh dict per call.
    if data is None:
        data = {}
    data['success'] = False  # by default
    matches = list(matches)
    F = cv.CreateMat(3, 3, cv.CV_64F)
    cv.SetZero(F)
    # A homography needs at least 4 point pairs; bail out early.
    if not matches or (hom and len(matches) < 4):
        return F, []
    inliers = cv.CreateMat(1, len(matches), cv.CV_8U)
    cv.SetZero(inliers)
    pts_q = cv.CreateMat(len(matches), 1, cv.CV_64FC2)
    pts_db = cv.CreateMat(len(matches), 1, cv.CV_64FC2)
    for i, m in enumerate(matches):
        cv.Set2D(pts_q, i, 0, cv.Scalar(*m['query'][:2]))
        cv.Set2D(pts_db, i, 0, cv.Scalar(*m['db'][:2]))

    # ransac for fundamental matrix. rotation filtering
    # TODO multiple RANSAC to get smaller/larger features
    if not hom:
        if rotation_filter_only:
            inliers = [1] * len(matches)
        else:
            cv.FindFundamentalMat(pts_q,
                                  pts_db,
                                  F,
                                  status=inliers,
                                  param1=MAX_PIXEL_DEVIATION,
                                  param2=CONFIDENCE_LEVEL)
            inliers = np.asarray(inliers)[0]
        # assumes roll(db) == roll(query)
        for i, m in enumerate(matches):
            if inliers[i]:
                if abs(rot_delta(m, 0)) > ROT_THRESHOLD_RADIANS:
                    inliers[i] = False
        return F, inliers

    # homography only. no rotation check
    cv.FindHomography(pts_db,
                      pts_q,
                      F,
                      method=cv.CV_RANSAC,
                      ransacReprojThreshold=MAX_PIXEL_DEVIATION,
                      status=inliers)

    ### try rounds of homography calculations ###
    # Retry with progressively looser reprojection thresholds until the
    # homography passes the quality check or the fallbacks are exhausted.
    i = 0
    while i < len(FALLBACK_PIXEL_DEVIATIONS):
        if not isHomographyGood(F):
            cv.FindHomography(
                pts_db,
                pts_q,
                F,
                method=cv.CV_RANSAC,
                ransacReprojThreshold=FALLBACK_PIXEL_DEVIATIONS[i],
                status=inliers)
            i += 1
        else:
            break
    if i >= len(FALLBACK_PIXEL_DEVIATIONS):
        # All RANSAC rounds rejected: last resort is least-median-of-squares.
        cv.FindHomography(pts_db, pts_q, F, method=cv.CV_LMEDS, status=inliers)
        if isHomographyGood(F):
            data['success'] = True
    else:
        data['success'] = True

    return F, np.asarray(inliers)[0]