Example 1
    def tree_walk(self, image, x_in, y_in):
        #print "7 tree_walk"
        #called by 8#
        almost_black = (1, 1, 1)

        pixel_list = [(x_in, y_in)]  # first pixel is black; save its position
        cv.Set2D(image, y_in, x_in, almost_black)  # set pixel to almost black
        to_do = [(x_in, y_in - 1)]  # add neighbours to to do list
        to_do.append([x_in, y_in + 1])
        to_do.append([x_in - 1, y_in])
        to_do.append([x_in + 1, y_in])

        while len(to_do) > 0:
            x, y = to_do.pop()  # get next pixel to test
            if cv.Get2D(image, y, x)[0] == self.black[0]:  # if black pixel found
                pixel_list.append([x, y])  # save pixel position
                cv.Set2D(image, y, x, almost_black)  # set pixel to almost black
                to_do.append([x, y - 1])  # add neighbours to to do list
                to_do.append([x, y + 1])
                to_do.append([x - 1, y])
                to_do.append([x + 1, y])

        return pixel_list
Example 2
def compute_glcm(img, d):
	order = 8
	newimg = quantize(img, order)
	glcm = cv.CreateMat(order, order, cv.CV_32FC1)  # float counts so bins do not saturate at 255
	normglcm = cv.CreateMat(order, order, cv.CV_32FC1)
	cv.SetZero(glcm)
	
	div = 255/order
	for i in range(img.rows-d):
		for j in range(img.cols-d):
			val1 = cv.Get2D(newimg, i, j)
			val2 = cv.Get2D(newimg, i+d, j+d)
			p = int(val1[0]/div)
			q = int(val2[0]/div)
			if p>=order:
				p = order -1
			if q>=order:
				q = order -1
			#print p, q
			val3 = cv.Get2D(glcm, p, q)
			cv.Set2D(glcm, p, q, (val3[0]+1))
			
	tot = cv.Sum(glcm)
	for i in range(glcm.rows):
		for j in range(glcm.cols):
			val3 = cv.Get2D(glcm, i , j)
			val = 1.0*val3[0]/tot[0]
			cv.Set2D(normglcm, i, j, (val))
			#print round(float(cv.Get2D(normglcm, i, j)[0]), 3), 
		#print "\n"
		
	return normglcm
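
Note: the per-pixel Get2D/Set2D loop above is very slow in Python. As a rough sketch (not part of the original project, and assuming the input is already a 2-D uint8 numpy array), the same diagonal grey-level co-occurrence matrix can be built with numpy:

import numpy as np

def compute_glcm_np(gray, d, order=8):
    # quantize to `order` grey levels, then count co-occurrences at offset (d, d)
    div = 256 // order
    q = np.minimum(gray // div, order - 1)
    a = q[:-d, :-d].ravel()                    # value at (i, j)
    b = q[d:, d:].ravel()                      # value at (i + d, j + d)
    glcm = np.zeros((order, order), dtype=np.float64)
    np.add.at(glcm, (a, b), 1)                 # accumulate counts
    return glcm / glcm.sum()                   # normalized GLCM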
Example 3
    def __fill_pixels(self, r, c):
        fulld = self.__pixw / self.__stroke_width

        # Get the average of the pixels
        value = cv.Get2D(self.__resized, r, c)[0]
        actual = self.__map_range(value, 0, 255, 0, 1) ** (1 / self.__gamma)

        points = []

        if actual > WHITE_THRESH:
            cv.Set2D(self.__output, r, c, 255)
            return []
        elif actual > AMP_THRESH:
            up = self.__pixh * self.__map_range(actual, AMP_THRESH, WHITE_THRESH, (1 - OVERLAP), 0.5)
            down = self.__pixh * self.__map_range(actual, AMP_THRESH, WHITE_THRESH, OVERLAP, 0.5)

            cv.Set2D(self.__output, r, c, int(255 * actual))

            points.append((self.__pixw / 2.0, down))
            points.append((self.__pixw, up))
        else:
            linecount = int(self.__map_range(actual, 0, AMP_THRESH, fulld, 1))
            cv.Set2D(self.__output, r, c, 
                int(AMP_THRESH*255 - ((linecount-1) / float(fulld-1)) ** (1/2.2) * AMP_THRESH*255))

            gap = float(self.__pixw) / linecount
            up = self.__pixh * (1 - OVERLAP)
            down = self.__pixh * OVERLAP

            # Here is where the magic happens
            for i in range(linecount):
                points.append((gap * (i + 0.5), down))
                points.append((gap * (i + 1), up))
        return points
Example 4
    def flood_fill_edge(self, canny):
        width, height = cv.GetSize(canny)

        # set border pixels to white
        for x in range(width):
            cv.Set2D(canny, 0, x, self.white)
            cv.Set2D(canny, height - 1, x, self.white)

        for y in range(height):
            cv.Set2D(canny, y, 0, self.white)
            cv.Set2D(canny, y, width - 1, self.white)

        # prime to do list
        to_do = [(2, 2)]
        to_do.append([2, height - 3])
        to_do.append([width - 3, height - 3])
        to_do.append([width - 3, 2])

        while len(to_do) > 0:
            x, y = to_do.pop()                               # get next pixel to test
            if cv.Get2D(canny, y, x)[0] == self.black[0]:    # if black pixel found
                cv.Set2D(canny, y, x, self.white)            # set pixel to white
                to_do.append([x, y - 1])                     # add neighbours to to do list
                to_do.append([x, y + 1])
                to_do.append([x - 1, y])
                to_do.append([x + 1, y])
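
Note: a hedged alternative sketch, assuming the image is a single-channel uint8 numpy array rather than an old cv image: OpenCV's built-in flood fill can replace the Python to-do-list loop while keeping the same four interior seed points.

import cv2
import numpy as np

def flood_fill_edge_np(canny, white=255):
    # paint the one-pixel border white, then flood-fill white from each corner seed
    canny[0, :] = white
    canny[-1, :] = white
    canny[:, 0] = white
    canny[:, -1] = white
    h, w = canny.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)      # floodFill needs a padded mask
    for seed in [(2, 2), (2, h - 3), (w - 3, h - 3), (w - 3, 2)]:
        if canny[seed[1], seed[0]] == 0:           # only fill from black seeds
            cv2.floodFill(canny, mask, seed, white)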
Example 5
def hsv_filter(src,
               low_h,
               high_h,
               min_s,
               max_s,
               min_v,
               max_v,
               hue_bandstop=False):
    '''Takes an 8-bit bgr src image and does simple hsv thresholding.

    A binary image of the same size will be returned.  HSV values have the
    following ranges:
           Hue - 0 to 360
    Saturation - 0 to 255
         Value - 0 to 255

    If hue_bandstop is True, the low_h and high_h will act as a band stop
    filter on hue, otherwise hue will be a band pass filter.

    '''

    # OpenCV expects hue to be ranged from 0-180, since 0-360 wouldn't fit
    # inside a byte.
    high_h /= 2
    low_h /= 2

    hsv = cv.CreateImage(cv.GetSize(src), 8, 3)
    binary = cv.CreateImage(cv.GetSize(src), 8, 1)
    cv.SetZero(binary)
    cv.CvtColor(src, hsv, cv.CV_BGR2HSV)

    data = hsv.tostring()
    for y in xrange(0, hsv.height):
        for x in xrange(0, hsv.width):
            index = y * hsv.width * 3 + x * 3
            h = ord(data[0 + index])
            s = ord(data[1 + index])
            v = ord(data[2 + index])

            if hue_bandstop:
                if (h <= low_h or h >= high_h) and \
                   s >= min_s and \
                   s <= max_s and \
                   v >= min_v and \
                   v <= max_v:

                    cv.Set2D(binary, y, x, (255, ))

            else:
                if h >= low_h and \
                   h <= high_h and \
                   s >= min_s and \
                   s <= max_s and \
                   v >= min_v and \
                   v <= max_v:

                    cv.Set2D(binary, y, x, (255, ))

    return binary
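
Note: as a sketch of the same thresholding done with array operations (assuming the source image is a numpy BGR array, which is not how the original cv-based code stores it), cv2.cvtColor plus cv2.inRange avoids the per-pixel loop. Band edges are treated as inclusive, which can differ from the loop above at exactly low_h and high_h.

import cv2
import numpy as np

def hsv_filter_np(src_bgr, low_h, high_h, min_s, max_s, min_v, max_v,
                  hue_bandstop=False):
    # hue arguments are on the same 0-360 scale as in the loop version above
    hsv = cv2.cvtColor(src_bgr, cv2.COLOR_BGR2HSV)
    lo = np.array([low_h // 2, min_s, min_v], dtype=np.uint8)
    hi = np.array([high_h // 2, max_s, max_v], dtype=np.uint8)
    band_pass = cv2.inRange(hsv, lo, hi)           # hue, sat and val all in range
    if not hue_bandstop:
        return band_pass
    # band-stop: S/V in range, hue outside [low_h, high_h]
    sv_only = cv2.inRange(hsv, np.array([0, min_s, min_v], np.uint8),
                          np.array([179, max_s, max_v], np.uint8))
    return cv2.bitwise_and(sv_only, cv2.bitwise_not(band_pass))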
Example 6
def solve(C, Q, matches, dbsiftpath, dbimgpath):

  # open EarthMine info
  info = os.path.join(C.infodir, os.path.basename(dbimgpath)[:-4] + '.info')
  em = render_tags.EarthmineImageInfo(dbimgpath, info)
  map3d = C.pixelmap.open(dbsiftpath)

  # find non-None features
  vector = []
  for i, m in enumerate(matches):
    d = m['db']

    # get 3d pt of feature
    feature = map3d[int(d[0]), int(d[1])]
    if not feature:
      continue

    # convert from latlon to meters relative to earthmine camera
    pz, px = geom.lltom(em.lat, em.lon, feature['lat'], feature['lon'])
    py = feature['alt'] - em.alt
    vector.append((m['query'][:2], (px, py, -pz)))

  print vector[0]

  # reference camera matrix
  # f 0 0
  # 0 f 0
  # 0 0 1
  A = cv.CreateMat(3, 3, cv.CV_64F)
  cv.SetZero(A)
  f = 662 # focal len?
  cv.Set2D(A, 0, 0, cv.Scalar(f))
  cv.Set2D(A, 1, 1, cv.Scalar(f))
  cv.Set2D(A, 2, 2, cv.Scalar(1))

  # convert vector to cvMats
  objectPoints3d = cv.CreateMat(len(vector), 1, cv.CV_64FC3)
  imagePoints2d = cv.CreateMat(len(vector), 1, cv.CV_64FC2)
  for i, (p2d, p3d) in enumerate(vector):
    cv.Set2D(imagePoints2d, i, 0, cv.Scalar(*p2d))
    cv.Set2D(objectPoints3d, i, 0, cv.Scalar(*p3d))

  coeff = cv.CreateMat(4, 1, cv.CV_64F)
  rvec = cv.CreateMat(3, 1, cv.CV_64F)
  tvec = cv.CreateMat(3, 1, cv.CV_64F)
  cv.SetZero(coeff)
  cv.SetZero(rvec)
  cv.SetZero(tvec)

  # since rvec, tvec are zero the initial guess is the earthmine camera
  ret = cv.FindExtrinsicCameraParams2(objectPoints3d, imagePoints2d, A,
    coeff, rvec, tvec, useExtrinsicGuess=False)
  np_rvec = np.matrix(rvec)
  np_tvec = np.matrix(tvec)
  print np_rvec
  print np_tvec
  return np_rvec, np_tvec
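
Note: cv.FindExtrinsicCameraParams2 is the old name for solvePnP. A minimal sketch of the same pose estimation with the cv2 API (a hypothetical helper written against the vector list built above, keeping the zero principal point and zero distortion of the original) would be:

import cv2
import numpy as np

def solve_pnp_np(vector, f=662):
    # vector: list of ((u, v), (X, Y, Z)) pairs
    img_pts = np.array([p2d for p2d, _ in vector], dtype=np.float64)   # N x 2
    obj_pts = np.array([p3d for _, p3d in vector], dtype=np.float64)   # N x 3
    K = np.array([[f, 0, 0],
                  [0, f, 0],
                  [0, 0, 1]], dtype=np.float64)    # reference camera matrix
    dist = np.zeros(4)                             # no lens distortion
    ok, rvec, tvec = cv2.solvePnP(obj_pts, img_pts, K, dist)
    return np.matrix(rvec), np.matrix(tvec)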
Example 7
def cambio(imagen):
    imagen = cv.LoadImage(imagen, cv.CV_LOAD_IMAGE_COLOR)  ## load the image
    xx, yy = cv.GetSize(imagen)  ## get the image size

    for i in range(xx):
        for y in range(yy):
            if cv.Get2D(imagen, y, i) == (0.0, 0.0, 0.0, 0.0):
                cv.Set2D(imagen, y, i, (255.0, 255.0, 255.0, 255.0))
            else:
                cv.Set2D(imagen, y, i, (0.0, 0.0, 0.0, 0.0))
            if y < yy / 3:  ## only use 3/4 of the image for verification
                cv.Set2D(imagen, y, i, (0.0, 0.0, 0.0, 0.0))
    cv.SaveImage("ngrey.jpg", imagen)  ##guardamos la imagen
Example 8
def createLineImage(image, channel=0):
    print "Creating line image"
    for y in range(image.height - 1):
        max_value = 0
        max_position = 0
        for x in range((image.width - 1)):
            pixel = cv.Get2D(image, y, x)
            if pixel[channel] > max_value:
                max_value = pixel[channel]
                max_position = x
            pixel = cv.RGB(0, 0, 0)
            cv.Set2D(image, y, x, pixel)
        pixel = cv.RGB(max_value, 0, 0)
        cv.Set2D(image, y, max_position, pixel)  # mark the brightest column in this row
    return image
Example 9
    def change_by_hue(self):
        for x in range(self.threshold.width):
            for y in range(self.threshold.height):
                if cv.Get2D(self.threshold, y, x)[0] == 255:
                    val = cv.Get2D(self.hsv, y, x)
                    cv.Set2D(self.hsv, y, x, (self.hue, val[1], val[2]))
        cv.CvtColor(self.hsv, self.image_color, cv.CV_HSV2BGR)
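
Note: a minimal numpy sketch of the same hue replacement, assuming self.hsv and self.threshold are numpy arrays of matching size rather than cv images:

import cv2
import numpy as np

def change_by_hue_np(hsv, threshold, hue):
    # overwrite the hue channel wherever the mask is 255, keeping S and V
    mask = threshold == 255
    hsv[..., 0][mask] = hue
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)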
Example 10
def overlay_image(frame, image, x, y, w, h):
    """resize and overlay an image where a feature is detected

    This resizes the corresponding image to a matched feature, then loops
    through all of its pixels to superimpose the image on the frame
    """
    # resize the image to fit the detected feature
    new_feature = cv.CreateImage((w, h), 8, 3)
    cv.Resize(image, new_feature, interpolation=cv.CV_INTER_AREA)

    # overlay the image on the frame
    for py in xrange(h):
        for px in xrange(w):
            pixel = cv.Get2D(new_feature, py, px)

            # don't map the whitespace surrounding the image
            if pixel != (255.0, 255.0, 255.0, 0.0):
                if image is tophat:
                    # above feature
                    new_y = y - py
                elif image is moustache:
                    # bottom half of feature
                    new_y = (h / 2) + y + py
                else:
                    # over feature
                    new_y = y + py
                new_x = x + px

                # make sure the image is in the frame
                if 0 < new_x < frame.width and 0 < new_y < frame.height:
                    cv.Set2D(frame, new_y, new_x, pixel)
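
Note: a hedged numpy sketch of the same overlay, ignoring the per-image vertical offsets for tophat and moustache and assuming frame and image are BGR numpy arrays:

import cv2
import numpy as np

def overlay_image_np(frame, image, x, y, w, h):
    # resize the overlay, then paste every non-white pixel, clipped to the frame
    patch = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA)
    y0, y1 = max(y, 0), min(y + h, frame.shape[0])
    x0, x1 = max(x, 0), min(x + w, frame.shape[1])
    if y0 >= y1 or x0 >= x1:
        return
    sub = patch[y0 - y:y1 - y, x0 - x:x1 - x]
    mask = np.any(sub != 255, axis=2)              # skip the white surround
    frame[y0:y1, x0:x1][mask] = sub[mask]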
Example 11
def main():
    os.chdir(os.path.join(sys.argv[1], "motion"))
    try:
        os.mkdir(OUTPUT_DIR_NAME)
    except OSError:
        pass

    #os.system("del klein\\*.png")
    os.system("convert motion_*.png -adaptive-resize 500x500! " +
              OUTPUT_DIR_NAME + "\\motion_%02d.png")

    os.chdir(OUTPUT_DIR_NAME)
    os.system("convert motion_*.png -append result.png")

    img = cv.LoadImageM("result.png")
    values = []

    for y in range(img.rows):
        value = cv.Get1D(cv.GetRow(img, y), 0)[0]
        values.append(value)

    values.sort(reverse=True)

    output_img = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
    for y in range(img.rows):
        for x in range(img.cols):
            cv.Set2D(output_img, y, x, cv.RGB(values[y], values[y], values[y]))

    cv.SaveImage("result_sorted.png", output_img)

    raw_input("- done -")
    return
Example 12
def proj_board(im, xl, yl, z):
    color = cv.CV_RGB(0, 255, 0)
    image_size = (im.width, im.height)
    for x in arange(xl - 9, xl + 9, 0.5):
        for y in arange(yl - 9, yl + 9, 0.5):
            X = array([x, y, z])
            q = dot(K, X)
            q = [int(q[0] / q[2]), int(q[1] / q[2])]
            cv.Set2D(im, im.height - q[1], q[0], color)
Example 13
    def red_eye(self):
        self.load_cascade_file()
        faces = [
            face for face in self.context.request.focal_points
            if face.origin == 'Face Detection'
        ]
        if faces:
            engine = self.context.modules.engine
            mode, data = engine.image_data_as_rgb()
            mode = mode.lower()
            sz = engine.size
            image = cv.CreateImageHeader(sz, cv.IPL_DEPTH_8U, 3)
            cv.SetData(image, data)

            for face in faces:
                face_x = int(face.x - face.width / 2)
                face_y = int(face.y - face.height / 2)

                face_roi = (int(face_x), int(face_y), int(face.width),
                            int(face.height))

                cv.SetImageROI(image, face_roi)

                eyes = cv.HaarDetectObjects(image, self.cascade,
                                            cv.CreateMemStorage(0), HAAR_SCALE,
                                            MIN_NEIGHBORS, HAAR_FLAGS,
                                            MIN_SIZE)

                for (x, y, w, h), other in self.filter_eyes(eyes):
                    # Set the image Region of interest to be the eye area [this reduces processing time]
                    cv.SetImageROI(image, (face_x + x, face_y + y, w, h))

                    if self.context.request.debug:
                        cv.Rectangle(image, (0, 0), (w, h),
                                     cv.RGB(255, 255, 255), 2, 8, 0)

                    for pixel in self.get_pixels(image, w, h, mode):
                        green_blue_avg = (pixel['g'] + pixel['b']) / 2

                        if not green_blue_avg:
                            red_intensity = RED_THRESHOLD
                        else:
                            # Calculate the intensity compared to blue and green average
                            red_intensity = pixel['r'] / green_blue_avg

                        # If the red intensity exceeds the threshold, lower the red value
                        if red_intensity >= RED_THRESHOLD:
                            new_red_value = (pixel['g'] + pixel['b']) / 2
                            # Insert the new red value for the pixel to the image
                            cv.Set2D(
                                image, pixel['y'], pixel['x'],
                                cv.RGB(new_red_value, pixel['g'], pixel['b']))

                    # Reset the image region of interest back to full image
                    cv.ResetImageROI(image)

            self.context.modules.engine.set_image_data(image.tostring())
Example 14
    def vers_iplimage(self, profondeur=cv.IPL_DEPTH_8U):
        "Return the equivalent IplImage (OpenCV format)"
        temp = cv.CreateImage((self.largeur, self.hauteur), cv.IPL_DEPTH_64F, 1)
        for (i, ligne) in enumerate(self.tab):
            for (j, valeur) in enumerate(ligne):
                cv.Set2D(temp, i, j, valeur)
        img = cv.CreateImage((self.largeur, self.hauteur), profondeur, 1)
        cv.Convert(temp, img)
        return img
Example 15
    def change_by_rgb(self):
        avgs = self.get_avgs(self.image_color)
        for x in range(self.threshold.width):
            for y in range(self.threshold.height):
                if cv.Get2D(self.threshold, y, x)[0] == 255:
                    val = cv.Get2D(self.image_color, y, x)
                    difs = (val[2] - avgs[0], val[1] - avgs[1], val[0] - avgs[2])
                    cv.Set2D(self.image_color, y, x,
                             (self.new_color[2] + difs[2],
                              self.new_color[1] + difs[1],
                              self.new_color[0] + difs[0], 0))
Example 16
def quantize(img, order):
	div = 255/order
	for i in range(img.rows):
		for j in range(img.cols):
			val = cv.Get2D(img, i, j)
			newval = (int(val[0]/div))*div
			# clamp the new value to the valid 8-bit range
			if newval > 255:
				newval = 255 - 1
			cv.Set2D(img, i, j, (newval))
	cv.ShowImage("Quantize", img)
	cv.WaitKey(0)
	return img
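
Note: assuming the image is a 2-D uint8 numpy array, the same quantization (including the integer bin width of 255 // order and the clamp) is a short numpy expression:

import numpy as np

def quantize_np(gray, order=8):
    div = 255 // order                             # 31 for 8 levels, as above
    return np.minimum((gray // div) * div, 255 - 1).astype(np.uint8)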
Example 17
    def change_by_hsv(self):
        avgs = self.get_avgs(self.hsv)

        for x in range(self.threshold.width):
            for y in range(self.threshold.height):
                if cv.Get2D(self.threshold, y, x)[0] == 255:
                    val = cv.Get2D(self.hsv, y, x)
                    difs = (val[0] - avgs[0], val[1] - avgs[1],
                            val[2] - avgs[2])
                    cv.Set2D(self.hsv, y, x,
                             (self.new_hsv[0] + difs[0], self.new_hsv[1] +
                              difs[1], self.new_hsv[2] + difs[2]))
        cv.CvtColor(self.hsv, self.image_color, cv.CV_HSV2BGR)
Example 18
    def flood_fill_edge(self, canny):
        width, height = cv.GetSize(canny)

        # set border pixels to white (paint the whole border white)
        for x in range(width):
            cv.Set2D(canny, 0, x, self.white)
            cv.Set2D(canny, height - 1, x, self.white)

        for y in range(height):
            cv.Set2D(canny, y, 0, self.white)
            cv.Set2D(canny, y, width - 1, self.white)

        # prime the to-do list with points just inside the border
        to_do = [(2, 2)]
        to_do.append([2, height - 3])
        to_do.append([width - 3, height - 3])
        to_do.append([width - 3, 2])

        # walk through everything added to the list; the loop ends once the list is empty
        while len(to_do) > 0:
            x, y = to_do.pop()  # get next pixel to test
            if cv.Get2D(canny, y, x)[0] == self.black[0]:  # if black pixel found
                cv.Set2D(canny, y, x, self.white)  # set pixel to white
                to_do.append([x, y - 1])  # add neighbours to to do list
                to_do.append([x, y + 1])
                to_do.append([x - 1, y])
                to_do.append([x + 1, y])

        # display the flood
        #cv.ShowImage("flood Canny", canny)
        file_name = self.image_dir + "flood.jpg"
        cv.SaveImage(file_name, canny)

        # 3ms wait
        cv.WaitKey(3)
Example 19
    def look_for_chess_board(self, canny):
        print "8 look_for_chess_board"
        #called by 24#
        width, height = cv.GetSize(canny)

        centre = (0, 0)
        max_area = 0

        # for all but edge pixels
        for x in range(1, width - 2):
            for y in range(1, height - 2):
                if cv.Get2D(canny, y, x)[0] == self.black[0]:  # black pixel found
                    pixel_list = self.tree_walk(canny, x, y)  # tree walk pixel "go to 7"
                    if len(pixel_list) < self.min_area:  # if object too small //back from 7
                        for l in pixel_list:
                            cv.Set2D(canny, l[1], l[0], self.white)  # set pixel to white
                    else:  # if object found
                        n = len(pixel_list)
                        if n > max_area:  # if largest object found
                            sum_x = 0  # find centre of object
                            sum_y = 0
                            for p in pixel_list:
                                sum_x = sum_x + p[0]
                                sum_y = sum_y + p[1]

                            centre = sum_x / n, sum_y / n  # save centre of object
                            max_area = n  # save area of object

        if max_area > 0:  # if board found
            cv.Circle(canny, centre, 9, (250, 250, 250), -1)  # mark board centre

        # display the modified canny
        cv.ShowImage("Modified Canny", canny)
        print "7 tree_walk"
        # 3ms wait
        cv.WaitKey(3)

        #print "centre: ", centre

        return centre  # return centre of object
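
Note: the blob search above (scan for a black pixel, tree-walk it, keep the centroid of the largest blob) maps onto cv2.connectedComponentsWithStats. A rough sketch, assuming the canny image is a single-channel uint8 numpy array with black blobs on a white background; unlike the original it only returns the centre and does not whiten small blobs:

import cv2
import numpy as np

def largest_black_blob_centre(canny, min_area):
    inverted = cv2.bitwise_not(canny)              # black blobs become foreground
    n, labels, stats, centroids = cv2.connectedComponentsWithStats(inverted)
    centre, max_area = (0, 0), 0
    for i in range(1, n):                          # label 0 is the background
        area = stats[i, cv2.CC_STAT_AREA]
        if area >= min_area and area > max_area:
            centre = (int(centroids[i][0]), int(centroids[i][1]))
            max_area = area
    return centre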
Example 20
def get_panel(cv, img, x, y, w, h, xsubdiv=10, ysubdiv=2, color=(255, 0, 255)):
    vals = []
    xs = w / float(xsubdiv)
    ys = h / float(ysubdiv)
    for i in range(0, xsubdiv):
        xx = int(x + (i + .5) * xs + 0.5)
        for j in range(0, ysubdiv):
            yy = int(y + (j + .5) * ys + 0.5)
            # measure the square
            # http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=avg#mean
            # cv.SetImageROI(img, (x, y, w, h))
            # cv.Avg()
            # cv.ResetImageROI(img)
            val = cv.Get2D(img, yy, xx)
            vals.append(val)
            cv.Set2D(img, yy, xx, (255, 0, 0))
    return vals
Example 21
    def splitDika(self, image):
        segmentLine = ToLines()
        totalLines = segmentLine.segment(image)
        pixelCounter = PixelCounter()

        for lineNum in range(totalLines):
            lineImg = segmentLine.getLineSegment(lineNum)
            lineRowRange = segmentLine.getCoordinatesOfLineSegment(lineNum)
            countArr = pixelCounter.getCountArr_V(lineImg, 0)
            #print countArr
            min = pixelCounter.getMin(countArr)
            # dika width is the vertical minimum and the most frequently
            # occurring value horizontally

            for x in pixelCounter.getOccurredPositionList(countArr, min):
                for height in range(lineRowRange[0],
                                    (lineRowRange[1] + lineRowRange[0]) / 2):
                    cv.Set2D(image, height, x, 255)
        return image
Example 22
    def get_candidates(self, m_d):
        '''
        Get candidates for this corner from new image
        @param m_d: marker_detector
        '''
        # if this corner is wider than MAX_CORNER_ANGLE, we probably won't
        # find it anyway. Instead, let's find narrow corners and calculate its
        # position from them
        if self.angle > MAX_CORNER_ANGLE: return []
        cr = self.get_rectangle(m_d)
        cr = correct_rectangle(cr, m_d.size)
        if cr is None: return []
        m_d.set_ROI(cr)
        tmp_img = m_d.tmp_img
        gray_img = m_d.gray_img
        bw_img = m_d.bw_img
        canny = m_d.canny_img
        cv.Copy(gray_img, tmp_img)
        cv.Threshold(gray_img, bw_img, 125, 255, cv.CV_THRESH_OTSU)
        if self.black_inside > 0:
            cv.Not(bw_img, bw_img)
        cv.Canny(gray_img, canny, 300, 500)
        cv.Or(bw_img, canny, bw_img)
        tmpim = m_d.canny_img
        cv.Copy(bw_img, tmpim)
        cv.Set2D(tmpim, 1, 1, 255)
        conts = cv.FindContours(tmpim, cv.CreateMemStorage(),
                                cv.CV_RETR_EXTERNAL)
        cv.Zero(tmpim)
        m_d.set_ROI()
        cv.SetImageROI(tmpim, cr)
        result = []
        while conts:
            aconts = cv.ApproxPoly(conts, cv.CreateMemStorage(),
                                   cv.CV_POLY_APPROX_DP, 2)
            nconts = list(aconts)
            cv.PolyLine(tmpim, [nconts], True, (255, 255, 255))
            self._append_candidates_from_conts(cr, result, nconts, m_d)
            conts = conts.h_next()


#        print result
#        db.show([tmpim,m_d.draw_img], 'tmpim', 0, 0, 0)
        return result
Example 23
def calcEM(hist1, hist2, l_bins=16, u_bins=16, v_bins=16):

    #Define number of rows
    numRows = l_bins * u_bins * v_bins

    sig1 = cv.CreateMat(numRows, 4, cv.CV_32FC1)
    sig2 = cv.CreateMat(numRows, 4, cv.CV_32FC1)
    eq_val = 1.0 / numRows

    for l in range(l_bins):
        for u in range(u_bins):
            for v in range(v_bins):
                bin_val = cv.QueryHistValue_3D(hist1, l, u, v)

                cv.Set2D(sig1, l * u_bins * v_bins + u * v_bins + v, 0,
                         cv.Scalar(bin_val))
                cv.Set2D(sig1, l * u_bins * v_bins + u * v_bins + v, 1,
                         cv.Scalar(l))
                cv.Set2D(sig1, l * u_bins * v_bins + u * v_bins + v, 2,
                         cv.Scalar(u))
                cv.Set2D(sig1, l * u_bins * v_bins + u * v_bins + v, 3,
                         cv.Scalar(v))

                if hist2 is None:
                    bin_val = eq_val
                else:
                    bin_val = cv.QueryHistValue_3D(hist2, l, u, v)
                cv.Set2D(sig2, l * u_bins * v_bins + u * v_bins + v, 0,
                         cv.Scalar(bin_val))
                cv.Set2D(sig2, l * u_bins * v_bins + u * v_bins + v, 1,
                         cv.Scalar(l))
                cv.Set2D(sig2, l * u_bins * v_bins + u * v_bins + v, 2,
                         cv.Scalar(u))
                cv.Set2D(sig2, l * u_bins * v_bins + u * v_bins + v, 3,
                         cv.Scalar(v))

    # This is the important line where the OpenCV EMD algorithm is called
    return (cv.CalcEMD2(sig1, sig2, cv.CV_DIST_L2), np.asarray(sig1)[:, 0],
            np.asarray(sig2)[:, 0])
Example 24
def process_file(filenameIN, WIDTH=31, HEIGHT=31):

    print "processing file: " + filenameIN
    if not (os.path.exists(filenameIN)):
        print "file not found. Aborting."
        return
    else:
        srcImg = cv.LoadImage(filenameIN, 0)
        res = cv.CreateImage((WIDTH, HEIGHT), cv.IPL_DEPTH_8U, 1)
        cv.Set(res, 255)
        xmin = WIDTH
        xmax = 0
        ymin = HEIGHT
        ymax = 0
        for i in range(srcImg.width):
            for j in range(srcImg.height):
                #print "xmax"
                #print cv.Get2D(srcImg, j, i)
                if cv.Get2D(srcImg, j, i)[0] == 0.0:
                    #print "xin"
                    if i < xmin:
                        xmin = i
                    if i > xmax:
                        xmax = i
                    if j < ymin:
                        ymin = j
                    if j > ymax:
                        ymax = j

        offsetx = (WIDTH - (xmax - xmin)) / 2
        offsety = (HEIGHT - (ymax - ymin)) / 2
        #print 'WIDTH',WIDTH,"offset",offsety,offsetx
        for i in range(xmax - xmin):
            for j in range(ymax - ymin):
                if ((offsety + j > 0) and (offsety + j < res.height)
                        and (offsetx + i > 0) and (offsetx + i < res.width)):
                    #print "haha"
                    cv.Set2D(res, offsety + j, offsetx + i,
                             cv.Get2D(srcImg, ymin + j, xmin + i))

        cv.SaveImage(filenameIN, res)
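
Note: a hedged numpy sketch of the same glyph-centering step (crop the black pixels to their bounding box and paste them centered on a white canvas). It returns the canvas instead of overwriting the input file, and the function name is illustrative only:

import cv2
import numpy as np

def center_glyph(filename, width=31, height=31):
    src = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    ys, xs = np.nonzero(src == 0)                  # positions of black pixels
    canvas = np.full((height, width), 255, np.uint8)
    if len(xs) == 0:
        return canvas
    crop = src[ys.min():ys.max() + 1, xs.min():xs.max() + 1]
    oy = max((height - crop.shape[0]) // 2, 0)
    ox = max((width - crop.shape[1]) // 2, 0)
    h = min(crop.shape[0], height - oy)
    w = min(crop.shape[1], width - ox)
    canvas[oy:oy + h, ox:ox + w] = crop[:h, :w]    # paste the centered crop
    return canvas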
Example 25
    def look_for_ball_tray(self, canny):

        width, height = cv.GetSize(canny)

        centre = (0, 0)
        max_area = 0

        # for all but edge pixels (start at 1 to skip the one-pixel border)
        for x in range(1, width - 2):
            for y in range(1, height - 2):
                if cv.Get2D(canny, y, x)[0] == self.black[0]:  # black pixel found
                    pixel_list = self.tree_walk(canny, x, y)  # tree walk pixel
                    if len(pixel_list) < self.min_area:  # if object too small
                        for l in pixel_list:
                            cv.Set2D(canny, l[1], l[0], self.white)  # set pixel to white
                    else:  # if object found
                        n = len(pixel_list)
                        if n > max_area:  # if largest object found
                            sum_x = 0  # find centre of object
                            sum_y = 0
                            for p in pixel_list:
                                sum_x = sum_x + p[0]
                                sum_y = sum_y + p[1]

                            centre = sum_x / n, sum_y / n  # save centre of object
                            max_area = n  # save area of object (the number of pixels)
        if max_area > 0:  # if tray found
            cv.Circle(canny, centre, 9, (250, 250, 250), -1)  # mark tray centre

        # display the modified canny
        #cv.ShowImage("Modified Canny", canny)

        # 3ms wait
        cv.WaitKey(3)

        return centre  # return centre of object
Example 26
    def view(self, sample_idx):
        """
        Return an image of the spectrogram centered around the sample at
        sample_idx.

        Arguments:
            sample_idx: The offset into data which to center and highlight.

        Returns:
            A spectrogram in the form of an OpenCV IplImage with the sample_idx
            at the center. The image is always ``display_width`` pixels wide.
            The current sample window is highlighted.
        """
        window_idx = self.spectrogram.window_from_sample(sample_idx)

        # Center the view on the current window, but snap the view to the
        # edges if we're at the beginning or end.
        if self.display_width == self.spectrogram.n_windows:
            view_start = 0  # Display has shrunk to number of windows
        elif window_idx < self.display_width / 2:
            view_start = 0
        elif window_idx > self.spectrogram.n_windows - self.display_width / 2:
            view_start = self.spectrogram.n_windows - self.display_width - 1
        else:
            view_start = window_idx - self.display_width // 2

        img = self.spectrogram.get_slice(view_start,
                                         view_start + self.display_width)
        self.height = img.height

        # Draw line for current window
        for y in xrange(img.height):
            x = min(int(window_idx - view_start), img.width - 1)
            v = 255 - cv.Get2D(img, y, x)[0]
            cv.Set2D(img, y, x, (v, v, v))

        return img
Example 27
    def init_empty_img(self):
        # calculate Union{ near_regions } over all buildings
        nearby_region_polys = [bd.near_region_poly for bd in self.buildings]
        all_near_regions = cv.CreateImage(
            (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U, 1)
        cv.FillPoly(all_near_regions, [nearby_region_polys[0]], im.color.blue)
        for poly in nearby_region_polys:
            tmp_canvas = cv.CreateImage(
                (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U,
                1)
            cv.FillPoly(tmp_canvas, [poly], im.color.blue)
            cv.Or(tmp_canvas, all_near_regions, all_near_regions)

        # find the "empty" region
        empty_region = cv.CreateImage(
            (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U, 1)
        cv.CmpS(all_near_regions, 0, empty_region, cv.CV_CMP_EQ)

        for ele in it.nonzero_indices(cv.GetMat(empty_region)):
            y, x = ele
            y, x = int(y), int(x)
            nearest_bd = self.get_nearest_building(x, y)
            cv.Set2D(empty_region, y, x, nearest_bd.bid)
        return empty_region
Example 28
def calcEM(hist1, hist2, h_bins, s_bins):
    #Define number of rows
    numRows = h_bins * s_bins

    sig1 = cv.CreateMat(numRows, 3, cv.CV_32FC1)
    sig2 = cv.CreateMat(numRows, 3, cv.CV_32FC1)

    for h in range(h_bins):
        for s in range(s_bins):
            bin_val = cv.QueryHistValue_2D(hist1, h, s)
            cv.Set2D(sig1, h * s_bins + s, 0, cv.Scalar(bin_val))
            cv.Set2D(sig1, h * s_bins + s, 1, cv.Scalar(h))
            cv.Set2D(sig1, h * s_bins + s, 2, cv.Scalar(s))

            bin_val = cv.QueryHistValue_2D(hist2, h, s)
            cv.Set2D(sig2, h * s_bins + s, 0, cv.Scalar(bin_val))
            cv.Set2D(sig2, h * s_bins + s, 1, cv.Scalar(h))
            cv.Set2D(sig2, h * s_bins + s, 2, cv.Scalar(s))

    # This is the important line where the OpenCV EMD algorithm is called
    return cv.CalcEMD2(sig1, sig2, cv.CV_DIST_L2)
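
Note: the signatures above are just (weight, h, s) rows, so the same computation can be sketched with numpy and cv2.EMD, assuming hist1 and hist2 are already (h_bins, s_bins) arrays of bin weights:

import cv2
import numpy as np

def calc_emd_np(hist1, hist2, h_bins, s_bins):
    # build N x 3 float32 signatures: column 0 is the weight, columns 1-2 the bin coords
    hs = np.indices((h_bins, s_bins)).reshape(2, -1).T.astype(np.float32)
    sig1 = np.hstack([hist1.reshape(-1, 1).astype(np.float32), hs])
    sig2 = np.hstack([hist2.reshape(-1, 1).astype(np.float32), hs])
    emd, _, _ = cv2.EMD(sig1, sig2, cv2.DIST_L2)
    return emd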
Example 29
def main():
    os.chdir(sys.argv[1])

    try:
        os.mkdir(OUTPUT_DIR_NAME)
    except OSError:
        pass

    tree = et.parse("project.xml")

    movie = tree.getroot()
    file_path = movie.attrib["path"]

    cap = cv.CreateFileCapture(file_path)
    cv.QueryFrame(cap)

    # skip frames at the beginning, if necessary
    start_frame = int(movie.attrib["start_frame"])
    for i in range(start_frame):
        cv.QueryFrame(cap)

    f = open("scenes.txt", "r")
    lines = [line for line in f if line]
    f.close()

    w = None
    h = None
    radius = None
    umfang = None

    t = time.time()

    for nr, line in enumerate(lines):
        print(nr + 1), "/", len(lines)

        width = int(line.split("\t")[2])

        output_img = None

        for frame_counter in range(width):
            img = cv.QueryFrame(cap)
            if not img:
                break

            if nr == 0:
                w = img.width
                h = img.height
                radius = int((0.9 * h) / 2)
                umfang = int(2 * math.pi * radius)

            if frame_counter == 0:
                output_img = cv.CreateImage((umfang, width), cv.IPL_DEPTH_8U,
                                            3)
                cv.SaveImage(
                    os.path.join(OUTPUT_DIR_NAME, "core_%04d_a.png" % nr), img)
            elif frame_counter == width - 1:
                cv.SaveImage(
                    os.path.join(OUTPUT_DIR_NAME, "core_%04d_b.png" % nr), img)

            for i in range(umfang):
                alpha = math.radians(i * (360.0 / umfang))
                x = (w / 2) + math.sin(alpha) * radius
                y = (h / 2) + math.cos(alpha) * radius
                px = cv.Get2D(img, int(y), int(x))
                cv.Set2D(output_img, frame_counter, i, px)

        cv.SaveImage(os.path.join(OUTPUT_DIR_NAME, "core_%04d.png" % nr),
                     output_img)

    print "%.2f min" % ((time.time() - t) / 60)
    raw_input("- done -")
    return
Example 30
def split_captcha(filenameIN):
    threshold = 150
    threshold = 200
    maxValue = 255
    thresholdType = cv.CV_THRESH_BINARY
    srcImg = cv.LoadImage(filenameIN, 1)
    grayThresh = cv.CreateImage((srcImg.width, srcImg.height), cv.IPL_DEPTH_8U,
                                1)
    cv.CvtColor(srcImg, grayThresh, cv.CV_BGR2GRAY)
    cv.Threshold(grayThresh, grayThresh, threshold, maxValue, thresholdType)
    cv.SaveImage((filenameIN + "grayThresh.bmp"), grayThresh)
    connectivity = 4
    CCs4 = []

    gray4 = cv.CloneImage(grayThresh)

    for i in range(gray4.width):
        for j in range(gray4.height):
            if (cv.Get2D(gray4, j, i)[0] == 0):
                cc = CC()
                cc.mask = cv.CreateImage((gray4.width + 2, gray4.height + 2),
                                         cv.IPL_DEPTH_8U, 1)
                cv.Zero(cc.mask)
                cc.comp = cv.FloodFill(gray4, (i, j), cv.Scalar(128),
                                       cv.ScalarAll(0), cv.ScalarAll(0),
                                       connectivity, cc.mask)
                CCs4.append(cc)

    CCs4.sort(cmp=func_compare_area_cc)

    size = len(CCs4)
    for i in range(size):
        if (CCs4[size - 1 - i].comp[0] < 20):
            CCs4.pop()

    connectivity = 8
    CCs8 = []
    gray8 = cv.CloneImage(grayThresh)
    for i in range(gray8.width):
        for j in range(gray8.height):
            if (cv.Get2D(gray8, j, i)[0] == 0):
                cc = CC()
                cc.mask = cv.CreateImage((gray8.width + 2, gray8.height + 2),
                                         cv.IPL_DEPTH_8U, 1)
                cv.Zero(cc.mask)
                cc.comp = cv.FloodFill(gray8, (i, j), cv.Scalar(128),
                                       cv.ScalarAll(0), cv.ScalarAll(0),
                                       connectivity, cc.mask)
                CCs8.append(cc)
    CCs8.sort(cmp=func_compare_area_cc)

    size = len(CCs8)
    for i in range(size):
        if (CCs8[size - 1 - i].comp[0] < 20):
            CCs8.pop()

    CCs = []
    CCs = copy.copy(CCs8)
    # if (len(CCs8) < 3):
    #     CCs = copy.copy(CCs4)
    # else :
    #     if (CCs4[2].comp[0] < 20):
    #         CCs = copy.copy(CCs8)
    #     else:
    #         CCs = copy.copy(CCs4)
    CCs.sort(cmp=func_compare_pos_cc)
    letters = []
    letters_path = []

    for i in range(len(CCs)):
        letter = cv.CreateImage((WIDTH, HEIGHT), cv.IPL_DEPTH_8U, 1)
        cv.Set(letter, 255)
        letters.append(letter)
    for index_image in range(len(letters)):
        letter = letters[index_image]
        cc = CCs[index_image]

        offsetx = (WIDTH - cc.comp[2][2]) / 2
        offsety = (HEIGHT - cc.comp[2][3]) / 2

        for i in range(1, cc.mask.width - 1):
            for j in range(1, cc.mask.height - 1):
                if cv.Get2D(cc.mask, j, i)[0] == 1:
                    Y = j - cc.comp[2][1] + offsety
                    X = i - cc.comp[2][0] + offsetx

                    if 0 < X < WIDTH and 0 < Y < HEIGHT:
                        cv.Set2D(letter, Y, X, cv.Scalar(0))
        letters_path.append(filenameIN + str(index_image + 1) + ".bmp")
        cv.SaveImage((filenameIN + str(index_image + 1) + ".bmp"),
                     letters[index_image])
        process_file(letters_path[index_image], WIDTH=31, HEIGHT=31)
    return letters_path