Code Example #1
File: segment.py  Project: dyzhou2015/captioncapture
def rects_to_mask(regions, mask, inverted=False, value=255):
    if inverted:
        cv.Set(mask, cv.ScalarAll(value))
        colour = cv.ScalarAll(0)
    else:
        cv.Zero(mask)
        colour = cv.ScalarAll(value)
    for rect in regions:
        cv.Rectangle(mask, (rect[0], rect[1]),
                     (rect[0] + rect[2], rect[1] + rect[3]), colour, -1)
Code Example #2
def regions_to_mask(regions, mask, inverted=False, value=255):
    if inverted:
        cv.Set(mask, cv.ScalarAll(value))
        colour = cv.ScalarAll(0)
    else:
        cv.Zero(mask)
        colour = cv.ScalarAll(value)
    for rect in regions:
        if not hasattr(rect, "x"):
            rect = rect.rect
        cv.Rectangle(mask, (rect.x, rect.y),
                     (rect.x + rect.width, rect.y + rect.height), colour, -1)
Code Example #3
def addGaussianNoise(image_path, save_path):
    img = cv.LoadImage(image_path)
    noise = cv.CreateImage(cv.GetSize(img), img.depth, img.nChannels)
    cv.SetZero(noise)
    rng = cv.RNG(-1)
    cv.RandArr(rng, noise, cv.CV_RAND_NORMAL, cv.ScalarAll(0),
               cv.ScalarAll(25))
    cv.Add(img, noise, img)
    tempName = os.path.splitext(
        os.path.basename(image_path))[0] + "_noised.jpg"
    save_image = os.path.join(save_path, tempName)
    cv.SaveImage(save_image, img)
Code Example #4
def main(argv):
    if len(argv) < 11:
        print('Usage: %s input-file fx fy cx cy k1 k2 p1 p2 output-file' %
              argv[0])
        sys.exit(-1)

    src = argv[1]
    fx, fy, cx, cy, k1, k2, p1, p2, output = argv[2:]

    intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.Zero(intrinsics)
    intrinsics[0, 0] = float(fx)
    intrinsics[1, 1] = float(fy)
    intrinsics[2, 2] = 1.0
    intrinsics[0, 2] = float(cx)
    intrinsics[1, 2] = float(cy)

    dist_coeffs = cv.CreateMat(1, 4, cv.CV_64FC1)
    cv.Zero(dist_coeffs)
    dist_coeffs[0, 0] = float(k1)
    dist_coeffs[0, 1] = float(k2)
    dist_coeffs[0, 2] = float(p1)
    dist_coeffs[0, 3] = float(p2)

    src = cv.LoadImage(src)
    dst = cv.CreateImage(cv.GetSize(src), src.depth, src.nChannels)
    mapx = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_32F, 1)
    mapy = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_32F, 1)
    cv.InitUndistortMap(intrinsics, dist_coeffs, mapx, mapy)
    cv.Remap(src, dst, mapx, mapy,
             cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS, cv.ScalarAll(0))
    # cv.Undistort2(src, dst, intrinsics, dist_coeffs)

    cv.SaveImage(output, dst)
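A hypothetical invocation of the script above (the script name, image file, and parameter values are illustrative only; they follow the usage string input-file fx fy cx cy k1 k2 p1 p2 output-file):

python undistort.py photo.jpg 800 800 320 240 -0.2 0.05 0.001 0.001 photo_undistorted.jpg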
Code Example #5
 def __init__(self, im, title=None, hist_size=256, color=cv.ScalarAll(0)):
     ranges = [[0, hist_size]]
     self.hist_size = hist_size
     self.color = color
     self.im = im
     self.hist_image = cv.CreateImage((320, 200), 8, 3)
     self.hist = cv.CreateHist([hist_size], cv.CV_HIST_ARRAY, ranges, 1)
     self.title = title
Code Example #6
def cutLetters(image, thresh, log):
    mask = createMask(image, thresh)
    log.log(mask)
    h = cv.CreateImage(cv.GetSize(image), image.depth, 1)
    cv.CvtColor(image, image, cv.CV_BGR2HSV)
    cv.Split(image, h, None, None, None)
    log.log(h)
    cv.Set(h, cv.ScalarAll(255), mask)
    return h
Code Example #7
def removeLightColors(image):
    b = cv.CreateImage(cv.GetSize(image), image.depth, 1)
    g = cv.CreateImage(cv.GetSize(image), image.depth, 1)
    r = cv.CreateImage(cv.GetSize(image), image.depth, 1)
    cv.Split(image, b, g, r, None)
    cv.Threshold(b, b, 154, 255, cv.CV_THRESH_BINARY)
    cv.Threshold(g, g, 154, 255, cv.CV_THRESH_BINARY)
    cv.Threshold(r, r, 154, 255, cv.CV_THRESH_BINARY)
    cv.Or(b, g, b)
    cv.Or(b, r, b)
    cv.Set(image, cv.ScalarAll(255), b)
    return image
Code Example #8
 def on_mouse(self, event, x, y, flags, param):
     pt = (x, y)
     if event == cv.CV_EVENT_LBUTTONUP or not (flags & cv.CV_EVENT_FLAG_LBUTTON):
         self.prev_pt = None
     elif event == cv.CV_EVENT_LBUTTONDOWN:
         self.prev_pt = pt
     elif event == cv.CV_EVENT_MOUSEMOVE and (flags & cv.CV_EVENT_FLAG_LBUTTON) :
         if self.prev_pt:
             for dst in self.dests:
                 cv.Line(dst, self.prev_pt, pt, cv.ScalarAll(255), 5, 8, 0)
         self.prev_pt = pt
         cv.ShowImage(self.windowname, img)
Code Example #9
File: ocv.py  Project: NidayeCC/Pyocv
def OCVHistogram(frame, ranges=[[0, 256]], hist_size=64):
    """Create a histogram of given frame"""
    if frame.nChannels != 1:
        dest = OCVCopyGrayscale(frame)
    else:
        dest = frame

    hist_image = cv.CreateImage((dest.width, dest.height), 8, 1)
    hist = cv.CreateHist([hist_size], cv.CV_HIST_ARRAY, ranges, 1)

    cv.CalcArrHist([dest], hist)
    (min_value, max_value, _, _) = cv.GetMinMaxHistValue(hist)
    cv.Scale(hist.bins, hist.bins, float(hist_image.height) / max_value, 0)

    cv.Set(hist_image, cv.ScalarAll(255))
    bin_w = round(float(hist_image.width) / hist_size)

    for i in range(hist_size):
        cv.Rectangle(hist_image,
                     (int(i * bin_w), hist_image.height),
                     (int((i + 1) * bin_w),
                      hist_image.height - cv.Round(hist.bins[i])),
                     cv.ScalarAll(0), -1, 8, 0)

    return hist_image
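A minimal usage sketch for the function above, assuming the old cv bindings and that OCVCopyGrayscale from the same module is importable; the file name and window title are illustrative:

img = cv.LoadImage("sample.png")     # any test image
hist_img = OCVHistogram(img)         # 64-bin histogram over the 0-255 range
cv.ShowImage("histogram", hist_img)  # assumed window name
cv.WaitKey(0)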
Code Example #10
def joinImagesV(images, bgcolor=128):
    totalHeight = sum([image.height for image in images])
    maxWidth = max([image.width for image in images])
    maxNChannels = max([image.nChannels for image in images])
    if maxNChannels == 3:
        images = map(addFakeChannels, images)
        bgcolor = cv.ScalarAll(bgcolor)
    total = len(images)
    result = cv.CreateImage((maxWidth, totalHeight + total - 1), images[0].depth, images[0].nChannels)
    cv.Set(result, bgcolor)
    curH = 0
    for index in xrange(len(images)):
        image = images[index]
        off = (maxWidth - image.width) / 2
        cvext.copyTo(image, result, (off, curH), None)
        curH += image.height + 1
    return result
Code Example #11
def joinImagesH(images, bgcolor=128):
    totalWidth = sum([image.width for image in images])
    maxHeight = max([image.height for image in images])
    maxNChannels = max([image.nChannels for image in images])
    if maxNChannels == 3:
        images = map(addFakeChannels, images)
        bgcolor = cv.ScalarAll(bgcolor)
    total = len(images)
    result = cv.CreateImage((totalWidth + total - 1, maxHeight), images[0].depth, images[0].nChannels)
    cv.Set(result, bgcolor)
    curW = 0
    for index in xrange(len(images)):
        image = images[index]
        off = (maxHeight - image.height) / 2
        cvext.copyTo(image, result, (curW, off), None)
        curW += image.width + 1
    return result
Code Example #12
File: vectorize.py  Project: wufeng02/deltadraw
    def __refresh_poly(self):
        self.polys_out = cv.ApproxPoly(self.contours, self.a_storage, cv.CV_POLY_APPROX_DP, self.__poly_acc / 100.0, -1)

        # Prints a count of the number of polygons and points in the picture thingy
        con = self.polys_out
        self.pointc = 0
        self.polyc = 0
        while con is not None:
            self.pointc += len(con)
            self.polyc += 1
            con = con.h_next()
        print '\n%d polygons'%self.polyc
        print '%d points'%self.pointc

        cv.Set(self.contour_out, cv.ScalarAll(255))
        cv.DrawContours(self.contour_out, self.polys_out, cv.Scalar(0, 0, 0), cv.Scalar(0, 0, 0), 99)

        cv.ShowImage('Contours', self.contour_out)
Code Example #13
    def update(self, im=None):
        if im is not None:
            self.im = im

        cv.CalcArrHist([self.im], self.hist)
        (min_value, max_value, _, _) = cv.GetMinMaxHistValue(self.hist)
        cv.Scale(self.hist.bins, self.hist.bins,
                 float(self.hist_image.height) / max_value, 0)

        cv.Set(self.hist_image, cv.ScalarAll(255))
        bin_w = round(float(self.hist_image.width) / self.hist_size)

        for i in range(self.hist_size):
            cv.Rectangle(self.hist_image,
                         (int(i * bin_w), self.hist_image.height),
                         (int((i + 1) * bin_w), self.hist_image.height -
                          cv.Round(self.hist.bins[i])), self.color, -1, 8, 0)

        cv.ShowImage(self.title, self.hist_image)
Code Example #14
 def __init__(self, iplimage):
     # Rough-n-ready but it works dammit
     alpha = cv.CreateMat(iplimage.height, iplimage.width, cv.CV_8UC1)
     cv.Rectangle(alpha, (0, 0), (iplimage.width, iplimage.height),
                  cv.ScalarAll(255), -1)
     rgba = cv.CreateMat(iplimage.height, iplimage.width, cv.CV_8UC4)
     cv.Set(rgba, (1, 2, 3, 4))
     cv.MixChannels(
         [iplimage, alpha],
         [rgba],
         [
             (0, 0),  # bgr[0] -> rgba[0]
             (1, 1),  # bgr[1] -> rgba[1]
             (2, 2),  # bgr[2] -> rgba[2]
             (3, 3)   # alpha[0] -> rgba[3]
         ])
     self.__imagedata = rgba.tostring()
     super(IplQImage, self).__init__(self.__imagedata, iplimage.width,
                                     iplimage.height, QImage.Format_RGB32)
Code Example #15
cv.NamedWindow("test1", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("test2", cv.CV_WINDOW_AUTOSIZE)
#cv.NamedWindow("test3", cv.CV_WINDOW_AUTOSIZE)
#cv.NamedWindow("test4", cv.CV_WINDOW_AUTOSIZE)
#cv.NamedWindow("test5", cv.CV_WINDOW_AUTOSIZE)

cv.CvtColor(img, gray, cv.CV_RGB2GRAY)
cv.Not(gray, gray)
cv.Threshold(gray, binary, 20, 255, cv.CV_THRESH_BINARY)

color = 100
color_list = []
for x in range(binary.width):
    for y in range(binary.height):
        if (binary[y, x] > color):
            cv.FloodFill(binary, (x, y), color, cv.ScalarAll(10),
                         cv.ScalarAll(10), 0, msk)
            color_list.append(color)
            color = color + 1

obj = []
l_x = 0
r_x = 0
d_y = 0
u_y = 0
color = 0
for col in range(len(color_list)):
    for x in range(binary.width):
        for y in range(binary.height):
            if (binary[y, x] == color_list[col] and binary[y, x] != color):
                obj.append([x, x, y, y])
Code Example #16
def split_captcha(filenameIN):
    threshold = 150
    threshold = 200
    maxValue = 255
    thresholdType = cv.CV_THRESH_BINARY
    srcImg = cv.LoadImage(filenameIN, 1)
    grayThresh = cv.CreateImage((srcImg.width, srcImg.height), cv.IPL_DEPTH_8U,
                                1)
    cv.CvtColor(srcImg, grayThresh, cv.CV_BGR2GRAY)
    cv.Threshold(grayThresh, grayThresh, threshold, maxValue, thresholdType)
    cv.SaveImage((filenameIN + "grayThresh.bmp"), grayThresh)
    connectivity = 4
    CCs4 = []

    gray4 = cv.CloneImage(grayThresh)

    for i in range(gray4.width):
        for j in range(gray4.height):
            if (cv.Get2D(gray4, j, i)[0] == 0):
                cc = CC()
                cc.mask = cv.CreateImage((gray4.width + 2, gray4.height + 2),
                                         cv.IPL_DEPTH_8U, 1)
                cv.Zero(cc.mask)
                cc.comp = cv.FloodFill(gray4, (i, j), cv.Scalar(128),
                                       cv.ScalarAll(0), cv.ScalarAll(0),
                                       connectivity, cc.mask)
                CCs4.append(cc)

    CCs4.sort(cmp=func_compare_area_cc)

    size = len(CCs4)
    for i in range(size):
        if (CCs4[size - 1 - i].comp[0] < 20):
            CCs4.pop()

    connectivity = 8
    CCs8 = []
    gray8 = cv.CloneImage(grayThresh)
    for i in range(gray8.width):
        for j in range(gray8.height):
            if (cv.Get2D(gray8, j, i)[0] == 0):
                cc = CC()
                cc.mask = cv.CreateImage((gray8.width + 2, gray8.height + 2),
                                         cv.IPL_DEPTH_8U, 1)
                cv.Zero(cc.mask)
                cc.comp = cv.FloodFill(gray8, (i, j), cv.Scalar(128),
                                       cv.ScalarAll(0), cv.ScalarAll(0),
                                       connectivity, cc.mask)
                CCs8.append(cc)
    CCs8.sort(cmp=func_compare_area_cc)

    size = len(CCs8)
    for i in range(size):
        if (CCs8[size - 1 - i].comp[0] < 20):
            CCs8.pop()

    CCs = []
    CCs = copy.copy(CCs8)
    # if (len(CCs8) < 3):
    #     CCs = copy.copy(CCs4)
    # else :
    #     if (CCs4[2].comp[0] < 20):
    #         CCs = copy.copy(CCs8)
    #     else:
    #         CCs = copy.copy(CCs4)
    CCs.sort(cmp=func_compare_pos_cc)
    letters = []
    letters_path = []

    for i in range(len(CCs)):
        letter = cv.CreateImage((WIDTH, HEIGHT), cv.IPL_DEPTH_8U, 1)
        cv.Set(letter, 255)
        letters.append(letter)
    for index_image in range(len(letters)):
        letter = letters[index_image]
        cc = CCs[index_image]

        offsetx = (WIDTH - cc.comp[2][2]) / 2
        offsety = (HEIGHT - cc.comp[2][3]) / 2

        for i in range(1, cc.mask.width - 1):
            for j in range(1, cc.mask.height - 1):
                if (cv.Get2D(cc.mask, j, i)[0] == 1):
                    Y = j - cc.comp[2][1] + offsety
                    X = i - cc.comp[2][0] + offsetx

                    if ((X > 0) and (X < WIDTH) and (Y > 0) and (Y < HEIGHT)):
                        cv.Set2D(letter, j - cc.comp[2][1] + offsety,
                                 i - cc.comp[2][0] + offsetx, cv.Scalar(0))
        letters_path.append(filenameIN + str(index_image + 1) + ".bmp")
        cv.SaveImage((filenameIN + str(index_image + 1) + ".bmp"),
                     letters[index_image])
        process_file(letters_path[index_image], WIDTH=31, HEIGHT=31)
    return letters_path
Code Example #17
    def transformImage(self,im, use_orig=True, inverse=False):
        ''' 
        Transforms an image into the new coordinate system.
        
        If this image was produced via an affine transform of another image, 
        this method will attempt to trace weak references to the original image 
        and directly compute the new image from that image to improve accuracy.
        To accomplish this a weak reference to the original source image and
        the affine matrix used for the transform are added to any image 
        produced by this method.  This can be disabled using the use_orig 
        parameter.
        
        
        @param im: an Image object
        @param use_orig: (True or False) attempts to find and use the original image as the source to avoid an accumulation of errors.
        @returns: the transformed image
        '''
        #TODO: does not support opencv images.  see Perspective.py
        prev_im = im
        
        if inverse:
            inverse = self.matrix
        else:
            inverse = self.inverse
        
        if use_orig:
            # Find the oldest image used to produce this one by following weak
            # references.

            # Check to see if there is an aff_prev list
            if hasattr(prev_im,'aff_prev'):
            
                # If there is... search that list for the oldest image
                found_prev = False
                for i in range(len(prev_im.aff_prev)):
                    ref,cmat = prev_im.aff_prev[i]
                    if not found_prev and ref():
                        im = ref()
                        mat = np.eye(3)
                        found_prev = True
                        
                    if found_prev:
                        mat = np.dot(mat,cmat)
               
                if found_prev:
                    inverse = np.dot(mat,inverse) 
            
        if im.getType() == TYPE_PIL:
            data = inverse[:2,:].flatten()
            #data = (matrix[0,0],matrix[0,1],matrix[0,2],matrix[1,0],matrix[1,1],matrix[1,2])
            pil = im.asPIL().transform(self.size, AFFINE, data, self.filter)
            result = Image(pil)
        elif im.getType() == TYPE_MATRIX_2D:
            mat = im.asMatrix2D()

            mat = affine_transform(mat, self.inverse[:2,:2], offset=self.inverse[:2,2])
            result = Image(mat)
        elif im.getType() == TYPE_OPENCV:
            matrix = pv.NumpyToOpenCV(self.matrix)
            src = im.asOpenCV()
            dst = cv.CreateImage( (self.size[0],self.size[1]), cv.IPL_DEPTH_8U, src.nChannels );
            cv.WarpPerspective( src, dst, matrix, cv.CV_INTER_LINEAR+cv.CV_WARP_FILL_OUTLIERS,cv.ScalarAll(128))                    
            result = pv.Image(dst)

        else:
            raise NotImplementedError("Unhandled image type for affine transform.")

        
        # Check to see if there is an aff_prev list for this object
        if use_orig and hasattr(prev_im,'aff_prev'):
            # Create one if not
            result.aff_prev = copy.copy(prev_im.aff_prev)
        else:
            result.aff_prev = []
            
        # Append the prev image and new transform
        result.aff_prev.append( (weakref.ref(prev_im), self.inverse) )
        
        return result
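A hypothetical usage sketch of the method above (the transform object affine and the file name are illustrative, not taken from the PyVision sources). Reapplying the same transform to an already-transformed image lets the weak-reference chain resample from the original pixels instead of the intermediate result:

im = pv.Image("face.jpg")                                # original source image
warped = affine.transformImage(im)                       # result carries (weakref(im), inverse)
rewarped = affine.transformImage(warped)                 # traces back to im to avoid compounding resampling error
direct = affine.transformImage(warped, use_orig=False)   # force resampling from warped itself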
Code Example #18
    # use the nonzero_rows parameter in the cv.DFT() call below

    cv.DFT(dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height)

    cv.NamedWindow("win", 0)
    cv.NamedWindow("magnitude", 0)
    cv.ShowImage("win", im)

    # Split Fourier in real and imaginary parts
    cv.Split(dft_A, image_Re, image_Im, None, None)

    # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cv.Pow(image_Re, image_Re, 2.0)
    cv.Pow(image_Im, image_Im, 2.0)
    cv.Add(image_Re, image_Im, image_Re, None)
    cv.Pow(image_Re, image_Re, 0.5)

    # Compute log(1 + Mag)
    cv.AddS(image_Re, cv.ScalarAll(1.0), image_Re, None)  # 1 + Mag
    cv.Log(image_Re, image_Re)  # log(1 + Mag)

    # Rearrange the quadrants of Fourier image so that the origin is at
    # the image center
    cvShiftDFT(image_Re, image_Re)

    min, max, pt1, pt2 = cv.MinMaxLoc(image_Re)
    cv.Scale(image_Re, image_Re, 1.0 / (max - min), 1.0 * (-min) / (max - min))
    cv.ShowImage("magnitude", image_Re)

    cv.WaitKey(0)
Code Example #19
    def atualiza_foto(self):
        real = cv.CreateImage(cv.GetSize(imagem), cv.IPL_DEPTH_64F, 1)
        imaginario = cv.CreateImage(cv.GetSize(imagem), cv.IPL_DEPTH_64F, 1)
        complexo = cv.CreateImage(cv.GetSize(imagem), cv.IPL_DEPTH_64F, 2)

        cv.Scale(imagem_cinza, real, 1.0, 0.0)
        cv.Zero(imaginario)
        cv.Merge(real, imaginario, None, None, complexo)

        Altura_M = cv.GetOptimalDFTSize(imagem.height - 1)
        Largura_N = cv.GetOptimalDFTSize(imagem.width - 1)
        Vetor_dft = cv.CreateMat(Altura_M, Largura_N, cv.CV_64FC2)

        imagem_Real = cv.CreateImage((Largura_N, Altura_M), cv.IPL_DEPTH_64F,
                                     1)
        imagem_Imaginaria = cv.CreateImage((Largura_N, Altura_M),
                                           cv.IPL_DEPTH_64F, 1)

        temporario = cv.GetSubRect(Vetor_dft,
                                   (0, 0, imagem.width, imagem.height))
        cv.Copy(complexo, temporario, None)
        if (Vetor_dft.width > imagem.width):
            temporario = cv.GetSubRect(
                Vetor_dft,
                (imagem.width, 0, Largura_N - imagem.width, imagem.height))
            cv.Zero(temporario)

        # Applying the forward Fourier transform

        cv.DFT(Vetor_dft, Vetor_dft, cv.CV_DXT_FORWARD, complexo.height)

        cv.Split(Vetor_dft, imagem_Real, imagem_Imaginaria, None, None)

        cv.Pow(imagem_Real, imagem_Real, 2.0)
        cv.Pow(imagem_Imaginaria, imagem_Imaginaria, 2.0)
        cv.Add(imagem_Real, imagem_Imaginaria, imagem_Real, None)
        cv.Pow(imagem_Real, imagem_Real, 0.5)

        cv.AddS(imagem_Real, cv.ScalarAll(1.0), imagem_Real, None)
        cv.Log(imagem_Real, imagem_Real)

        cvShiftDFT(imagem_Real, imagem_Real)
        min, max, pt1, pt2 = cv.MinMaxLoc(imagem_Real)
        cv.Scale(imagem_Real, imagem_Real, 1.0 / (max - min),
                 1.0 * (-min) / (max - min))

        # Applying a circular low-pass filter

        cv.Circle(Vetor_dft, (0, 0), self.raio, [0, 0, 0], -1, 1, 0)
        cv.Circle(Vetor_dft, (Vetor_dft.cols, 0), self.raio, [0, 0, 0], -1, 1,
                  0)
        cv.Circle(Vetor_dft, (0, Vetor_dft.rows), self.raio, [0, 0, 0], -1, 1,
                  0)
        cv.Circle(Vetor_dft, (Vetor_dft.cols, Vetor_dft.rows), self.raio,
                  [0, 0, 0], -1, 1, 0)

        cv.Split(Vetor_dft, imagem_Real, imagem_Imaginaria, None, None)
        cv.Pow(imagem_Real, imagem_Real, 2.0)
        cv.Pow(imagem_Imaginaria, imagem_Imaginaria, 2.0)
        cv.Add(imagem_Real, imagem_Imaginaria, imagem_Real, None)
        cv.Pow(imagem_Real, imagem_Real, 0.5)
        cv.AddS(imagem_Real, cv.ScalarAll(1.0), imagem_Real, None)
        cv.Log(imagem_Real, imagem_Real)
        cvShiftDFT(imagem_Real, imagem_Real)
        min, max, pt1, pt2 = cv.MinMaxLoc(imagem_Real)
        cv.Scale(imagem_Real, imagem_Real, 1.0 / (max - min),
                 1.0 * (-min) / (max - min))

        cv.ShowImage("Transformada de Fourier", imagem_Real)

        # Applying the inverse Fourier transform

        cv.DFT(Vetor_dft, Vetor_dft, cv.CV_DXT_INVERSE_SCALE, Largura_N)
        cv.Split(Vetor_dft, imagem_Real, imagem_Imaginaria, None, None)
        min, max, pt1, pt2 = cv.MinMaxLoc(imagem_Real)
        if ((min < 0) or (max > 255)):
            cv.Scale(imagem_Real, imagem_Real, 1.0 / (max - min),
                     1.0 * (-min) / (max - min))
        else:
            cv.Scale(imagem_Real, imagem_Real, 1.0 / 255, 0)

        cv.ShowImage("Inversa da Fourier", imagem_Real)
Code Example #20
 def undistort_frame(self):
     img = self.convert_color()
     cv.Remap(img, self.undistort_image, self.undistort_mapx,
              self.undistort_mapy, cv.CV_INTER_LINEAR, cv.ScalarAll(0))
     return self.undistort_image
Code Example #21
                              random.randrange(0, 100) * 0.05 + 0.01,
                              random.randrange(0, 5) * 0.1,
                              random.randrange(0, 10),
                              line_type)

        cv.PutText(image, "Testing text rendering!",
                      pt1, font,
                      random_color(random))
        
        cv.ShowImage(window_name, image)
        cv.WaitKey(delay)

    # prepare a text, and get its properties
    font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX,
                          3, 3, 0.0, 5, line_type)
    text_size, ymin = cv.GetTextSize("OpenCV forever!", font)
    pt1 = ((width - text_size[0]) / 2, (height + text_size[1]) / 2)
    image2 = cv.CloneImage(image)

    # now, draw some OpenCV pub ;-)
    for i in range(0, 512, 2):
        cv.SubS(image2, cv.ScalarAll(i), image)
        (r, g, b) = colorsys.hsv_to_rgb((i % 100) / 100., 1, 1)
        cv.PutText(image, "OpenCV forever!",
                      pt1, font, cv.RGB(255 * r, 255 * g, 255 * b))
        cv.ShowImage(window_name, image)
        cv.WaitKey(delay)

    # wait some key to end
    cv.WaitKey(0)
Code Example #22
def invert(im):
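    # for an 8-bit image, int('1' * 8, 2) == 255, so SubRS below computes
    # 255 - pixel for every pixel, i.e. a photographic negative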
    inv = new_from(im)
    cv.SubRS(im, cv.ScalarAll(int('1' * im.depth, 2)), inv)
    return inv
Code Example #23
intrinsics[1, 2] = float(3288 / 2)

dist_coeffs = cv.CreateMat(1, 4, cv.CV_64FC1)
cv.Zero(dist_coeffs)
dist_coeffs[0, 0] = float(-1)
dist_coeffs[0, 1] = float(-1)  # float(0.0193617)
dist_coeffs[0, 2] = float(-.1)  #float(-0.002004)
dist_coeffs[0, 3] = float(-.1)  #float(-0.002056)
#End Camera Matrix

allFiles = []
for root, dirs, files in os.walk(startdir + "/"):
    allFiles += [os.path.join(root, name) for name in files if ".jpg" in name]

for im in allFiles:

    #src = "2015-03-07 11.07.16.jpg"
    src = cv.LoadImage(im)
    size = cv.GetSize(src)
    s = (int(size[0] * 0.9), int(size[1] * 0.9))

    res = cv.CreateImage(s, src.depth, src.nChannels)
    im1 = cv.CreateImage(s, cv.IPL_DEPTH_32F, 1)
    im2 = cv.CreateImage(s, cv.IPL_DEPTH_32F, 1)

    cv.InitUndistortMap(intrinsics, dist_coeffs, im1, im2)
    cv.Remap(src, res, im1, im2, cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS,
             cv.ScalarAll(0))
    print im.strip(".jpg") + "_nodistort.jpg"
    cv.SaveImage(im.strip(".jpg") + "_nodistort.jpg", res)
Code Example #24
def undistort(image):
    result = cv.CreateImage(cv.GetSize(image), image.depth, image.nChannels)
    cv.Remap(image, result, mapX, mapY,
             cv.CV_INTER_CUBIC + cv.CV_WARP_FILL_OUTLIERS, cv.ScalarAll(0))
    return result
Code Example #25
        if c == ord('w'):
            storage = cv.CreateMemStorage(0)
            #cv.SaveImage("wshed_mask.png", marker_mask)
            #marker_mask = cv.LoadImage("wshed_mask.png", 0)
            contours = cv.FindContours(marker_mask, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            def contour_iterator(contour):
                while contour:
                    yield contour
                    contour = contour.h_next()

            cv.Zero(markers)
            comp_count = 0
            for c in contour_iterator(contours):
                cv.DrawContours(markers,
                                c,
                                cv.ScalarAll(comp_count + 1),
                                cv.ScalarAll(comp_count + 1),
                                -1,
                                -1,
                                8)
                comp_count += 1

            cv.Watershed(img0, markers)

            cv.Set(wshed, cv.ScalarAll(255))

            # paint the watershed image
            color_tab = [(cv.RandInt(rng) % 180 + 50, cv.RandInt(rng) % 180 + 50, cv.RandInt(rng) % 180 + 50) for i in range(comp_count)]
            for j in range(markers.height):
                for i in range(markers.width):
                    idx = markers[j, i]