Example #1
 def segment(self):
     algorithm = self.algorithmComboBox.currentText()
     imgPath = str(self.userpath.text())
     index = self.featureComboBox.currentIndex()
     features = [
         "INTENSITY", "INTENSITY+LOC", "RGB", "YUV", "LM", "ILM", "PCA"
     ]
     if algorithm == "K-means":
         k = int(self.kText.text())
         iterations = int(self.iterationsText.text())
         epsilon = float(self.epsilonText.text())
         print imgPath, index, k, iterations, epsilon
         org = cv.LoadImageM(imgPath)
         im = kmeans.kmeans(imgPath, features[index], k, iterations,
                            epsilon)
         cv.ShowImage("original", org)
         cv.ShowImage("segmented", im)
     elif algorithm == "Mean Shift":
         if index == 4:
             QtGui.QMessageBox.information(
                 self, 'Error', 'LM is not supported in mean shift')
             return
         print imgPath, features[index]
         org = cv.LoadImageM(imgPath)
         im = meanshift.meanshift(imgPath, features[index])
         cv.ShowImage("original", org)
         cv.ShowImage("segmented", im)
Example #2
def meanshiftUsingIntensityAndLocation(path):
    im = cv.LoadImageM(path, cv.CV_LOAD_IMAGE_GRAYSCALE)
    #create a mat from the pixel intensity and its location
    mat = cv.LoadImageM(path)
    for i in xrange(0, im.height):
        for j in xrange(0, im.width):
            value = (im[i, j], i, j)
            mat[i, j] = value
            #print mat[i,j]

    (segmentedImage, labelsImage, numberRegions) = pms.segmentMeanShift(mat)

    clusters = {}
    for i in xrange(0, labelsImage.height):
        for j in xrange(0, labelsImage.width):
            v = labelsImage[i, j]
            if v in clusters:
                clusters[v].append(im[i, j])
            else:
                clusters[v] = [im[i, j]]

    means = {}
    for c in clusters:
        means[c] = sum(clusters[c]) / len(clusters[c])

    for i in xrange(0, im.height):
        for j in xrange(0, im.width):
            lbl = labelsImage[i, j]
            im[i, j] = means[lbl]

    print "number of regions", numberRegions
    return im
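A minimal usage sketch for the function above; the file name is hypothetical, and the legacy cv bindings plus the pms (pymeanshift-style) module used by this code are assumed to be importable:

seg = meanshiftUsingIntensityAndLocation("scene.png")  # hypothetical input image
cv.ShowImage("segmented", seg)
cv.WaitKey(0)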
Example #3
def fast_imread(imgpath, flatten=True, dtype='uint8'):
    if flatten:
        Icv = cv.LoadImageM(imgpath, cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        Icv = cv.LoadImageM(imgpath, cv.CV_LOAD_IMAGE_COLOR)
    Inp = np.asarray(Icv, dtype=dtype)
    return Inp
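A minimal usage sketch, assuming cv and numpy are importable at module level and an image exists at the (made-up) path below:

I = fast_imread("ballot.png", flatten=True)  # hypothetical path
print I.shape, I.dtype                       # 2-D uint8 array when flatten=True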
Example #4
def traitements(img):

	src = cv.LoadImageM(norm_path+img, 1)
	dst = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_16S, 3)

	# --- Corners --- #
	print "CORNERS"
	#eig_image = cv.CreateMat(src.rows, src.cols, cv.CV_32FC1)
	#temp_image = cv.CreateMat(src.rows, src.cols, cv.CV_32FC1)
	#corners = cv.GoodFeaturesToTrack(src, eig_image, temp_image, 100, 0.04, 1.0, useHarris = True)
	#affichage_corners(corners, src, 2) 
	#save(traitement_path, img, src)
	print "END CORNERS"

	# --- Threshold --- #
	print "THRESHOLD"
	src = cv.LoadImageM(norm_path+img, cv.CV_LOAD_IMAGE_GRAYSCALE)
	cv.AdaptiveThreshold(src,src,255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, 7, 10)
	#cv.Erode(src,src,None,1)
	#cv.Dilate(src,src,None,1)
	print src[56,56]
	save(traitement_path, dossier+"seuil."+img, src)
	print "END THRESHOLD"
Example #5
def build_problem(img_kind, subdir = "data/"):
	subdir = "data/"

	classes = []
	data = []

	the_ones = glob.glob(subdir + "f_" + img_kind + "*.jpg")
	all_of_them = glob.glob(subdir + "f_*_*.jpg")
	the_others = []

	for x in all_of_them:
		if the_ones.count(x) < 1:
			the_others.append(x)
	
	for x in the_ones:
		classes.append(1)
		data.append(get_image_features(cv.LoadImageM(x), True, img_kind))
	
	for x in the_others:
		classes.append(-1)
		data.append(get_image_features(cv.LoadImageM(x), True, img_kind))

	prob = svm.svm_problem(classes, data)

	return prob
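A hypothetical training sketch built on build_problem, assuming the libsvm Python bindings (svm, svmutil) used elsewhere in these examples are importable; the img_kind value and model path are placeholders:

prob = build_problem("happy")                      # "happy" is a placeholder img_kind
model = svmutil.svm_train(prob, '-t 0')            # linear kernel, default parameters
svmutil.svm_save_model("data/happy.model", model)  # naming scheme the test code expects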
Example #6
def main():
    imgNormalP = 'contest_oc.png'
    imgRotatedP = 'contest_oc_rot.png'
    
    imgA = cv.LoadImageM(imgNormalP, cv.CV_LOAD_IMAGE_GRAYSCALE)
    imgB = cv.LoadImageM(imgRotatedP, cv.CV_LOAD_IMAGE_GRAYSCALE)
    
    # A is Normal, B is Rotated
    # 0.) Get corners (upperleft,upperright,lowerleft,lowerright)
    # pA_0 corresponds to pB_0, etc...
    pA_0 = (28, 29)             # UpperLeft
    pA_1 = (509, 30)            # UpperRight    
    pA_2 = (29, 531)            # LowerLeft    
    pA_3 = (509, 530)           # LowerRight
    
    pB_0 = (109, 77)            # UpperLeft
    pB_1 = (587, 110)           # UpperRight
    pB_2 = (76, 578)            # LowerLeft
    pB_3 = (556, 606)           # LowerRight

    imgB_align, M = align_affine(imgA, imgB, (pA_0, pA_1, pA_2),
                                (pB_0, pB_1, pB_2))

    cv.SaveImage("_imgB_align.png", imgB_align)
    
    print "Done -- saved aligned image to: _imgB_align.png."
    print
    print "AffineMat found:"
    print np.array(M)
    
    print
    
    print "Note: You'll notice that _imgB_align.png still isn't \
Example #7
def main():
    if len(sys.argv) < 2:
        print "Come on, give me some files to play with"
        return

    print "Reading image " + sys.argv[1]

    incoming = cv.LoadImageM(sys.argv[1])
    w, h = (incoming.cols, incoming.rows)
    nw, nh = (int(w * 1.5 + 0.5), int(h * 1.5 + 0.5))
    img = cv.CreateImage(cv.GetSize(incoming), cv.IPL_DEPTH_32F, 3)
    cv.Convert(incoming, img)

    n = 0
    for f in sys.argv[1:]:
        incoming = cv.LoadImageM(f)
        w, h = (incoming.cols, incoming.rows)
        nw, nh = (int(w * 1.5 + 0.5), int(h * 1.5 + 0.5))
        new = cv.CreateImage(cv.GetSize(incoming), cv.IPL_DEPTH_32F, 3)
        cv.Convert(incoming, new)

        n += 1
        print "Read in image [%04d] [%s]" % (n, f)
        img = imageBlend(img, new, 1.0 / n)

        del (new)

    out = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_16U, 3)
    cv.ConvertScale(img, out, 256.)
    cv.SaveImage("out-16-up.png", out)
    print "Written out-16-up.png"
Example #8
    def loadImages(self, left_file, right_file, im1, im2):
        im1_int = cv.LoadImageM(self.left_folder + '/' + left_file, 0)
        im2_int = cv.LoadImageM(self.right_folder + '/' + right_file, 0)

        cv.ConvertScale(im1_int, im1, 1. / 255)
        cv.ConvertScale(im2_int, im2, 1. / 255)

        self.rig.stereoRectify1(im1, im1)
        self.rig.stereoRectify2(im2, im2)
Example #9
def compute_flow_opencv(alpha, iterations, ifile1, ifile2):
  import cv
  import numpy
  i1 = cv.LoadImageM(os.path.join("flow", ifile1), iscolor=False)
  i2 = cv.LoadImageM(os.path.join("flow", ifile2), iscolor=False)
  u = cv.CreateMat(i1.rows, i1.cols, cv.CV_32F)
  cv.SetZero(u)
  v = cv.CreateMat(i1.rows, i1.cols, cv.CV_32F)
  cv.SetZero(v)
  l = 1.0/(alpha**2)
  cv.CalcOpticalFlowHS(i1, i2, 0, u, v, l, (cv.CV_TERMCRIT_ITER, iterations, 0))
  # return the flow fields as numpy arrays
  return numpy.array(u, 'float64'), numpy.array(v, 'float64')
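A minimal call sketch; the frame names are hypothetical and are expected to sit in the local "flow" directory the function reads from:

u, v = compute_flow_opencv(alpha=1.0, iterations=100,
                           ifile1="frame_00.png", ifile2="frame_01.png")
print u.shape, v.shape  # per-pixel horizontal and vertical flow components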
Example #10
def pca_test(img_kind):
	import pylab as pl
	from mpl_toolkits.mplot3d import Axes3D

	subdir = "data/"

	classes = []
	data = []

	the_ones = glob.glob(subdir + "f_" + img_kind + "*.jpg")
	all_of_them = glob.glob(subdir + "f_*_*.jpg")
	the_others = []

	for x in all_of_them:
		if the_ones.count(x) < 1:
			the_others.append(x)
	
	for x in the_ones:
		classes.append(1)
		data.append(get_image_features(cv.LoadImageM(x)))
	
	for x in the_others:
		classes.append(-1)
		data.append(get_image_features(cv.LoadImageM(x)))
	
	pca = PCA(46, whiten=True)
	print 'fitting'
	pca.fit(data)
	print 'transforming'
	X_r = pca.transform(data)
	print '----'

	print X_r.shape

	x0 = [x[0] for x in X_r]
	x1 = [x[1] for x in X_r]

	pl.figure()

	for i in xrange(0,len(x0)):
		if classes[i] == 1:
			pl.scatter(x0[i], x1[i], c = 'r')
		else:
			pl.scatter(x0[i], x1[i], c = 'b')
	

	
	# for c, i, target_name in zip("rg", [1, -1], target_names):
	#     pl.scatter(X_r[classes == i, 0], X_r[classes == i, 1], c=c, label=target_name)
	pl.legend()
	pl.title('PCA of dataset')

	pl.show()
Example #11
def test_model(img_kind):
	subdir = "data/"
	model = svmutil.svm_load_model(subdir + img_kind + '.model')
	print "Finished Loading Model"

	total_count = 0
	correct_count = 0
	wrong_count = 0

	
	the_ones = glob.glob(subdir + "f_" + img_kind + "*.jpg")
	all_of_them = glob.glob(subdir + "f_*_*.jpg")
	the_others = []

	for x in all_of_them:
		total_count += 1
		if the_ones.count(x) < 1:
			the_others.append(x)
	
	for x in the_ones:
		img = cv.LoadImageM(x)
		cv.ShowImage("img", img)
		cv.WaitKey(10)
		img_features = get_image_features(img, True, img_kind)
		predict_input_data = []
		predict_input_data.append(img_features)
		(val, val_2, val_3) = svmutil.svm_predict([1], predict_input_data, model)
		if int(val[0]) == 1:
			print 'correct'
			correct_count += 1
		else:
			wrong_count += 1

	for x in the_others:
		img = cv.LoadImageM(x)
		cv.ShowImage("img", img)
		cv.WaitKey(10)
		img_features = get_image_features(img, True, img_kind)
		predict_input_data = []
		predict_input_data.append(img_features)
		(val, val_2, val_3) = svmutil.svm_predict([1], predict_input_data, model)
		if int(val[0]) == -1:
			correct_count += 1
		else:
			wrong_count += 1
	
	print "Total Pictures: " + str(total_count)
	print "Correct: " + str(correct_count)
	print "Wrong: " + str(wrong_count)
	print "Accuracy: " + str(correct_count/float(total_count) * 100) + '%'
Example #12
    def __init__(self, n, n_test, n_batches, add_lower50='y', make_testset='y'):
        self.onOpen()
        self.makeDest()
        self.get_tracenames()

        # get an image and open it to see the size
        img = cv.LoadImageM(self.datafiles[0], iscolor=False)
        self.csize = shape(img)
        self.img = asarray(img)
        
        #open up the ROI_config.txt and parse
        self.pathtofiles = '/'.join(self.datafiles[0].split('/')[:-1]) + '/'
        self.config = self.pathtofiles + 'ROI_config.txt'
        if (os.path.isfile(self.config)):
            print "Found ROI_config.txt"
            c = open(self.config, 'r').readlines()
            self.top = int(c[1][:-1].split('\t')[1])
            self.bottom = int(c[2][:-1].split('\t')[1])
            self.left = int(c[3][:-1].split('\t')[1])
            self.right = int(c[4][:-1].split('\t')[1])
            print "using ROI: [%d:%d, %d:%d]" % (self.top, self.bottom, self.left, self.right)
        else:
            print "ROI_config.txt not found"
            self.top = 140 #default settings for the Sonosite Titan
            self.bottom = 320
            self.left = 250
            self.right = 580
            print "using ROI: [%d:%d, %d:%d]" % (self.top, self.bottom, self.left, self.right)
        
        roi = img[self.top:self.bottom, self.left:self.right]
        self.roisize = shape(roi)
        
        self.get_diverse(n, n_test, n_batches, add_lower50, make_testset)
Example #13
    def compute_num_faces_from_url(self, url):
        num_faces = None
        is_bright = None
        num_retries = 0
        with tempfile.NamedTemporaryFile(dir='/tmp/') as fh:
            while num_faces is None and num_retries < 3:
                try:
                    logging.info(url)
                    urlfile = urllib2.urlopen(url, timeout=10)
                    fh.write(urlfile.read())
                    fh.flush()

                    is_bright = self.compute_is_bright(fh)
                    image = cv.LoadImageM(fh.name, cv.CV_LOAD_IMAGE_GRAYSCALE)
                    num_faces = self.compute_num_faces(image)
                    logging.info('Number of faces: %d' % num_faces)
                except Exception as e:
                    logging.error('Exception caught: %s' % str(e))
                    traceback.print_exc()
                    time.sleep(2**(num_retries + 1))
                num_retries += 1

        if num_faces:
            logging.info('Face found in %s' % url)

        if num_faces is None or is_bright is None:
            return None

        return {
            'num_faces': num_faces,
            'is_bright': is_bright,
        }
Example #14
 def browse2(self):
     filePath = QtGui.QFileDialog.getOpenFileName(self, "Find Files",
                                                  QtCore.QDir.currentPath())
     self.userpath2.setText(filePath)
     img = cv.LoadImageM(str(filePath))
     cv.NamedWindow("Output", 1)
     cv.ShowImage("Output", img)
Example #15
    def onMenuItemOpenTemplateActivate(self, widget):

        if self.targetImageGray is None:
            print "Error: Must load target image first"
            return

        filename = self.chooseImageFile()

        if filename is not None:
            # Load in the template image
            imageCV = cv.LoadImageM(filename)

            # Check that it has the same dimensions as the target image
            if imageCV.width != self.targetImageGray.shape[ 1 ] \
                or imageCV.height != self.targetImageGray.shape[ 0 ]:

                print "Error: The template image must have the same dimensions as the target image"
                return

            # Convert to grayscale and display
            self.templateImageGray = np.ndarray(
                (imageCV.height, imageCV.width), dtype=np.uint8)
            cv.CvtColor(imageCV, self.templateImageGray, cv.CV_BGR2GRAY)
            self.dwgTemplateImageDisplay.setImageFromNumpyArray(
                self.templateImageGray)
            self.lblTemplateName.set_text(os.path.split(filename)[1])

            # Merge the images
            self.mergeImages()
Example #16
    def onMenuItemOpenTargetActivate(self, widget):

        filename = self.chooseImageFile()

        if filename is not None:
            # Load in the target image and convert to grayscale
            imageCV = cv.LoadImageM(filename)
            self.targetImageGray = np.ndarray((imageCV.height, imageCV.width),
                                              dtype=np.uint8)
            cv.CvtColor(imageCV, self.targetImageGray, cv.CV_BGR2GRAY)

            # Display the image
            self.dwgTargetImageDisplay.setImageFromNumpyArray(
                self.targetImageGray)
            self.lblTargetName.set_text(os.path.split(filename)[1])

            # Clear the template image
            self.templateImageGray = np.zeros(self.targetImageGray.shape,
                                              dtype=np.uint8)
            self.dwgTemplateImageDisplay.setImageFromNumpyArray(
                self.templateImageGray)
            self.lblTemplateName.set_text("")

            # Merge the images
            self.mergeImages()
Example #17
def meanshiftUsingPCA(path):
    # Load original image given the image path
    im = cv.LoadImageM(path)
    #convert image to YUV color space
    cv.CvtColor(im, im, cv.CV_BGR2YCrCb)
    # Load bank of filters
    filterBank = lmfilters.loadLMFilters()
    # Resize image to decrease dimensions during clustering
    resize_factor = 1
    thumbnail = cv.CreateMat(im.height / resize_factor,
                             im.width / resize_factor, cv.CV_8UC3)
    cv.Resize(im, thumbnail)
    # now work with resized thumbnail image
    response = np.zeros(shape=((thumbnail.height) * (thumbnail.width), 51),
                        dtype=float)
    for f in xrange(0, 48):
        filter = filterBank[f]
        # Resize the filter with the same factor for the resized image
        dst = cv.CreateImage(cv.GetSize(thumbnail), cv.IPL_DEPTH_32F, 3)
        resizedFilter = cv.CreateMat(filter.height / resize_factor,
                                     filter.width / resize_factor, filter.type)
        cv.Resize(filter, resizedFilter)
        # Apply the current filter
        cv.Filter2D(thumbnail, dst, resizedFilter)
        for j in xrange(0, thumbnail.height):
            for i in xrange(0, thumbnail.width):
                # Select the max. along the three channels
                maxRes = max(dst[j, i])
                if math.isnan(maxRes):
                    maxRes = 0.0
                if maxRes > response[thumbnail.width * j + i, f]:
                    # Store the max. response for the given feature index
                    response[thumbnail.width * j + i, f] = maxRes

    #YUV features
    count = 0
    for j in xrange(0, thumbnail.height):
        for i in xrange(0, thumbnail.width):
            response[count, 48] = thumbnail[j, i][0]
            response[count, 49] = thumbnail[j, i][1]
            response[count, 50] = thumbnail[j, i][2]
            count += 1

    #get the first 4 principal components using pca
    pca = PCA(response)
    pcaResponse = zeros([thumbnail.height * thumbnail.width, 4])

    for i in xrange(0, thumbnail.height * thumbnail.width):
        pcaResponse[i] = pca.getPCA(response[i], 4)

    # Create new mean shift instance
    ms = MeanShift(bandwidth=10, bin_seeding=True)
    # Apply the mean shift clustering algorithm
    ms.fit(pcaResponse)
    labels = ms.labels_
    n_clusters_ = np.unique(labels)
    print "Number of clusters: ", len(n_clusters_)
    repaintImage(thumbnail, labels)
    cv.Resize(thumbnail, im)
    return im
Example #18
def partitionImage(image_path, NUM_PARTITIONS=5):
    img = cv.LoadImageM(image_path)
    height = img.rows
    width = img.cols
    width_partition = width / NUM_PARTITIONS
    height_partition = height / NUM_PARTITIONS
    y_cur = 0
    x_cur = 0
    partitions = []
    for i in range(0, NUM_PARTITIONS):  #iterate over rows
        for j in range(0, NUM_PARTITIONS):  #iterate over columns
            if j == NUM_PARTITIONS - 1:  #if last column
                how_much_left_width = width - x_cur
                if i == NUM_PARTITIONS - 1:  #if last column and last row
                    how_much_left_height = height - y_cur
                    partitions.append(
                        cv.GetSubRect(img, (x_cur, y_cur, how_much_left_width,
                                            how_much_left_height)))
                else:  #last column only, not last row
                    partitions.append(
                        cv.GetSubRect(img, (x_cur, y_cur, how_much_left_width,
                                            height_partition)))
            else:
                partitions.append(
                    cv.GetSubRect(
                        img,
                        (x_cur, y_cur, width_partition, height_partition)))
            x_cur = x_cur + width_partition  #update column position (x)
        y_cur = y_cur + height_partition  #update row position (y)
        x_cur = 0  #reset column position (x = 0)
    return partitions
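A minimal usage sketch; the input path is hypothetical, and each returned partition is a CvMat view into the loaded image:

tiles = partitionImage("page.png", NUM_PARTITIONS=5)  # hypothetical input image
for idx, tile in enumerate(tiles):
    cv.SaveImage("tile_%02d.png" % idx, tile)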
Example #19
 def __init__(self):
     cv2.namedWindow('CaptureMouse')
     cv.MoveWindow('CaptureMouse', 0, 0)
     cv2.setMouseCallback('CaptureMouse', self.onmouse)
     self.drag_start = False
     self.image = cv.LoadImageM("blank_800x800.jpg")
     self.pointlist = []
Example #20
def main():
    os.chdir(os.path.join(sys.argv[1], "motion"))
    try:
        os.mkdir(OUTPUT_DIR_NAME)
    except OSError:
        pass

    #os.system("del klein\\*.png")
    os.system("convert motion_*.png -adaptive-resize 500x500! " +
              OUTPUT_DIR_NAME + "\\motion_%02d.png")

    os.chdir(OUTPUT_DIR_NAME)
    os.system("convert motion_*.png -append result.png")

    img = cv.LoadImageM("result.png")
    values = []

    for y in range(img.rows):
        value = cv.Get1D(cv.GetRow(img, y), 0)[0]
        values.append(value)

    values.sort(reverse=True)

    output_img = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
    for y in range(img.rows):
        for x in range(img.cols):
            cv.Set2D(output_img, y, x, cv.RGB(values[y], values[y], values[y]))

    cv.SaveImage("result_sorted.png", output_img)

    raw_input("- done -")
    return
Example #21
    def __load_image(self, image_name, newX):
        orig = cv.LoadImageM(image_name)
        self.__newX = newX
        self.__newY = int(float(newX) / orig.cols * orig.rows)
        self.__init_mem(newX, self.__newY)

        cv.Resize(orig, self.res)
        cv.CvtColor(self.res, self.__res_gray, cv.CV_RGB2GRAY)
Example #22
def kmeans(name, feature, k, iterations, epsilon):
    print feature
    start = time.time()
    im = None
    if feature == "INTENSITY":
        im = cv.LoadImageM(name, cv.CV_LOAD_IMAGE_GRAYSCALE)
        kmeansUsingIntensity(im, k, iterations, epsilon)
    elif feature == "INTENSITY+LOC":
        im = cv.LoadImageM(name, cv.CV_LOAD_IMAGE_GRAYSCALE)
        kmeansUsingIntensityAndLocation(im, k, iterations, epsilon)
    elif feature == "RGB":
        im = cv.LoadImageM(name, cv.CV_LOAD_IMAGE_COLOR)
        kmeansUsingRGB(im, k, iterations, epsilon)
    elif feature == "YUV":
        im = cv.LoadImageM(name, cv.CV_LOAD_IMAGE_COLOR)
        kmeansUsingYUV(im, k, iterations, epsilon)
    elif feature == "LM":
        im = cv.LoadImageM(name, cv.CV_LOAD_IMAGE_COLOR)
        kmeansUsingLM(im, k, iterations, epsilon)
    elif feature == "ILM":
        im = cv.LoadImageM(name, cv.CV_LOAD_IMAGE_COLOR)
        kmeansUsingILM(im, k, iterations, epsilon)
    elif feature == "PCA":
        im = cv.LoadImageM(name, cv.CV_LOAD_IMAGE_COLOR)
        kmeansUsingPCA(im, k, iterations, epsilon)

    end = time.time()

    print "time", end - start, "seconds"

    return im
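A minimal call sketch; the image name is hypothetical and the kmeansUsing* helpers referenced above are assumed to live in the same module:

im = kmeans("house.png", "RGB", k=4, iterations=10, epsilon=1.0)
cv.ShowImage("segmented", im)
cv.WaitKey(0)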
Example #23
    def __init__(self):

        #Dictionary containing the threshold values that the sliders correspond
        #to, each is initially set to 128, the mid value.
        self.thresholds = {'low_red': 128, 'high_red': 128,\
                           'low_green': 128, 'high_green': 128,\
                           'low_blue': 128, 'high_blue': 128,\
                           'low_hue': 128, 'high_hue': 128,\
                           'low_sat': 128, 'high_sat': 128,\
                           'low_val': 128, 'high_val': 128}

        #Set up the windows containing the image from the kinect, the altered
        #image, and the threshold sliders.
        cv.NamedWindow('image')
        cv.MoveWindow('image', 320, 0)
        cv.NamedWindow('threshold')
        cv.MoveWindow('threshold', 960, 0)
        cv.NamedWindow('hsv')
        cv.MoveWindow('hsv', 320, 450)

        self.make_slider_window()

        #self.bridge = cv_bridge.CvBridge()

        #self.capture = cv.CaptureFromCAM(0)
        #self.image = cv.QueryFrame(self.capture)

        self.image_orig = cv.LoadImageM('room1.jpg')
        self.image_color = cv.LoadImageM('room1.jpg')
        self.image = cv.LoadImageM('room1.jpg')

        self.size = cv.GetSize(self.image)
        self.hsv = cv.CreateImage(self.size, 8, 3)

        cv.SetMouseCallback('image', self.mouse_callback, True)

        self.RGBavgs = ()
        self.RGBvals = []

        self.HSVavgs = ()
        self.HSVvals = []

        self.new_color = (128, 0, 0)
        self.new_hsv = (0, 255, 255)
        self.hue = 0
Example #24
def edgeConv(StrImg):
    image = cv.LoadImageM(StrImg)
    gray = cv.CreateImage(cv.GetSize(image), 8, 1)
    edges = cv.CreateImage(cv.GetSize(image), 8, 1)

    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)
    cv.Canny(gray, edges, 50, 200)
    #print edges[399,699]
    return edges
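A minimal usage sketch; the image path is hypothetical:

edges = edgeConv("photo.jpg")  # hypothetical input image
cv.ShowImage("edges", edges)
cv.WaitKey(0)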
Example #25
def make_histogram(imagefile):
    col = cv.LoadImageM(imagefile)
    gray = cv.CreateImage(cv.GetSize(col), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(col, gray, cv.CV_RGB2GRAY)

    hist = cv.CreateHist([NUM_BINS], cv.CV_HIST_ARRAY, [[0, 255]], 1)
    cv.CalcHist([gray], hist)
    cv.NormalizeHist(hist, 1.0)
    return hist
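A minimal sketch comparing two images by their intensity histograms; NUM_BINS is the module-level constant the function assumes (64 is an arbitrary choice here) and the file names are made up:

NUM_BINS = 64
h1 = make_histogram("frame_a.png")
h2 = make_histogram("frame_b.png")
print cv.CompareHist(h1, h2, cv.CV_COMP_CORREL)  # 1.0 means identical distributions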
Example #26
def test_second_stage():
    folder = 'data/'

    print '-------'
    inputFile = 'johns.jpg'
    img = cv.LoadImageM(folder + inputFile)
    slant = getslant(img)
    dests = second_stage_classification(folder + slant + '/', img)
    print dests

    return dests
Example #27
def detect_image_faces(image_file, cascade):
    # Detect all faces in image:
    try:
        image = cv.LoadImageM(image_file, cv.CV_LOAD_IMAGE_GRAYSCALE)
        faces = cv.HaarDetectObjects( \
          image, cascade, cv.CreateMemStorage(0), scale_factor = 1.2, \
          min_neighbors = 2, flags = 0, min_size = (20, 20))
    except:
        faces = []

    return faces
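A minimal usage sketch; the cascade file and image path are hypothetical and depend on the local OpenCV installation:

cascade = cv.Load("haarcascade_frontalface_alt.xml")  # hypothetical cascade path
for (x, y, w, h), neighbors in detect_image_faces("group.jpg", cascade):
    print "face at", (x, y, w, h), "neighbors:", neighbors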
Example #28
def standardImread_v2(imgpath, flatten=False, dtype='float32', normalize=True):
    """ Reads in IMGPATH, and outputs a numpy array with pix. If normalize
    is True, then intensity values will be floats from [0.0, 1.0]. O.w.
    will be ints from [0, 255].
    """
    cvmode = cv.CV_LOAD_IMAGE_GRAYSCALE if flatten else cv.CV_LOAD_IMAGE_COLOR
    Icv = cv.LoadImageM(imgpath, cvmode)
    Inp = np.asarray(Icv, dtype=dtype)
    if normalize:
        Inp = Inp / 255.0
    return Inp
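A minimal usage sketch illustrating the normalization described in the docstring; the path is hypothetical:

I = standardImread_v2("ballot.png", flatten=True)  # hypothetical path
print I.dtype, I.min(), I.max()                    # float32 values in [0.0, 1.0]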
Example #29
    def __load_image(self, image_name):
        orig = cv.LoadImageM(image_name)
        self.__gray = cv.CreateMat(orig.rows, orig.cols, cv.CV_8UC1)
        cv.CvtColor(orig, self.__gray, cv.CV_RGB2GRAY)

        if orig.cols > orig.rows:
            self.__draww = float(self.__plotw)
            self.__drawh = float(orig.rows) / orig.cols * self.__plotw
        else:
            self.__draww = float(orig.cols) / orig.rows * self.__ploth
            self.__drawh = float(self.__ploth)
Example #30
 def get_average_image(self):
     files = self.datafiles        
     ave_img = zeros(self.roisize)
     for i in range(len(files)):
         img = cv.LoadImageM(files[i], iscolor=False)
         roi = img[self.top:self.bottom, self.left:self.right]
         roi = asarray(roi)/255.
         ave_img += roi
     ave_img /= len(files)    
 
     return ave_img, files