예제 #1
0
파일: util.py 프로젝트: janfrs/kwc-ros-pkg
def get_sub_image(image, coord, sizex=10, sizey=10, save=True):
    """Extract a sizex-by-sizey window of `image` centered on `coord`.

    The window is clamped so it never leaves the image bounds.
    Fixes the original's mixed tab/space indentation (a latent
    SyntaxError under stricter interpreters).

    image - OpenCV image exposing .width/.height and 2-D slicing
    coord - point-like object with .x/.y (requested window center)
    sizex, sizey - window extents in pixels
    save  - when True, also write the crop to a timestamped PNG

    Returns {'sub_img': crop, 'sub_img_top_left': cv.cvPoint(low_x, low_y)}.
    """
    high_x = coord.x + int(round(sizex / 2.0))
    low_x = coord.x - int(sizex / 2.0)
    high_y = coord.y + int(round(sizey / 2.0))
    low_y = coord.y - int(sizey / 2.0)

    # Clamp horizontally: slide the window back inside the image.
    if high_x > image.width - 1:
        high_x = image.width - 1
        low_x = image.width - 1 - sizex
    elif low_x < 0:
        low_x = 0
        high_x = sizex - 1

    # Clamp vertically, same rule.
    if high_y > image.height - 1:
        high_y = image.height - 1
        low_y = image.height - 1 - sizey
    elif low_y < 0:
        low_y = 0
        high_y = sizey - 1

    b = image[low_y:high_y, low_x:high_x]

    if save:
        # Timestamped name; the fractional-second suffix keeps rapid
        # successive calls from colliding on the same filename.
        curtime = time.localtime()
        curtime_raw = time.time()
        i = float(100 * (curtime_raw - int(curtime_raw)))
        date_name = time.strftime('%Y%m%d%I%M%S_' + str(i), curtime)
        highgui.cvSaveImage(date_name + 'dot.png', b)

    return {'sub_img': b, 'sub_img_top_left': cv.cvPoint(low_x, low_y)}
예제 #2
0
def main():
    """Command-line driver: extract MSS contour features from an image,
    then optionally display the contour, dump the features, or save
    the rendered image."""
    oparser = optparse.OptionParser(
        usage="%prog [options] <imgfile>",
        version="%prog 0.2\n Longbin Chen, [email protected]")
    oparser.add_option('-d', '--display', action="store_true",
                       dest='display', default=False,
                       help='display the image')
    oparser.add_option('-m', '--drawnumber', action="store_true",
                       dest='drawnumber', default=False,
                       help='display the point numbers')
    oparser.add_option('-n', '--number', dest='num', type='int',
                       default=200, help='the number of feature points')
    oparser.add_option('-t', '--threshold', dest='threshold', type='int',
                       default=100,
                       help='the threshold for image binarification')
    oparser.add_option('-o', '--output', dest='output', default=None,
                       help='output file')
    oparser.add_option('-s', '--save', dest='save', default=None,
                       help='save the img file')

    options, args = oparser.parse_args(sys.argv)
    if len(args) != 2:
        # Wrong argument count: show the generated help text, then exit.
        oparser.parse_args([sys.argv[0], "--help"])
        sys.exit(1)

    extractor = ExtractMSS()
    extractor.GetContour(args[1], options)

    if options.display:
        extractor.start = options.threshold
        extractor.DrawKeyPoints()
        highgui.cvNamedWindow("contour", 1)
        highgui.cvShowImage("contour", extractor.drawimg)
        highgui.cvWaitKey(0)

    if options.output:
        extractor.mss.save(options.output)

    if options.save:
        highgui.cvSaveImage(options.save, extractor.drawimg)
예제 #3
0
파일: util.py 프로젝트: Calm-wy/kwc-ros-pkg
def get_sub_image(image, coord, sizex=10, sizey=10, save=True):
    """Cut a sizex-by-sizey window around `coord` out of `image`,
    clamped to stay inside the image; optionally save it to a
    timestamped PNG.

    Returns a dict with the crop ('sub_img') and its top-left corner
    ('sub_img_top_left', a cv.cvPoint).
    """
    def _clamp(lo, hi, limit, span):
        # Slide the [lo, hi] window so it fits within [0, limit - 1];
        # the high-side overflow check takes precedence, as before.
        if hi > limit - 1:
            return limit - 1 - span, limit - 1
        if lo < 0:
            return 0, span - 1
        return lo, hi

    low_x, high_x = _clamp(coord.x - int(sizex / 2.0),
                           coord.x + int(round(sizex / 2.0)),
                           image.width, sizex)
    low_y, high_y = _clamp(coord.y - int(sizey / 2.0),
                           coord.y + int(round(sizey / 2.0)),
                           image.height, sizey)

    b = image[low_y:high_y, low_x:high_x]

    if save:
        # Fractional-second suffix keeps rapid calls distinct.
        stamp = time.localtime()
        now = time.time()
        frac = float(100 * (now - int(now)))
        date_name = time.strftime('%Y%m%d%I%M%S_' + str(frac), stamp)
        highgui.cvSaveImage(date_name + 'dot.png', b)

    return {'sub_img': b, 'sub_img_top_left': cv.cvPoint(low_x, low_y)}
예제 #4
0
파일: scanner.py 프로젝트: gt-ros-pkg/hrl
 def save_artag_image(self,name):    
     
     filename = self.config.path+'/data/'+name+'_artag_image.png'
     print "Saving: "+filename
     highgui.cvSaveImage(filename,self.img)
     
     return '/data/'+name+'_artag_image.png'
예제 #5
0
파일: scanner.py 프로젝트: wklharry/hrl
    def save_data(self, name, metadata=True, angle=None):
        dict = {
            'laserscans': self.laserscans,
            'l1': self.config.thok_l1,
            'l2': self.config.thok_l2,
            'image_angle': angle
        }

        prefix = self.config.path + '/data/' + name
        print "Saving: " + prefix + '_laserscans.pkl'
        ut.save_pickle(dict, prefix + '_laserscans.pkl')
        print "Saving: " + prefix + '_image.png'
        highgui.cvSaveImage(prefix + '_image.png', self.img)

        if metadata:
            # save metadata to database:
            database = scans_database.scans_database()
            database.load(self.config.path, 'database.pkl')
            dataset = scan_dataset.scan_dataset()
            dataset.id = name
            dataset.scan_filename = 'data/' + name + '_laserscans.pkl'
            dataset.image_filename = 'data/' + name + '_image.png'
            database.add_dataset(dataset)
            database.save()

        return name
예제 #6
0
파일: scanner.py 프로젝트: wklharry/hrl
    def save_artag_image(self, name):

        filename = self.config.path + '/data/' + name + '_artag_image.png'
        print "Saving: " + filename
        highgui.cvSaveImage(filename, self.img)

        return '/data/' + name + '_artag_image.png'
예제 #7
0
def video_shot(args):
    start_time = time.time()
    captures = {}
    cut_list = []
    cut_video = CutVideo()
    init_extract = InitExtract()
    ncpus = cpu_count()
    queue_list = []
    sensitivity = 0.35
    temporary = Temporary()
    video_process = VideoProcess()
    try:
        file_input_name = args[args.index("-i") + 1]
        output_directory = args[args.index("-o") + 1]
    except ValueError:
        sys.exit("Usage: videoShot -i <inputFile> -o <outputDirectory>")
    temporary_directory = temporary.createDirectory()
    print "Converting video to ogg..."
    start_time2 = time.time()
    convert_video_to_ogg(file_input_name, temporary_directory)
    start_time3 = time.time()
    ogg_video_path = os.path.join(temporary_directory, "video_converted.ogg")
    output_segmentation_directory = output_directory + "/segmentation_video/"
    file_name_save = output_segmentation_directory + "/transitions_video/"
    file_video_save = output_segmentation_directory + "/parts_videos/"
    file_audio_save = output_segmentation_directory + "/video_audio/"
    thumbnails_save_path = output_segmentation_directory + "/thumbnails/"
    create_directory(
        [output_segmentation_directory, file_name_save, file_video_save, file_audio_save, thumbnails_save_path]
    )
    file_input_name = ogg_video_path
    capture = init_extract.createCapture(file_input_name)
    video_duration = get_video_duration(ogg_video_path)
    fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS)
    total_frames = round(video_duration * fps, 0)
    frames_bloc = int(total_frames / ncpus)
    split_video(temporary_directory, ncpus, video_duration, ogg_video_path)
    list_videos_path = get_videos_path(temporary_directory)
    captures[1] = init_extract.createCapture(list_videos_path[0])
    cvSaveImage(file_name_save + "trans_time_1.jpg", init_extract.initFrameCapture(captures[1]))
    for i in range(2, ncpus + 1):
        captures[i] = init_extract.createCapture(list_videos_path[i - 1])
    print "Finding transitions..."
    video_process.create_video_process(
        captures, sensitivity, frames_bloc, file_input_name, file_name_save, file_video_save, ncpus, queue_list
    )
    for i in range(ncpus):
        cut_list.extend(queue_list[i].get())
    cut_list = [round(x, 6) for x in cut_list]
    time_cut_list = cut_video.position_cut_list(cut_list, ncpus)
    print "Generating Segments..."
    video_process.create_cut_process(file_input_name, file_video_save, time_cut_list, ncpus)
    get_output_audio(file_audio_save, ogg_video_path)
    get_video_thumbnails(file_video_save, thumbnails_save_path)
    temporary.removeDirectory(temporary_directory)
    print
    print "Conversion Time: %.2f s" % (start_time3 - start_time2)
    print "Segmentation Time: %.2f s" % ((time.time() - start_time) - (start_time3 - start_time2))
    print "Segmentation completed in : %.2f s" % (time.time() - start_time)
예제 #8
0
def display(vec, name):
    """Reconstruct the patch/context pair encoded in `vec`, write both
    out as 5x-scaled PNGs prefixed with `name`, and show them in the
    'image' and 'context' windows (blocks until a key is pressed)."""
    patch, context = reconstruct_input(vec)
    patch = scale_image(patch, 5)
    context = scale_image(context, 5)
    for suffix, img in (('_patch.png', patch), ('_context.png', context)):
        hg.cvSaveImage(name + suffix, img)
    hg.cvShowImage('image', patch)
    hg.cvShowImage('context', context)
    hg.cvWaitKey()
예제 #9
0
def display(vec, name):
    """Save and display the reconstructed patch and context for `vec`.

    Writes <name>_patch.png and <name>_context.png (both upscaled 5x)
    and refreshes the already-open 'image'/'context' windows, waiting
    for a keypress before returning.
    """
    raw_patch, raw_context = reconstruct_input(vec)
    patch, context = scale_image(raw_patch, 5), scale_image(raw_context, 5)
    hg.cvSaveImage(name + '_patch.png', patch)
    hg.cvSaveImage(name + '_context.png', context)
    hg.cvShowImage('image', patch)
    hg.cvShowImage('context', context)
    hg.cvWaitKey()
예제 #10
0
def video_shot(args):
    """Segment a video into shots: convert to ogg, split across CPUs,
    detect transitions in parallel, then emit segment videos, audio and
    thumbnails under <output>/segmentation_video/.

    args: argv-style list containing '-i <inputFile> -o <outputDirectory>'.
    """
    start_time = time.time()
    captures = {}
    cut_list = []
    cut_video = CutVideo()
    init_extract = InitExtract()
    ncpus = cpu_count()
    queue_list = []
    sensitivity = 0.35  # transition-detection sensitivity threshold
    temporary = Temporary()
    video_process = VideoProcess()
    try:
        file_input_name = args[args.index('-i') + 1]
        output_directory = args[args.index('-o') + 1]
    except ValueError:
        sys.exit('Usage: videoShot -i <inputFile> -o <outputDirectory>')
    temporary_directory = temporary.createDirectory()
    print "Converting video to ogg..."
    start_time2 = time.time()
    convert_video_to_ogg(file_input_name, temporary_directory)
    start_time3 = time.time()
    ogg_video_path = os.path.join(temporary_directory, "video_converted.ogg")
    output_segmentation_directory = output_directory + '/segmentation_video/'
    file_name_save = (output_segmentation_directory + '/transitions_video/')
    file_video_save = (output_segmentation_directory + '/parts_videos/')
    file_audio_save = (output_segmentation_directory + '/video_audio/')
    thumbnails_save_path = (output_segmentation_directory + '/thumbnails/')
    create_directory([output_segmentation_directory, file_name_save, file_video_save, file_audio_save, thumbnails_save_path])
    # Work on the converted (ogg) copy from here on.
    file_input_name = ogg_video_path
    capture = init_extract.createCapture(file_input_name)
    video_duration = get_video_duration(ogg_video_path)
    fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS)
    total_frames = round(video_duration * fps, 0)
    frames_bloc = int(total_frames / ncpus)
    # One sub-video (and one capture handle) per CPU.
    split_video(temporary_directory, ncpus, video_duration, ogg_video_path)
    list_videos_path = get_videos_path(temporary_directory)
    captures[1] = init_extract.createCapture(list_videos_path[0])
    cvSaveImage(file_name_save + 'trans_time_1.jpg', init_extract.initFrameCapture(captures[1]))
    for i in range(2, ncpus + 1):
        captures[i] = init_extract.createCapture(list_videos_path[i-1])
    print "Finding transitions..."
    # Worker processes report detected cut times through queue_list.
    video_process.create_video_process(captures, sensitivity, frames_bloc, file_input_name, file_name_save, file_video_save, ncpus, queue_list)
    for i in range(ncpus):
        cut_list.extend(queue_list[i].get())
    cut_list = [round(x,6) for x in cut_list]
    time_cut_list = cut_video.position_cut_list(cut_list, ncpus)
    print "Generating Segments..."
    video_process.create_cut_process(file_input_name, file_video_save, time_cut_list, ncpus)
    get_output_audio(file_audio_save, ogg_video_path)
    get_video_thumbnails(file_video_save, thumbnails_save_path)
    temporary.removeDirectory(temporary_directory)
    print
    print "Conversion Time: %.2f s" % (start_time3 - start_time2)
    print "Segmentation Time: %.2f s" % ((time.time() - start_time) - (start_time3 - start_time2))
    print "Segmentation completed in : %.2f s" % (time.time() - start_time)
예제 #11
0
    def onShot(self, event):
        """Capture one frame from the camera, freeze playback, and write
        the frame to disk as the snapshot.

        Bound as a wx-style event handler; `event` is the triggering
        GUI event.
        """
        frame = gui.cvQueryFrame(self.capture)
        self.playTimer.Stop()
        gui.cvSaveImage("foo.png", frame)

        # Flip the UI from "take shot" mode to "retry" mode.
        self.hasPicture = True
        self.shotbutton.Hide()
        self.retrybutton.Show()
        self.Layout()
        event.Skip()
예제 #12
0
    def onShot(self, event):
        """Grab the current camera frame, stop the preview timer, and
        save the frame to "foo.png".

        GUI event handler; `event` is the triggering event and is
        re-queued via Skip() so other handlers still run.
        """
        frame = gui.cvQueryFrame(self.capture)
        self.playTimer.Stop()
        gui.cvSaveImage("foo.png", frame)

        # Switch the buttons: snapshot taken, offer a retry instead.
        self.hasPicture = True
        self.shotbutton.Hide()
        self.retrybutton.Show()
        self.Layout()
        event.Skip()
예제 #13
0
 def resizeImage(self, image_location, ouput_location, size):
     """Load the image at image_location, resize it to a size-by-size
     square with bilinear interpolation, and save it to ouput_location.

     Note: the misspelled parameter name 'ouput_location' is kept for
     caller compatibility.
     """
     side = cv.cvRound(size)
     # flag > 0 forces the loaded image to be 3-channel color.
     source = highgui.cvLoadImage(image_location, 1)
     resized = cv.cvCreateImage(cv.cvSize(side, side), 8, 3)
     cv.cvResize(source, resized, cv.CV_INTER_LINEAR)
     # write the result to file
     highgui.cvSaveImage(ouput_location, resized)
예제 #14
0
def main():
    """Test driver: load an image, run blob and bounding-box analysis
    for one golden-ratio cut, draw the cut line, then save and display
    the results.

    Command line: <imgfile> <cut-index> <method>
    This method is a good resource on how to handle the results;
    save images in this method if you have to.
    """
    filename = sys.argv[1]
    cut = int(sys.argv[2])
    winname = sys.argv[1]

    # BUG FIX: the image was loaded twice; load it once.
    image = highgui.cvLoadImage(filename)

    cutRatios = [lib.PHI]
    settings = Settings(cutRatios)
    settings.setMarginPercentage(0.025)
    settings.setMethod(sys.argv[3])
    thickness = 4
    # Color for the bounding boxes (alternatives: COL_BLACK/WHITE/RED/BLUE).
    color = lib.COL_GREEN

    blobImg = blobResult(image, settings, cut)
    boxxImg = boundingBoxResult(image, settings, cut, thickness, color)

    # Locate the requested cut; this part could be generalized further.
    cutt = lib.findMeans(cv.cvGetSize(image), settings.cutRatios[0])[cut]
    # Equal x-coordinates at both endpoints means a vertical cut.
    oriantesen = cutt.getPoints()[0].x == cutt.getPoints()[1].x
    # BUG FIX: the orientation was tested twice; merged into one branch.
    if oriantesen:
        cutPixel = cutt.getPoints()[1].x
        cv.cvLine(boxxImg, cv.cvPoint(cutPixel, cutt.getPoints()[0].y),
                  cv.cvPoint(cutPixel, cutt.getPoints()[1].y), lib.COL_RED)
    else:
        cutPixel = cutt.getPoints()[1].y
        cv.cvLine(boxxImg, cv.cvPoint(cutt.getPoints()[0].x, cutPixel),
                  cv.cvPoint(cutt.getPoints()[1].x, cutPixel), lib.COL_RED)

    # Save images
    highgui.cvSaveImage('flood_cut_%s.png' % cut, boxxImg)
    highgui.cvSaveImage('blobs_cut_%s.png' % cut, blobImg)

    # Show images
    compareImages(blobImg, boxxImg, "blob", winname)
예제 #15
0
파일: pyfaces.py 프로젝트: ruchir-hj/i-home
def recognize_face():
    try:
        argsnum=len(sys.argv)
        print "args:",argsnum
        #if(argsnum<5):
         #   print "usage:python pyfaces.py imgname dirname numofeigenfaces threshold "
          #  sys.exit(2)                
        #imgname=sys.argv[1]
        #dirname=sys.argv[2]
        #egfaces=int(sys.argv[3])
        #thrshld=float(sys.argv[4])

        capture=hg.cvCreateCameraCapture(0)
        hg.cvNamedWindow("Snapshot")
        i=0
        #time.sleep(1)
        myframe=0
        imgname='sample.png'
        dirname='images'
        egfaces=5
        thrshld=0.3
        #frame=0
        
        while 1:     
            frame=hg.cvQueryFrame(capture)
            #print type(frame)
            hg.cvShowImage("Snapshot",frame)
            key = hg.cvWaitKey(5)
            if key=='c' or key=='C':
                hg.cvDestroyWindow("Snapshot")
                hg.cvSaveImage(imgname,frame)
                global_frame=frame
                break   
                #print frame   

        #sys.exit(0)

        pyf=PyFaces(imgname,dirname,egfaces,thrshld,frame)
        #if pyfaces returns false then save this image into images folder
        hg.cvReleaseCapture(capture) 
        return pyf.getFileName()

    except Exception,detail:
        print detail
        print "usage:python pyfaces.py imgname dirname numofeigenfaces threshold "
예제 #16
0
    def normalizeFaces(self, useSize=True, size=None):
        """
            Normalizes all the images in the cropped_faces_dir
        """
        max_size = 0
        if size == None:
            # use default size if none
            size = DEFAULT_FACE_SIZE
            
        if useSize == False:
            # use the maximum face size found
            max_size = self.__findMaxSize()
        else:
            max_size = size
            
        # loop over the original images
        cropped_files = self.getCroppedFaceImages()
        self.filecount = len(cropped_files)

        if self.startCallback is not None:
            self.startCallback(self.filecount)
            
        print "Normalizing " + str(self.filecount) + " images"
        for index, fname in enumerate(cropped_files):
            image_location = self.cropped_faces_dir + "\\" + fname

            image = highgui.cvLoadImage(image_location, 1) # a cropped non-normalized image
            p = re.compile(CROPFACE_FILENAME_PATTERN)
            m = p.match(fname)
            prefix = m.group("prefix")
            image_index = m.group("image_index")
            face_index = m.group("face_index")
            
            norm_image = self.__normImage(image, max_size) # normalize the image

            norm_filename = prefix + "_" + image_index + "_norm_" + face_index + ".jpg"
            location = self.norm_dir + "\\" + norm_filename
            highgui.cvSaveImage(location, norm_image) # save the image to file
            
            if self.iterCallback is not None:
                self.iterCallback(index)
예제 #17
0
def main():
    """Link image feature points into contours.

    Points come either from a point file (-p) or from the Harris corner
    detector (-r); the linked result is displayed and optionally saved
    and/or dumped to the output file.
    """
    usage = "%s [options]  <imgfile> " % (sys.argv[0])
    version = "%prog 0.2\n Longbin Chen, [email protected]"
    oparser = optparse.OptionParser(usage=usage, version=version)
    oparser.add_option('-d', '--display', action="store_true", dest = 'display', default = False, help = 'display the image')
    oparser.add_option('-c','--contour', action="store_true", dest = 'contour', default = False, help = 'show object contour')
    oparser.add_option('-i','--image', action="store_true", dest = 'image', default = False, help = 'show original images')
    oparser.add_option('-n', '--number', dest = 'num', type='int', default = 200 , help = 'the number of feature points')
    oparser.add_option('-x','--enlarge', dest = 'enlarge', default = 1.0 , type = float,  help = 'resize images, default:1.0')
    oparser.add_option('-o', '--output', dest = 'output', default = None, help = 'output file')
    oparser.add_option('-p', '--pointfile', dest = 'pointfile', default = None, help = 'use pointfile ')
    oparser.add_option('-r', '--harris', dest = 'harris', default = False, action = "store_true", help = 'use harris detector')
    oparser.add_option('-s', '--save', dest = 'save', default = None, help = 'save the img file')

    (options, args) = oparser.parse_args(sys.argv)

    if len(args) != 2:
        oparser.parse_args([sys.argv[0], "--help"])
        sys.exit(1)
    # BUG FIX: options.harris defaults to False (never None), so the old
    # `options.harris == None` test could never be true and the usage
    # error below was unreachable. Require at least one point source.
    if options.pointfile is None and not options.harris:
        print >> sys.stderr, "either of  pointfile and harris can be valid"
        sys.exit(1)

    highgui.cvNamedWindow ("Corner1", 1)
    ct = Linker(options.contour, options.image, options.enlarge, options.num)
    if options.pointfile:
        ct.LoadPoints(options.pointfile)
    else:
        ct.HarrisPoints(args[1])
    ct.LinkPoints(args[1])
    highgui.cvShowImage ("Corner1", ct.drawimg)
    highgui.cvWaitKey (0)
    if options.save:
        highgui.cvSaveImage(options.save, ct.drawimg)
    if options.output:
        # Dump the accumulated OUT buffer (module-level stream).
        f = open(options.output, "w")
        f.write(OUT.getvalue())
        f.close()
        OUT.close()
예제 #18
0
def videoShot(args):
	w = time.time()
	file_atual = os.getcwd()
	initExtract = InitExtract()
	videoprocess = VideoProcess()
	shotvideo = ShotVideo()
	cutvideo = CutVideo()
	captures = {}
	cut_list=[]
	sensitivity = 0.35
	ncpus = cpu_count()
	queue_list=[]
	FileName = args[args.index('-i') + 1]
	output = args[args.index('-o') + 1]
	fileNameSave = (output + '/transitions_' + 'video' + '/')
	fileVideoSave = (output + '/parts_' + 'video'+'/')
	for files in (fileNameSave, fileVideoSave):
		try:
			shutil.rmtree(files)
		except:
			pass
		os.mkdir(files)
	capture = initExtract.createCapture(FileName)
	total_frames = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT)
	# if utilizado para quando o video nao possui o metadado de total frame
	if total_frames == 0:
		total_frames = shotvideo.contFrames(capture)
	frames_bloc = int(total_frames / ncpus)
	captures[1] = initExtract.createCapture(FileName)
	cvSaveImage(fileNameSave + 'transition25.jpg', initExtract.initFrameCapture(captures[1]))
	for i in range(2, ncpus + 1):
		captures[i] = initExtract.createCapture(FileName)
		captures[i] = initExtract.pass_frames(captures[i], frames_bloc, i - 1)
	j = time.time()		
	videoprocess.create_video_process(captures,sensitivity,frames_bloc,FileName,fileNameSave,fileVideoSave,file_atual,ncpus,queue_list)   
	for i in range(ncpus):
		cut_list.extend(queue_list[i].get())
	cut_list = [round(x,6) for x in cut_list]        
	corte = cutvideo.position_cut_list(cut_list,ncpus)
	videoprocess.create_cut_process(FileName,fileVideoSave,file_atual,corte,ncpus)
	print "A segmentacao foi concluida em : %.2f segundos " % (time.time() - w) 
예제 #19
0
파일: util.py 프로젝트: Calm-wy/kwc-ros-pkg
def display_images(image_list, max_x=1200, max_y=1000, save_images=False):
    """Tile a list of OpenCV images across the screen, wrapping at
    max_x/max_y pixels, and keep redrawing them until ESC is pressed.

    save_images - also write each image to a timestamped PNG.
    """
    stamp = time.strftime('%Y_%m_%d_%I%M%S', time.localtime())

    wins = []
    x, y = 0, 0
    for idx, img in enumerate(image_list):
        if save_images:
            # Float mono images must be scaled to 8-bit before saving.
            if img.nChannels == 1 and img.depth == cv.IPL_DEPTH_32F:
                as_8u = cv.cvCreateImage(cv.cvSize(img.width, img.height),
                                         cv.IPL_DEPTH_8U, 1)
                cv.cvConvertScale(img, as_8u, 255.0)
                img = as_8u
            highgui.cvSaveImage('image%d_' % idx + stamp + '.png', img)

        title = 'image %d' % idx
        wins.append((title, img))
        highgui.cvNamedWindow(title, highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvMoveWindow(title, x, y)
        # Advance the tile cursor, wrapping left-to-right, top-to-bottom.
        x += img.width
        if x > max_x:
            x = 0
            y += img.height
            if y > max_y:
                y = 0

    # Event loop: redraw everything until ESC (0x1b) is pressed.
    while True:
        for title, img in wins:
            highgui.cvShowImage(title, img)
        if highgui.cvWaitKey(10) == '\x1b':
            break
예제 #20
0
def main():
    """Detect corner feature points in an image; optionally display
    and/or save the annotated result."""
    detector = Corner()
    oparser = optparse.OptionParser(
        usage="%s [options] <imgfile>" % (sys.argv[0]),
        version="%prog 0.2\n Longbin Chen, [email protected]")
    oparser.add_option('-d', '--display', action="store_true",
                       dest='display', default=False,
                       help='display the image')
    oparser.add_option('-n', '--number', dest='num', type='int',
                       default=200, help='the number of feature points')
    oparser.add_option('-o', '--output', dest='output', default=None,
                       help='output file')
    oparser.add_option('-s', '--save', dest='save', default=None,
                       help='save the img file')

    options, args = oparser.parse_args(sys.argv)
    if len(args) != 2:
        # Wrong argument count: print help and exit.
        oparser.parse_args([sys.argv[0], "--help"])
        sys.exit(1)

    detector.GetCorner(args[1], options.num)

    if options.display:
        detector.DrawKeyPoints()
        highgui.cvNamedWindow ("Corner1", 1)
        highgui.cvShowImage ("Corner1", detector.drawimg)
        highgui.cvWaitKey (0)
    if options.save:
        highgui.cvSaveImage(options.save, detector.drawimg)
예제 #21
0
파일: scanner.py 프로젝트: gt-ros-pkg/hrl
 def save_data(self,name, metadata=True, angle = None):
     dict = {'laserscans' : self.laserscans,
         'l1': self.config.thok_l1, 'l2': self.config.thok_l2,
         'image_angle' : angle} 
     
     prefix = self.config.path+'/data/'+name
     print "Saving: "+prefix+'_laserscans.pkl'
     ut.save_pickle(dict,prefix+'_laserscans.pkl')
     print "Saving: "+prefix+'_image.png'
     highgui.cvSaveImage(prefix+'_image.png',self.img)
     
     if metadata:
         # save metadata to database:
         database = scans_database.scans_database()
         database.load(self.config.path,'database.pkl')
         dataset = scan_dataset.scan_dataset()
         dataset.id = name
         dataset.scan_filename = 'data/'+name+'_laserscans.pkl'
         dataset.image_filename = 'data/'+name+'_image.png'
         database.add_dataset(dataset)
         database.save()
     
     return name
예제 #22
0
파일: util.py 프로젝트: janfrs/kwc-ros-pkg
def display_images(image_list, max_x = 1200, max_y = 1000, save_images=False):
	"""
	Display a list of OpenCV images tiled across the screen
	with maximum width of max_x and maximum height of max_y

	save_images - will save the images(with timestamp)
	"""

	curtime=time.localtime()
	date_name = time.strftime('%Y_%m_%d_%I%M%S', curtime)

	loc_x, loc_y = 0, 0
	wins = []
	for i, im in enumerate(image_list):
		if save_images:
			# 32-bit float mono images must be rescaled to 8-bit
			# before cvSaveImage can write them.
			if im.nChannels == 1 and im.depth == cv.IPL_DEPTH_32F:
				clr = cv.cvCreateImage(cv.cvSize(im.width, im.height), cv.IPL_DEPTH_8U, 1)
				cv.cvConvertScale(im, clr, 255.0)
				im = clr
			highgui.cvSaveImage('image%d_'%i+date_name+'.png', im)

		window_name = 'image %d' % i
		wins.append((window_name, im))
		highgui.cvNamedWindow(window_name, highgui.CV_WINDOW_AUTOSIZE)
		highgui.cvMoveWindow(window_name, loc_x, loc_y)
		# Tile left-to-right, wrapping to a new row past max_x and
		# back to the top past max_y.
		loc_x = loc_x + im.width
		if loc_x > max_x:
			loc_x = 0
			loc_y = loc_y + im.height
			if loc_y > max_y:
				loc_y = 0
	# Redraw every window until ESC ('\x1b') is pressed.
	while True:
		for name, im in wins:
			highgui.cvShowImage(name, im)
		keypress = highgui.cvWaitKey(10)
		if keypress == '\x1b':
			break
예제 #23
0
def main():
    """Extract MSS contour features from the image given on the command
    line; optionally display the contour, write the features to a file,
    or save the rendered image."""
    usage = "%prog [options] <imgfile>"
    version = "%prog 0.2\n Longbin Chen, [email protected]"
    parser = optparse.OptionParser(usage=usage, version=version)
    parser.add_option("-d", "--display", action="store_true", dest="display", default=False, help="display the image")
    parser.add_option("-m", "--drawnumber", action="store_true", dest="drawnumber", default=False, help="display the point numbers")
    parser.add_option("-n", "--number", dest="num", type="int", default=200, help="the number of feature points")
    parser.add_option("-t", "--threshold", dest="threshold", type="int", default=100, help="the threshold for image binarification")
    parser.add_option("-o", "--output", dest="output", default=None, help="output file")
    parser.add_option("-s", "--save", dest="save", default=None, help="save the img file")

    options, args = parser.parse_args(sys.argv)
    if len(args) != 2:
        # Show the auto-generated help, then exit with an error code.
        parser.parse_args([sys.argv[0], "--help"])
        sys.exit(1)

    extractor = ExtractMSS()
    extractor.GetContour(args[1], options)

    if options.display:
        extractor.DrawKeyPoints()
        highgui.cvNamedWindow("contour", 1)
        highgui.cvShowImage("contour", extractor.drawimg)
        highgui.cvWaitKey(0)
    if options.output:
        extractor.mss.save(options.output)
    if options.save:
        highgui.cvSaveImage(options.save, extractor.drawimg)
예제 #24
0
파일: ans1.py 프로젝트: yeison/v2hw
def solveHWProblem(theta, scale, function):
    #An ugly fix until we figure out how to completely index non-square images.
    if(image.width > image.height):
        width = image.height
        height = image.height
    else:
        width = image.width
        height = image.width
    dir = sys.argv[1][0:-4]
    fileName = HTML.insertImage(dir, theta/pi, scale, function.__name__)
    size = cv.cvSize(width - 2*scale , height - 2*scale)
    theta_image = cv.cvCreateImage(size, cv.IPL_DEPTH_8U, 1)
    #range(s, value): stay s pixels away from all boundaries.
    #print range(scale, image.height -scale)
    for x in range(scale-1, (width-1) - scale):
        for y in range(scale-1, (height-1) - scale): 
            if(function.__name__ == "dofIA2") :
                theta_image[x-scale, y-scale] = function(x, y, theta, scale)/2 + 128
            else:
                theta_image[x-scale, y-scale] = function(x, y, theta, scale)
    if not os.path.exists(dir):
        os.mkdir(dir)
    highgui.cvSaveImage(fileName, theta_image)
    print "finished: %s" % fileName
예제 #25
0
# PatchClassifier dataset inspection script.
# NOTE(review): `show_pca`, `save_pca_bases`, `display`, `load_pickle`,
# `normalize_for_display`, `tile_nsave` and `scale_image` are defined
# earlier in this file or its imports -- not visible in this chunk.
process_inputs               = False
separate_negatives_positives = True
dataset = load_pickle('PatchClassifier.dataset.pickle')
hg.cvNamedWindow('image', 1)
hg.cvNamedWindow('context', 1)
# Bring the PCA projection basis into a displayable value range.
pca_basis = normalize_for_display(dataset.projection_basis)

if show_pca:
    # Show each basis vector (one column per basis) as an image pair.
    for i in range(pca_basis.shape[1]):
        print 'basis', i
        display(pca_basis[:,i], 'pca_basis'+str(i))

if save_pca_bases:
    # Tile all bases into one large image, upscaled 8x, and save it.
    large_image = tile_nsave(pca_basis)
    large_image = scale_image(large_image, 8)
    hg.cvSaveImage('pca_large_image.png', large_image)

if process_inputs:
    large_image = tile_nsave(dataset.inputs)
    hg.cvSaveImage('inputs.png', large_image)

if separate_negatives_positives:
    # Split the input columns by label (outputs == 0 vs == 1) and save
    # each class as its own tiled montage.
    r, c = np.where(dataset.outputs == 0)
    negatives = tile_nsave(dataset.inputs[:,c.A[0]])
    r, c = np.where(dataset.outputs == 1)
    positives = tile_nsave(dataset.inputs[:,c.A[0]])
    hg.cvSaveImage('negatives.png', negatives)
    hg.cvSaveImage('positives.png', positives)

예제 #26
0
def opencv_scale(filename, width, height):
    """Load `filename`, resample it to width x height using area
    interpolation, and write the result to outcv.jpg."""
    source = highgui.cvLoadImage(filename)
    scaled = cv.cvCreateImage(cv.cvSize(width, height), 8, 3)
    cv.cvResize(source, scaled, cv.CV_INTER_AREA)
    highgui.cvSaveImage("outcv.jpg", scaled)
예제 #27
0
파일: cv1.py 프로젝트: hxfabc2012/OpenQbo-1
    return scaled


if __name__ == '__main__':
    # Demo: request one capture from the ROS photo service and save the
    # converted frame with OpenCV.
    import opencv.highgui as hg
    import rospy
    from photo.srv import *
    #a = create_ros_image()
    #print a

    # Block until the capture service is up, then take one photo.
    rospy.wait_for_service('/photo/capture')
    say_cheese = rospy.ServiceProxy('/photo/capture', Capture)
    ros_img = say_cheese().image
    print dir(ros_img)
    # NOTE(review): ros2cv is defined earlier in this file (not visible
    # in this chunk); it converts the ROS image message to an IplImage.
    cv_img = ros2cv(ros_img)
    hg.cvSaveImage('test.png', cv_img)

#def create_ros_image(width=1, height=1, channels=2, data='12'):
#    d1 = MultiArrayDimension(label='height',   size=height,   stride=width*height*channels)
#    d2 = MultiArrayDimension(label='width',    size=width,    stride=width*channels)
#    d3 = MultiArrayDimension(label='channels', size=channels, stride=channels)
#
#    layout     = MultiArrayLayout(dim = [d1,d2,d3])
#    multiarray = UInt8MultiArray(layout=layout, data=data)
#    return RosImage(label='image', encoding='bgr', depth='uint8', uint8_data=multiarray)

###
## Fill holes in a binary image using scipy
##
#def fill_holes(cv_img):
#    img_np     = ut.cv2np(cv_img)
예제 #28
0
def main():
    """Link feature points of <imgfile> into contours and display the result.

    Usage: prog [options] <imgfile>.  Points come either from a point file
    (--pointfile) or from the Harris corner detector (--harris); at least
    one of the two must be supplied.  The linked result can be displayed,
    saved to an image (--save) and/or written as text (--output).
    """
    usage = "%s [options]  <imgfile> " % (sys.argv[0])
    version = "%prog 0.2\n Longbin Chen, [email protected]"
    oparser = optparse.OptionParser(usage=usage, version=version)
    oparser.add_option('-d',
                       '--display',
                       action="store_true",
                       dest='display',
                       default=False,
                       help='display the image')
    oparser.add_option('-c',
                       '--contour',
                       action="store_true",
                       dest='contour',
                       default=False,
                       help='show object contour')
    oparser.add_option('-i',
                       '--image',
                       action="store_true",
                       dest='image',
                       default=False,
                       help='show original images')
    oparser.add_option('-n',
                       '--number',
                       dest='num',
                       type='int',
                       default=200,
                       help='the number of feature points')
    oparser.add_option('-x',
                       '--enlarge',
                       dest='enlarge',
                       default=1.0,
                       type=float,
                       help='resize images, default:1.0')
    oparser.add_option('-o',
                       '--output',
                       dest='output',
                       default=None,
                       help='output file')
    oparser.add_option('-p',
                       '--pointfile',
                       dest='pointfile',
                       default=None,
                       help='use pointfile ')
    oparser.add_option('-r',
                       '--harris',
                       dest='harris',
                       default=False,
                       action="store_true",
                       help='use harris detector')
    oparser.add_option('-s',
                       '--save',
                       dest='save',
                       default=None,
                       help='save the img file')

    (options, args) = oparser.parse_args(sys.argv)

    # args[0] is the program name, so exactly one positional argument
    # (the image file) is expected.
    if len(args) != 2:
        oparser.parse_args([sys.argv[0], "--help"])
        sys.exit(1)
    # BUG FIX: --harris defaults to False (never None), so the original
    # 'options.harris == None' test could never detect a missing point
    # source.  Require one of the two sources explicitly.
    if (options.pointfile is None and not options.harris):
        print >> sys.stderr, "either --pointfile or --harris must be given"
        sys.exit(1)

    highgui.cvNamedWindow("Corner1", 1)
    ct = Linker(options.contour, options.image, options.enlarge, options.num)
    if (options.pointfile):
        # Point coordinates supplied by the user.
        ct.LoadPoints(options.pointfile)
        ct.LinkPoints(args[1])
    else:
        # Detect points with the Harris corner detector.
        ct.HarrisPoints(args[1])
        ct.LinkPoints(args[1])
    highgui.cvShowImage("Corner1", ct.drawimg)
    highgui.cvWaitKey(0)
    if (options.save):
        highgui.cvSaveImage(options.save, ct.drawimg)
    if (options.output):
        # OUT is a module-level StringIO-like buffer filled during linking.
        f = open(options.output, "w")
        f.write(OUT.getvalue())
        f.close()
        OUT.close()
예제 #29
0
                    highgui.cvDestroyAllWindows()
                    sys.exit(0)
                    break

                if k == 'n':
                    #image par image. util si la var image_par_image=1 ligne 42
                    None

            k = highgui.cvWaitKey(10)
            if k == 'b':
                # recupere l image en appuyant sur b
                frameGrayBg = cv.cvCloneImage(frameGray)

            if k == 's':
                # save configuration
                highgui.cvSaveImage("background.bmp", frameGrayBg)
                logfile = open('config', 'w')
                text = str(nb_div_zone[0]) + "\n" + str(
                    seuil_binary[0]) + "\n" + str(gain[0]) + "\n" + str(
                        param_liss[0]) + "\n" + str(param2_liss[0])
                logfile.write(text)
                logfile.close()
                mess_saved = 1

            if k == '\x1b':
                # user has press the ESC key, so exit
                highgui.cvDestroyAllWindows()
                break

    sys.exit(0)
예제 #30
0
def Process(inFile, outFile):
    """Brighten a grayscale image by 20% and write the result.

    Loads inFile as a single-channel image, multiplies every pixel by
    1.2 (no additive offset) and saves the scaled copy to outFile.
    """
    src = highgui.cvLoadImage(inFile, 0)
    dst = cv.cvCreateImage(cv.cvGetSize(src), 8, 1)
    cv.cvConvertScale(src, dst, 1.2, 0)
    highgui.cvSaveImage(outFile, dst)
예제 #31
0
            # display the frames to have a visual output
            highgui.cvShowImage ('Camera', img)
            # write the frame to the output file
            #highgui.cvWriteFrame (writer, img)
        
        if keyinput[K_ESCAPE]:
            # user has press the ESC key, so exit
            #highgui.cvReleaseVideoWriter (writer)
            highgui.cvDestroyAllWindows()
            highgui.cvReleaseCapture (capture)
            pygame.quit()
            #sys.exit ()
            break
        
        if keyinput[K_s]:
            highgui.cvSaveImage("snapshot.BMP", img)

        if keyinput[K_b]:
            img = highgui.cvQueryFrame (capture)
            PILimg = opencv.adaptors.Ipl2PIL(img)
            PILimg = PILimg.filter(ImageFilter.BLUR)
            opencvimg = opencv.adaptors.PIL2Ipl(PILimg)
            highgui.cvShowImage ('Canny', opencvimg)

        if keyinput[K_c]:
            img = highgui.cvQueryFrame (capture)
            PILimg = opencv.adaptors.Ipl2PIL(img)
            PILimg = PILimg.filter(ImageFilter.CONTOUR)
            opencvimg = opencv.adaptors.PIL2Ipl(PILimg)
            highgui.cvShowImage ('Canny', opencvimg)
예제 #32
0
def static_test():
    """Run harrisResponse on the stock Lena sample image, save the
    response map to harris_response_lena_opencv.jpg and return it."""
    lena = highgui.cvLoadImage("/usr/share/doc/opencv-doc/examples/c/lena.jpg")
    response = harrisResponse(lena)
    highgui.cvSaveImage("harris_response_lena_opencv.jpg", response)
    return response
예제 #33
0
파일: pyfaces.py 프로젝트: ruchir-hj/i-home
        print "num of eigenfaces used:",self.egfnum
        try:
            self.facet.checkCache(self.imgsdir,extn,self.imgnamelist,self.egfnum,self.threshold)
        except Exception, inst:
            #print "failed :",inst.message
            print "failed :"
        else:
            mindist,matchfile=self.facet.findmatchingimage(self.testimg,self.egfnum,self.threshold)
            if not matchfile or mindist==0.0:
                print "NOMATCH! try higher threshold"
                print "including in image database :", testimg
                self.numimgs = self.numimgs + 1
                newname="image" + str(self.numimgs) + ".png"
                os.chdir("images")
                self.i_home_imagefile = newname
                hg.cvSaveImage(newname ,frame)
                os.chdir("..")
                
            else:
                print "matches :"+matchfile+" dist :"+str(mindist)
                self.i_home_imagefile = matchfile.split('\\')[1]

    def getFileName(self):
        """Return the filename recorded by the matching step (either the
        newly-stored capture or the matched database image)."""
        return self.i_home_imagefile
        
        
    def setselectedeigenfaces(self,selectedeigenfaces,ext):        
        #call eigenfaces.parsefolder() and get imagenamelist        
        self.imgnamelist=self.facet.parsefolder(self.imgsdir,ext)                    
        self.numimgs=len(self.imgnamelist)        
        if(selectedeigenfaces >= self.numimgs  or selectedeigenfaces == 0):
예제 #34
0
def findMitochondria():
    """Detect mitochondria candidates in a stack of classified EM images.

    For each of 34 slices: threshold the classified-pixel image into a
    binary mask, find its contours, fit an ellipse to every contour with
    at least 6 points, and score each contour by (a) how well it overlaps
    its fitted ellipse (intersection over union) and (b) how close its
    perimeter is to an expected value.  Scored contours are drawn over the
    original gray image and written to c:\\temp\\contour_output\\ as
    out%04d.bmp / result%04d.bmp.

    NOTE(review): relies on module-level imports (PIL Image, numpy/pylab,
    OpenCV 1.x bindings) and a gaussian() helper; input paths are
    hard-coded Windows/network shares.
    """

    # Dead debug branch: plot the gaussian scoring kernel.
    if 0:
        values = []
        domain = arange(-10.0, 10.0, 0.1)
        for x in domain:
            values.append(gaussian(x, 10, 1))
        plot(domain, values)
        show()

    for imageIndex in range(34):

        #i = Image.open('images/circular_regions.bmp')
        #i = Image.open("C:\\temp\\gradient_is_just_a_blur_function_data_set2_threshold.tif")

        #i = Image.open("O:\\images\\HPFcere_vol\\gradient_is_just_a_blur_function_data_set2\\8bit_trimmed\\thresholded\\out%04d.tif" % imageIndex)
        #originalImage = Image.open("O:\\images\\HPFcere_vol\\HPF_rotated_tif\\8bit\\training\\out%04d.tif" % (imageIndex + 3))

        i = Image.open("O:\\images\\HPFcere_vol\\HPF_rotated_tif\\median_then_gaussian_8bit_classified_pixels\\tif\\out%04d.tif" % imageIndex) #todo: this should probably use the + 3 also
        originalImage = Image.open("O:\\images\\HPFcere_vol\\HPF_rotated_tif\\median_then_gaussian_8bit\\out%04d.tif" % (imageIndex + 3))

        print imageIndex

        #numpy_arr = array(numpy.asarray(i))[:,:,0]
        numpy_arr = array(numpy.asarray(i))
        print "numpy_arr", numpy_arr.shape
        numpy_original = array(numpy.asarray(originalImage))
        print "numpy_original", numpy_original.shape

        size = cvSize(numpy_arr.shape[0], numpy_arr.shape[1])

        smoothedImage = cvCreateImage(size,8,1)
        #smoothedImage = cvCreateMat(numpy_arr.shape[1], numpy_arr.shape[0], CV_8UC1)
        cvSetZero(smoothedImage)

        originalImage = cvCreateImage(size, 8, 1)
        contours_image = cvCreateImage(size, 8, 3)
        resultContoursImage = cvCreateImage(size, 8, 3)
        cvSetZero(contours_image)

        # Binarize the classified image into smoothedImage.  The (j,i) ->
        # (i,j) index swap transposes the row-major numpy data into the
        # OpenCV image.  NOTE(review): this loop reuses 'i', shadowing the
        # PIL image bound above (which is no longer needed at this point).
        #print numpy_arr.shape[0]
        #print numpy_arr.shape[1]
        for i in range(numpy_arr.shape[1]):
            for j in range(numpy_arr.shape[0]):
                #print i,j
                #cvmSet(smoothedImage, i, j, int(numpy_arr[i,j])) # this is setting the data incorrectly like it thinks the size of the entry is something other than 8 bits, maybe it thinks 32 bits

                #smoothedImage[i,j] = int(numpy_arr[j,i])
                if numpy_arr[j,i] > 100:
                    smoothedImage[i,j] = 1

        if 1:
            storage = cvCreateMemStorage(0)
            #print "position 1"
            nb_contours, contours = cvFindContours(smoothedImage,
                                                      storage,
                                                      sizeof_CvContour,
                                                      CV_RETR_LIST,
                                                      CV_CHAIN_APPROX_SIMPLE,
                                                      cvPoint(0,0))
            #print "contours", contours
            if contours == None:
                print "no contours"
                continue

            #print "position 2", contours.total
            #contours = cvApproxPoly(contours, sizeof_CvContour,
            #                           storage,
            #                           CV_POLY_APPROX_DP, 1, 1)

            #print "position 3"
            _red = cvScalar(0,0,255,0)
            _green = cvScalar(0,255,0,0)

            levels = 3

            _contours = contours

            #print _contours
            #for c in _contours.hrange():
            #    print c
            #    print cvFitEllipse2(c)



            # Copy the original gray values into the visualization images
            # (same transpose as the binarization loop above).
            for i in range(numpy_arr.shape[1]):
                for j in range(numpy_arr.shape[0]):
                    contours_image[i,j] = int(numpy_original[j,i])
                    originalImage[i,j] = int(numpy_original[j,i])
                    resultContoursImage[i,j] = int(numpy_original[j,i])


            cvDrawContours(contours_image, _contours,
                              _red, _green,
                              levels, 1, CV_AA,
                              cvPoint(0, 0))

            # This cycle draws all contours and approximates each by an ellipse.
            contourIndex = 0
            for c in _contours.hrange():
                count = c.total; # number of points in this contour
                #print c
                if c.v_next != None: print "c.v_next", c.v_next
                if c.v_prev != None: print "c.v_prev", c.v_prev

                print c.flags
                # NOTE(review): flags forced to a magic constant; the intent
                # is unclear from this file -- confirm before changing.
                c.flags = 1117327884
                #print "c.h_next", c.h_next
                #print "c.h_prev", c.h_prev

                size = cvSize(numpy_arr.shape[0], numpy_arr.shape[1])
                contourImage = cvCreateImage(size, 8, 1)
                cvSetZero(contourImage)
                ellipseImage = cvCreateImage(size, 8, 1)
                cvSetZero(ellipseImage)
                andImage = cvCreateImage(size, 8, 1)
                cvSetZero(andImage)
                orImage = cvCreateImage(size, 8, 1)
                cvSetZero(orImage)
                maskedImage = cvCreateImage(size, 8, 1)
                cvSetZero(maskedImage)

                resultDisplayImage = cvCreateImage(size, 8, 3)
                cvSetZero(resultDisplayImage)


                # Number of points must be >= 6 (required by cvFitEllipse).
                if( count < 6 ):
                    continue;

                #print cvMatchShapes(c, c, CV_CONTOURS_MATCH_I1)

                # Alloc memory for contour point set.
                PointArray = cvCreateMat(1, count, CV_32SC2)
                PointArray2D32f= cvCreateMat( 1, count, CV_32FC2)

                # Get contour point set.
                cvCvtSeqToArray(c, PointArray, cvSlice(0, CV_WHOLE_SEQ_END_INDEX));

                # Convert CvPoint set to CvBox2D32f set.
                cvConvert( PointArray, PointArray2D32f )

                box = CvBox2D()

                # Fits ellipse to current contour.
                box = cvFitEllipse2(PointArray2D32f);

                # Draw current contour (outline into contours_image, filled
                # into contourImage for the area computation below).
                cvDrawContours(contours_image, c, CV_RGB(255,255,255), CV_RGB(255,255,255),0,1,8,cvPoint(0,0));
                cvDrawContours(contourImage, c, CV_RGB(255,255,255), CV_RGB(255,255,255),0,CV_FILLED,8,cvPoint(0,0));

                # Convert ellipse data from float to integer representation.
                center = CvPoint()
                size = CvSize()
                center.x = cvRound(box.center.x);
                center.y = cvRound(box.center.y);
                size.width = cvRound(box.size.width*0.5);
                size.height = cvRound(box.size.height*0.5);
                box.angle = -box.angle;

                #ellipseWidth = min(size.width, size.height)
                #ellipseHeight = max(size.width, size.height)
                #ellipseAspectRatio = ellipseHeight / ellipseWidth  

                #cvEllipse2Poly
                # Alloc memory for contour point set.
                numPolygonPoints = 30
                ellipsePointArray = cvCreateMat(1, numPolygonPoints, CV_32SC2)
                ellipsePointArray2D32f= cvCreateMat( 1, numPolygonPoints, CV_32FC2)
                buffer = [cvPoint(1,1), cvPoint(1,1)]
                #print box.angle
                #cvEllipse2Poly(center, size, int(box.angle), 0, 360, ellipsePointArray2D32f, 1)
                #cvEllipse2Poly(center, size, int(box.angle), 0, 360, buffer, 1)

                # Draw ellipse (outline for display, filled for area math).
                cvEllipse(contours_image, center, size,
                          box.angle, 0, 360,
                          CV_RGB(0,0,255), 1, CV_AA, 0);
                cvEllipse(ellipseImage, center, size,
                          box.angle, 0, 360,
                          CV_RGB(255,255,255), -1, CV_AA, 0);

                # AND = intersection, OR = union of filled contour/ellipse.
                cvAnd(contourImage, ellipseImage, andImage);
                cvOr(contourImage, ellipseImage, orImage);

                andArea = cvSum(andImage)
                orArea = cvSum(orImage)

                cvCopy(originalImage, maskedImage, contourImage)

                #print orArea

                perimeter = cvArcLength(c)
                #print perimeter



                # Intersection-over-union of contour vs. fitted ellipse.
                fractionOfOverlap = float(andArea[0]) / float(orArea[0])

                # Gaussian falloff scores for ellipse-likeness and for
                # closeness to the expected perimeter (~200 px); the green
                # channel encodes their product.
                amplitude = 1
                overlapValue = gaussian(1.0 - fractionOfOverlap, amplitude, 0.2)
                #perimeterValue = gaussian(abs(74.0 - perimeter), amplitude, 10)
                perimeterValue = gaussian(abs(200.0 - perimeter), amplitude, 150)

                #print imageIndex, contourIndex, ". perimeter:", perimeter, "  overlap:", fractionOfOverlap

                #color = CV_RGB(int(255.0*overlapValue),int(255.0*perimeterValue),50)
                color = CV_RGB(50,int(255.0*(overlapValue**1)*(perimeterValue**1)),50)
                cvDrawContours(resultDisplayImage, c, color, CV_RGB(255,255,255),0,CV_FILLED,8,cvPoint(0,0));

                thickness = 3
                cvDrawContours(resultContoursImage, c, color, CV_RGB(255,255,255),0,thickness,8,cvPoint(0,0));

                #cvDrawContours(contours_image, ellipsePointArray, CV_RGB(255,255,255), CV_RGB(128,255,128),0,1,8,cvPoint(0,0))

                # Disabled per-contour debug dumps.
                if 0:

                    outputFilename = "c:\\temp\\contour_output\\out%04d_%04d.bmp" % (imageIndex, contourIndex)
                    highgui.cvSaveImage(outputFilename, contourImage)

                    outputFilename = "c:\\temp\\contour_output\\out%04d_%04d_and.bmp" % (imageIndex, contourIndex)
                    highgui.cvSaveImage(outputFilename, andImage)

                    outputFilename = "c:\\temp\\contour_output\\out%04d_%04d_or.bmp" % (imageIndex, contourIndex)
                    highgui.cvSaveImage(outputFilename, orImage)

                    outputFilename = "c:\\temp\\contour_output\\out%04d_%04d_masked.bmp" % (imageIndex, contourIndex)
                    highgui.cvSaveImage(outputFilename, maskedImage)

                    outputFilename = "c:\\temp\\contour_output\\out%04d_%04d_display.bmp" % (imageIndex, contourIndex)
                    highgui.cvSaveImage(outputFilename, resultDisplayImage)


                contourIndex = contourIndex + 1

            # Show image. HighGUI use.
            highgui.cvShowImage( "Result", contours_image );


        if 0:
            highgui.cvNamedWindow("original", 1)
            highgui.cvShowImage("original", smoothedImage)

            highgui.cvNamedWindow("contours", 1)
            highgui.cvShowImage("contours", contours_image)
        # Per-slice outputs: annotated contours and scored result image.
        outputFilename = "c:\\temp\\contour_output\\out%04d.bmp" % imageIndex
        highgui.cvSaveImage(outputFilename, contours_image)

        outputFilename = "c:\\temp\\contour_output\\result%04d.bmp" % imageIndex
        highgui.cvSaveImage(outputFilename, resultContoursImage)

        #print outputFilename
    print "output written to file stack"
예제 #35
0
 def SaveImage(self, filename):
     """Invert self.drawimg in place (black-on-white output) and write it
     to *filename*."""
     cv.cvNot(self.drawimg, self.drawimg)
     highgui.cvSaveImage(filename, self.drawimg)
예제 #36
0
def blob_identification(binary_image):
    """Extract character blobs from a binarized plate image.

    Finds connected components in *binary_image*, keeps blobs with an
    area in [325, 2000], normalizes each one onto a 240x320 canvas and
    saves it as pic<N>.png under $HOME/alpr/latest/blobs.  The recorded
    first/last pixel rows are passed to classification() to decide which
    blobs to reject; the surviving blobs are redrawn into a single image
    saved as blob.jpg.  Returns None.
    """
    from opencv.highgui import cvSaveImage, cvLoadImageM
    from opencv.cv import cvCreateImage, cvGetSize, cvCreateMat, cvSet, CV_RGB, cvResize
    from Blob import CBlob
    from BlobResult import CBlobResult
    from classification import classification
    from os import chdir, environ
    path = environ.get("HOME")
    frame_size = cvGetSize(binary_image)
    blo = cvCreateImage(frame_size, 8, 1)
    resblo = cvCreateMat(240, 320, binary_image.type)
    mask = cvCreateImage(frame_size, 8, 1)
    cvSet(mask, 255)
    myblobs = CBlobResult(binary_image, mask, 0, True)
    myblobs.filter_blobs(325, 2000)
    blob_count = myblobs.GetNumBlobs()
    count = 0
    pixr = []
    pixrm = []
    for i in range(blob_count):
        value = []
        rowval = []
        colval = []
        cvSet(blo, 0)
        my_enum_blob = myblobs.GetBlob(i)
        my_enum_blob.FillBlob(blo, CV_RGB(255, 0, 255), 0, 0)
        cvSet(resblo, 0)
        # Normalize the blob to a fixed 240x320 canvas before measuring.
        cvResize(blo, resblo, 1)
        for rowitem in range(resblo.rows):
            for colitem in range(resblo.cols):
                if resblo[rowitem, colitem] != 0:
                    rowval.append(rowitem)
                    colval.append(colitem)
                    value.append(resblo[rowitem, colitem])
        # First and last occupied rows feed the classifier below.
        pixr.append(rowval[0])
        pixrm.append(rowval[-1])
        rowmin = min(rowval)
        rowedit = []
        for item in rowval:
            rowedit.append(item - rowmin)

        coledit = []
        colmin = min(colval)
        for item in colval:
            coledit.append(int(item) - colmin)

        rowmax = max(rowedit)
        colmax = max(colval) - colmin
        moved = cvCreateMat(rowmax + 10, colmax + 10, blo.type)
        cvSet(moved, 0)

        # BUG FIX: the original reused loop variable 'i' here, shadowing
        # the outer blob index; renamed to 'j' to avoid the latent bug.
        for j in range(len(rowval)):
            moved[int(rowedit[j]) + 5, int(coledit[j]) + 5] = int(value[j])
        chdir(path + "/alpr/latest/blobs")
        cvSaveImage("pic" + str(count) + ".png", moved)
        count += 1
    avoid = classification(pixr, pixrm)
    blob_image = cvCreateImage(frame_size, 8, 1)
    cvSet(blob_image, 0)
    # Redraw only the blobs that were not rejected by the classifier.
    for i in range(blob_count):
        if i not in avoid:
            my_enum_blob = myblobs.GetBlob(i)
            my_enum_blob.FillBlob(blob_image, CV_RGB(255, 0, 255), 0, 0)
            cvSaveImage("blob.jpg", blob_image)
    return
예제 #37
0
def opencv_scale(filename, width, height):
    """Resize the image stored at *filename* to the given dimensions
    using area interpolation and save it as outcv.jpg."""
    original = highgui.cvLoadImage(filename)
    scaled = cv.cvCreateImage(cv.cvSize(width, height), 8, 3)
    cv.cvResize(original, scaled, cv.CV_INTER_AREA)
    highgui.cvSaveImage("outcv.jpg", scaled)
예제 #38
0
def main():
	"""
	Test driver: analyze one cut of an image, then draw the cut line
	(red), the per-segment centers of mass (green) and the component
	bounding boxes.  Saves the annotated image as centerOfMass.png.
	This method is a good resource on how to handle the results.

	Command line: <imgfile> <cutNo>
	"""

	filename = sys.argv[1]
	image = highgui.cvLoadImage (filename)

	cutRatios = [0.75]
	#cutRatios = [lib.PHI]
	settings = Settings(cutRatios)
	image = highgui.cvLoadImage (filename)
	thickness = 4
	settings.setMarginPercentage(0.025)
	cutNo = int(sys.argv[2])
	
	# Extract the requested cut.
	cut = lib.findMeans(cv.cvGetSize(image), settings.cutRatios[0])[cutNo]
	
	# Cut orientation: True when the cut is vertical (both end points
	# share the same x).  NOTE: should be generalized a bit.
	oriantesen = cut.getPoints()[0].x == cut.getPoints()[1].x
	
	if oriantesen:
		cutPixel = cut.getPoints()[1].x
	else:
		cutPixel = cut.getPoints()[1].y
	

	
	#Get the BW edge image
	edgeImage = expandedMethod.getEdgeImage(image, settings)

	(blobImg, comp) = expandedMethod.analyzeCut(image, edgeImage, cut, settings, 'True')
	# comp is a list of lists (one entry per component).
	
	# Find the margin
	margin = marginCalculator.getPixels(image, cut, settings.marginPercentage)

	lib.drawMargin(image, cut, margin)
	
	# Compute the grid of blob points.
	gridPointsList = grid.gridIt(blobImg, comp)
	# Ratio (percent) of pixels on one side of the cut vs. the other.
	
	pixelRatio = pixelSideCounter(gridPointsList, cutPixel, oriantesen)
	print pixelRatio
	# Compute the centers of mass.
	points = centerOfMass(gridPointsList, oriantesen)
	# Draw the cut line in red; orientation decides the line direction.
	
	if oriantesen:
		cv.cvLine(image, cv.cvPoint(cutPixel, cut.getPoints()[0].y), cv.cvPoint(cutPixel, cut.getPoints()[1].y), COL_RED)
	else:
		cv.cvLine(image, cv.cvPoint(cut.getPoints()[0].x, cutPixel), cv.cvPoint(cut.getPoints()[1].x, cutPixel), COL_RED)
	
	# Draw each center of mass as a green line across the cut.
	for point in points:
		if oriantesen:
			cv.cvLine(image, cv.cvPoint(point, cut.getPoints()[0].y), cv.cvPoint(point, cut.getPoints()[1].y), COL_GREEN)
		else:
			cv.cvLine(image, cv.cvPoint(cut.getPoints()[0].x, point), cv.cvPoint(cut.getPoints()[1].x, point), COL_GREEN)
	lib.drawBoundingBoxes(image, comp, 4, COL_GREEN)
	#highgui.cvSaveImage('floodfillbilledet.png', blobImg)
	highgui.cvSaveImage('centerOfMass.png', image)
	
	showImage(image, 'name')
예제 #39
0
# Visualization script for a pickled PatchClassifier dataset: dump PCA bases
# and labeled input patches as tiled images.  show_pca / save_pca_bases and
# the tile_nsave/scale_image/display helpers are presumably defined earlier
# in the file -- TODO confirm.
process_inputs = False
separate_negatives_positives = True
dataset = load_pickle('PatchClassifier.dataset.pickle')
hg.cvNamedWindow('image', 1)
hg.cvNamedWindow('context', 1)
pca_basis = normalize_for_display(dataset.projection_basis)

# Show each PCA basis vector in its own window.
if show_pca:
    for i in range(pca_basis.shape[1]):
        print 'basis', i
        display(pca_basis[:, i], 'pca_basis' + str(i))

# Save all PCA bases as one tiled, 8x-upscaled image.
if save_pca_bases:
    large_image = tile_nsave(pca_basis)
    large_image = scale_image(large_image, 8)
    hg.cvSaveImage('pca_large_image.png', large_image)

# Save every raw input patch into one tiled image.
if process_inputs:
    large_image = tile_nsave(dataset.inputs)
    hg.cvSaveImage('inputs.png', large_image)

# Split the patches by label (0/1) and save each group as a tiled image;
# np.where returns matrix indices, hence the .A[0] flattening.
if separate_negatives_positives:
    r, c = np.where(dataset.outputs == 0)
    negatives = tile_nsave(dataset.inputs[:, c.A[0]])
    r, c = np.where(dataset.outputs == 1)
    positives = tile_nsave(dataset.inputs[:, c.A[0]])
    hg.cvSaveImage('negatives.png', negatives)
    hg.cvSaveImage('positives.png', positives)

#projection_vectors = dr.pca_vectors(dataset.inputs, 0.95)
#projection_vectors = normalize_for_display(projection_vectors)
예제 #40
0
            # display the frames to have a visual output
            highgui.cvShowImage('Camera', img)
            # write the frame to the output file
            #highgui.cvWriteFrame (writer, img)

        if keyinput[K_ESCAPE]:
            # user has press the ESC key, so exit
            #highgui.cvReleaseVideoWriter (writer)
            highgui.cvDestroyAllWindows()
            highgui.cvReleaseCapture(capture)
            pygame.quit()
            #sys.exit ()
            break

        if keyinput[K_s]:
            highgui.cvSaveImage("snapshot.BMP", img)

        if keyinput[K_b]:
            img = highgui.cvQueryFrame(capture)
            PILimg = opencv.adaptors.Ipl2PIL(img)
            PILimg = PILimg.filter(ImageFilter.BLUR)
            opencvimg = opencv.adaptors.PIL2Ipl(PILimg)
            highgui.cvShowImage('Canny', opencvimg)

        if keyinput[K_c]:
            img = highgui.cvQueryFrame(capture)
            PILimg = opencv.adaptors.Ipl2PIL(img)
            PILimg = PILimg.filter(ImageFilter.CONTOUR)
            opencvimg = opencv.adaptors.PIL2Ipl(PILimg)
            highgui.cvShowImage('Canny', opencvimg)
예제 #41
0
# Edge-detector test script: run Canny on `image` (loaded earlier, outside
# this view), overlay the edges on the original, save the overlay as
# "<threshold1>-<threshold2>.png" and show both images until 'q' is pressed.
print "Finding edges using Canny"
bwe = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
out = cv.cvCreateImage(cv.cvGetSize(image), 8, 3)
#edgeDetector.findEdges(image, out, threshold1, threshold2)
edgeDetector.findBWEdges(image, bwe, threshold1, threshold2)

# Enable if you need the image and the edges together.
#cv.cvNot(bwe, bwe)

# Copy the original through the edge mask: only edge pixels show in `out`.
cv.cvCopy(image, out, bwe)

outname = "edgeDetectorTest"
orgname = "Original"
# Output filename encodes the two Canny thresholds.
nystr = str(str(threshold1)+'-'+str(threshold2)+'.png')
print nystr
highgui.cvNamedWindow (outname, highgui.CV_WINDOW_AUTOSIZE)
highgui.cvNamedWindow (orgname, highgui.CV_WINDOW_AUTOSIZE)
highgui.cvSaveImage(nystr, out)

# Display loop: redraw both windows until the user quits with 'q'.
while True:
	
	highgui.cvShowImage (orgname, image)
	highgui.cvShowImage (outname, out)

	c = highgui.cvWaitKey(0)
	
	if c == 'q':
		print "Exiting ..."
		print ""
		sys.exit(0)
예제 #42
0
def main():

    ct1 = CurvePoint()
    ct2 = CurvePoint()
    sc = CntSC()
    ang = CntAngle()
     
    
    usage = "%prog [options] <imgfile1> <imgfile2>"
    version = "%prog 0.2\nLongbin Chen, [email protected]"
    oparser = optparse.OptionParser(usage=usage, version=version)
    oparser.add_option('-d', '--display', action="store_true", dest = 'display', default = False, help = 'display the image')
    oparser.add_option('-n', '--number', dest = 'num',  type="int", default = 200 , help = 'the number of feature points')
    oparser.add_option('-s', '--save', dest = 'save', default = None, help = 'save the img file')

    oparser.add_option('-o', '--output', dest = 'output', default = None, help = 'output file')

    (options, args) = oparser.parse_args(sys.argv)

    if len(args) != 3:
        oparser.parse_args([sys.argv[0], "--help"])
        sys.exit(1)

    ct1.GetContour(args[1], options.num)
    allkeys = []
    for c in ct1.allselected:
        allkeys = allkeys + c
    sc.ExtractFeature(allkeys)
    ang.ExtractFeature(allkeys,0); 
    allkeys = []
    ct2.GetContour(args[2], options.num)
    for c in ct2.allselected:
        allkeys = allkeys + c
    sc.ExtractFeature(allkeys)
    ang.ExtractFeature(allkeys,0); 

    sumscore = []
    matcher = SmithWaterman()
    ct1.bDrawNumber = 0
    ct2.bDrawNumber = 0
    if (options.display):
        ct1.DrawKeyPoints()
        ct2.DrawKeyPoints()
    myfont = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
    idx = -1
    putoriginal(args[1], ct1.drawimg)
    putoriginal(args[2], ct2.drawimg)
    cv.cvNot(ct1.drawimg, ct1.drawimg)
    cv.cvNot(ct2.drawimg, ct2.drawimg)
    for c1 in ct1.allselected:
        idx += 1
        cscore = -100000000
        cpt1 =   getdata(c1)
        bX = []
        bY = []
        bestcurve = None
        for c2 in ct2.allselected:
            cpt2 =   getdata(c2)
            cost,align,X,Y = matcher.Align(cpt1, cpt2)
            normalized_score = cost - log10(len(c2) + 1) * 1000
            print len(c1), len(c2),cost, normalized_score, cscore
            if (normalized_score > cscore):
                cscore = normalized_score
                bX = X[:]
                bY = Y[:]
                bestcurve = c2
        if (options.display):
            ptcount = 0
            for i in range(len(bX)):
                xi = bX[i]
                yi = bY[i]
                #if (xi == -1):
                    #cv.cvDrawCircle(ct2.drawimg, cv.cvPoint(int(bestcurve[yi].x), int(bestcurve[yi].y)),4, cv.cvScalar(255,0,0,0))
                    #cv.cvPutText(ct2.drawimg, 'O', cv.cvPoint(int(c2[yi].x), int(c2[yi].y)), myfont, cv.cvScalar(255, 0, 0,0))
                #if (yi == -1):
                    #cv.cvDrawCircle(ct1.drawimg, cv.cvPoint(int(c1[xi].x), int(c1[xi].y)),4, cv.cvScalar(255,0,0,0))
                    #cv.cvPutText(ct1.drawimg, 'O', cv.cvPoint(int(c1[xi].x), int(c1[xi].y)), myfont, cv.cvScalar(255, 0, 0,0))
                if (xi != -1 and yi != -1):
                    ptcount  += 1
                    cv.cvDrawCircle(ct1.drawimg, cv.cvPoint(int(c1[xi].x), int(c1[xi].y)),2, clrs[idx])
                    cv.cvPutText(ct1.drawimg, str(ptcount), cv.cvPoint(int(c1[xi].x), int(c1[xi].y)), myfont, clrs[idx])
                    cv.cvDrawCircle(ct2.drawimg, cv.cvPoint(int(bestcurve[yi].x), int(bestcurve[yi].y)),2, clrs[idx])
                    cv.cvPutText(ct2.drawimg, str(ptcount), cv.cvPoint(int(bestcurve[yi].x), int(bestcurve[yi].y)), myfont, clrs[idx])
        sumscore.append(cscore)
    print sumscore
    if (options.display):            
	    highgui.cvNamedWindow ("contour1", 1)
	    highgui.cvNamedWindow ("contour2", 1)
	    highgui.cvShowImage ("contour1", ct1.drawimg)
	    highgui.cvShowImage ("contour2", ct2.drawimg)
	    highgui.cvWaitKey (0)       
    if (options.save):
        mergeimg = mergeimage_83(ct1.drawimg, ct2.drawimg)
        highgui.cvSaveImage("_sw_result.bmp", mergeimg)
예제 #43
0
def main():
    """Match contour keypoints between two images with Smith-Waterman alignment.

    Command line: [options] <imgfile1> <imgfile2>.  Extracts up to
    --number feature points per image, aligns every selected curve of
    image 1 against every curve of image 2, keeps the best-scoring
    partner for each, and optionally displays (-d) and/or saves (-s)
    the annotated result images.
    """

    # Project helpers: contour containers plus shape-context / angle
    # feature extractors (defined elsewhere in this project).
    ct1 = CurvePoint()
    ct2 = CurvePoint()
    sc = CntSC()
    ang = CntAngle()

    usage = "%prog [options] <imgfile1> <imgfile2>"
    version = "%prog 0.2\nLongbin Chen, [email protected]"
    oparser = optparse.OptionParser(usage=usage, version=version)
    oparser.add_option('-d',
                       '--display',
                       action="store_true",
                       dest='display',
                       default=False,
                       help='display the image')
    oparser.add_option('-n',
                       '--number',
                       dest='num',
                       type="int",
                       default=200,
                       help='the number of feature points')
    oparser.add_option('-s',
                       '--save',
                       dest='save',
                       default=None,
                       help='save the img file')

    oparser.add_option('-o',
                       '--output',
                       dest='output',
                       default=None,
                       help='output file')

    # NOTE: the full sys.argv (including the program name) is parsed, so
    # args[0] is the script itself and the two image paths are args[1]
    # and args[2] -- hence the len(args) != 3 check below.
    (options, args) = oparser.parse_args(sys.argv)

    if len(args) != 3:
        oparser.parse_args([sys.argv[0], "--help"])
        sys.exit(1)

    # Extract contours and compute features for image 1.
    ct1.GetContour(args[1], options.num)
    allkeys = []
    for c in ct1.allselected:
        allkeys = allkeys + c
    sc.ExtractFeature(allkeys)
    ang.ExtractFeature(allkeys, 0)
    # Same extraction for image 2.
    allkeys = []
    ct2.GetContour(args[2], options.num)
    for c in ct2.allselected:
        allkeys = allkeys + c
    sc.ExtractFeature(allkeys)
    ang.ExtractFeature(allkeys, 0)

    sumscore = []  # best alignment score found for each curve of image 1
    matcher = SmithWaterman()
    ct1.bDrawNumber = 0
    ct2.bDrawNumber = 0
    if (options.display):
        ct1.DrawKeyPoints()
        ct2.DrawKeyPoints()
    myfont = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
    idx = -1  # curve index; selects the drawing color from clrs
    # Overlay the source images onto the drawing buffers, then invert
    # them so the colored annotations stand out.
    putoriginal(args[1], ct1.drawimg)
    putoriginal(args[2], ct2.drawimg)
    cv.cvNot(ct1.drawimg, ct1.drawimg)
    cv.cvNot(ct2.drawimg, ct2.drawimg)
    # For each curve of image 1, find the best-aligning curve of image 2.
    for c1 in ct1.allselected:
        idx += 1
        cscore = -100000000  # effectively -infinity
        cpt1 = getdata(c1)
        bX = []  # alignment index lists of the best match so far
        bY = []
        bestcurve = None
        for c2 in ct2.allselected:
            cpt2 = getdata(c2)
            cost, align, X, Y = matcher.Align(cpt1, cpt2)
            # Penalize longer target curves so raw alignment costs are
            # comparable across candidates.
            normalized_score = cost - log10(len(c2) + 1) * 1000
            print len(c1), len(c2), cost, normalized_score, cscore
            if (normalized_score > cscore):
                cscore = normalized_score
                bX = X[:]
                bY = Y[:]
                bestcurve = c2
        if (options.display):
            # Number and mark every aligned point pair in both images.
            ptcount = 0
            for i in range(len(bX)):
                xi = bX[i]
                yi = bY[i]
                #if (xi == -1):
                #cv.cvDrawCircle(ct2.drawimg, cv.cvPoint(int(bestcurve[yi].x), int(bestcurve[yi].y)),4, cv.cvScalar(255,0,0,0))
                #cv.cvPutText(ct2.drawimg, 'O', cv.cvPoint(int(c2[yi].x), int(c2[yi].y)), myfont, cv.cvScalar(255, 0, 0,0))
                #if (yi == -1):
                #cv.cvDrawCircle(ct1.drawimg, cv.cvPoint(int(c1[xi].x), int(c1[xi].y)),4, cv.cvScalar(255,0,0,0))
                #cv.cvPutText(ct1.drawimg, 'O', cv.cvPoint(int(c1[xi].x), int(c1[xi].y)), myfont, cv.cvScalar(255, 0, 0,0))
                # -1 marks an alignment gap; draw only real pairs.
                if (xi != -1 and yi != -1):
                    ptcount += 1
                    cv.cvDrawCircle(ct1.drawimg,
                                    cv.cvPoint(int(c1[xi].x), int(c1[xi].y)),
                                    2, clrs[idx])
                    cv.cvPutText(ct1.drawimg, str(ptcount),
                                 cv.cvPoint(int(c1[xi].x), int(c1[xi].y)),
                                 myfont, clrs[idx])
                    cv.cvDrawCircle(
                        ct2.drawimg,
                        cv.cvPoint(int(bestcurve[yi].x), int(bestcurve[yi].y)),
                        2, clrs[idx])
                    cv.cvPutText(
                        ct2.drawimg, str(ptcount),
                        cv.cvPoint(int(bestcurve[yi].x), int(bestcurve[yi].y)),
                        myfont, clrs[idx])
        sumscore.append(cscore)
    print sumscore
    if (options.display):
        highgui.cvNamedWindow("contour1", 1)
        highgui.cvNamedWindow("contour2", 1)
        highgui.cvShowImage("contour1", ct1.drawimg)
        highgui.cvShowImage("contour2", ct2.drawimg)
        highgui.cvWaitKey(0)
    if (options.save):
        mergeimg = mergeimage_83(ct1.drawimg, ct2.drawimg)
        highgui.cvSaveImage("_sw_result.bmp", mergeimg)
예제 #44
0
def testGaussianBlur():
    """Test that the guassian blur function gives the exact same output
    in Python and in C++ with OpenCV and ideally with SciPy. 
    
    Can run this test with:
    nosetests --with-doctest blur_scipy.py -v
    """
    from pylab import imread
    from opencv import highgui
    import blur_opencv  # a seperate file with the opencv gaussian operation

    # Using Lena image create tests image.
    image_filename = "/usr/share/doc/opencv-doc/examples/c/lena.jpg"
    i = highgui.cvLoadImage(image_filename)

    # Carry out the filtering
    py_scipy = mlGaussianBlur(
        i)  # note - it is decorated to convert between cvMat and NumPy
    py_scipy2 = gaussianBlur(i)
    py_opencv = blur_opencv.gaussianBlur(i)

    # Save the outputs as jpg files
    highgui.cvSaveImage("gaussian_scipy_iir.jpg", py_scipy)
    highgui.cvSaveImage("gaussian_scipy_ndfilt.jpg", py_scipy2)
    highgui.cvSaveImage("gaussian_opencv.jpg", py_opencv)

    # Load in the image data with scipy
    python_opencv_image = imread("gaussian_opencv.jpg")
    python_scipy_image = imread("gaussian_scipy_ndfilt.jpg")
    python_scipy2_image = imread("gaussian_scipy_iir.jpg")

    diff = uint8(
        abs(
            python_opencv_image.astype(float) -
            python_scipy_image.astype(float)))
    diff2 = uint8(
        abs(
            python_opencv_image.astype(float) -
            python_scipy2_image.astype(float)))
    diff3 = uint8(
        abs(
            python_scipy_image.astype(float) -
            python_scipy2_image.astype(float)))

    # For visual inspection:

    from pylab import show, imshow, figure, subplot, title

    figure()
    subplot(1, 3, 1)
    title("The OpenCV Output (Py and C++)")
    imshow(python_opencv_image)
    subplot(1, 3, 2)
    title("SciPy: IIR filter")
    imshow(python_scipy_image)
    subplot(1, 3, 3)
    title("SciPy: ndimage.filters.gaussian_filter")
    imshow(python_scipy2_image)
    figure()
    subplot(1, 3, 1)
    imshow(diff)
    subplot(1, 3, 2)
    imshow(diff2)
    subplot(1, 3, 3)
    imshow(diff3)

    from misc import plot_seperate_rgb
    plot_seperate_rgb(diff)
    plot_seperate_rgb(diff3)
    show()

    # Check that the sum of all differences at each point is 0
    print sum(python_opencv_image.flatten() - python_scipy_image.flatten())
예제 #45
0
def main():

    ct1 = CurvePoint()
    ct2 = CurvePoint()
    agl = CntAngle()
    sc = CntSC()

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ho:dn:es", ["help", "output=", "draw", "num=", "even", "save"])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    output = None
    bDraw = 0
    bSave = 0
    bOriginal = 0
    npoint = 100

    for o, a in opts:
        if o == "-v":
            ct1.verbose = 1
            ct2.verbose = 1
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        if o in ("-o", "--output"):
            output = a
        if o in ("-d", "--draw"):
            bDraw = 1
        if o in ("-s", "--save"):
            bSave = 1
        if o in ("-r", "--original"):
            bOriginal = 1
        if o in ("-n", "--num"):
            npoint = string.atoi(a)
        if o in ("-e", "--even"):
            ct1.bEven = 1
            ct2.bEven = 1
    if (len(args)) != 2:
        usage()
        sys.exit(2)

    ct1.GetContour(args[0], npoint)
    allkeys = []
    for c in ct1.allselected:
        # agl.ExtractFeature(c, ct1.drawimg)
        allkeys = allkeys + c
    sc.ExtractFeature(allkeys)

    allkeys = []
    ct2.GetContour(args[1], npoint)
    for c in ct2.allselected:
        # agl.ExtractFeature(c, ct2.drawimg)
        allkeys = allkeys + c
    sc.ExtractFeature(allkeys)

    sumscore = []
    matcher = SmithWaterman()
    ct1.bDrawNumber = 0
    ct2.bDrawNumber = 0
    if bDraw:
        ct1.DrawKeyPoints()
        ct2.DrawKeyPoints()
    myfont = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
    idx = -1
    putoriginal(args[0], ct1.drawimg)
    putoriginal(args[1], ct2.drawimg)
    cv.cvNot(ct1.drawimg, ct1.drawimg)
    cv.cvNot(ct2.drawimg, ct2.drawimg)
    for c1 in ct1.allselected:
        idx += 1
        cscore = -100000000
        cpt1 = getdata(c1)
        bX = []
        bY = []
        bestcurve = None
        for c2 in ct2.allselected:
            cpt2 = getdata(c2)
            cost, align, X, Y = matcher.Align(cpt1, cpt2)
            normalized_score = cost - log10(len(c2) + 1) * 1000
            print len(c1), len(c2), cost, normalized_score, cscore
            if normalized_score > cscore:
                cscore = normalized_score
                bX = X[:]
                bY = Y[:]
                bestcurve = c2
        if bDraw:
            ptcount = 0
            for i in range(len(bX)):
                xi = bX[i]
                yi = bY[i]
                # if (xi == -1):
                # cv.cvDrawCircle(ct2.drawimg, cv.cvPoint(int(bestcurve[yi].x), int(bestcurve[yi].y)),4, cv.cvScalar(255,0,0,0))
                # cv.cvPutText(ct2.drawimg, 'O', cv.cvPoint(int(c2[yi].x), int(c2[yi].y)), myfont, cv.cvScalar(255, 0, 0,0))
                # if (yi == -1):
                # cv.cvDrawCircle(ct1.drawimg, cv.cvPoint(int(c1[xi].x), int(c1[xi].y)),4, cv.cvScalar(255,0,0,0))
                # cv.cvPutText(ct1.drawimg, 'O', cv.cvPoint(int(c1[xi].x), int(c1[xi].y)), myfont, cv.cvScalar(255, 0, 0,0))
                if xi != -1 and yi != -1:
                    ptcount += 1
                    cv.cvDrawCircle(ct1.drawimg, cv.cvPoint(int(c1[xi].x), int(c1[xi].y)), 2, clrs[idx])
                    cv.cvPutText(ct1.drawimg, str(ptcount), cv.cvPoint(int(c1[xi].x), int(c1[xi].y)), myfont, clrs[idx])
                    cv.cvDrawCircle(ct2.drawimg, cv.cvPoint(int(bestcurve[yi].x), int(bestcurve[yi].y)), 2, clrs[idx])
                    cv.cvPutText(
                        ct2.drawimg,
                        str(ptcount),
                        cv.cvPoint(int(bestcurve[yi].x), int(bestcurve[yi].y)),
                        myfont,
                        clrs[idx],
                    )
        sumscore.append(cscore)
    print sumscore
    if bDraw:
        highgui.cvNamedWindow("contour1", 1)
        highgui.cvNamedWindow("contour2", 1)
        highgui.cvShowImage("contour1", ct1.drawimg)
        highgui.cvShowImage("contour2", ct2.drawimg)
        highgui.cvWaitKey(0)
    if bSave:
        mergeimg = mergeimage_83(ct1.drawimg, ct2.drawimg)
        highgui.cvSaveImage("_sw_result.bmp", mergeimg)
예제 #46
0
                                                 MPEG1VIDEO, fps, frame_size,
                                                 True)
            # check the writer is OK
            if not writer:
                print "Error opening writer"
                sys.exit(1)
            i += 1

        elif k == 'c':
            # pause
            play = False
            writer = None

        elif k == 's':
            # save image
            highgui.cvSaveImage('out.png', frame)

        elif k == 'p':
            # toggle playing state
            play = not play

            # if paused, update the slider
            if not play:
                pos = highgui.cvGetCaptureProperty(
                    capture, highgui.CV_CAP_PROP_POS_FRAMES)
                print 'Current frame: %d' % pos

    # end working with the writer
    # not working at this time... Need to implement some typemaps...
    # but exiting without calling it is OK in this simple application
    #highgui.cvReleaseVideoWriter (writer)
예제 #47
0
파일: cv1.py 프로젝트: hxfabc2012/OpenQbo-1
def save_image(name, image):
    """Write *image* to disk at the path *name*."""
    # Thin delegation to the OpenCV highgui image writer.
    writer = hg.cvSaveImage
    writer(name, image)
예제 #48
0
                            color, -1, 8, 0)

        # we can now display the images
        highgui.cvShowImage ('Camera', frame)
        highgui.cvShowImage ('Histogram', histimg)

        # handle events
        k = highgui.cvWaitKey (10)

	if k == 'w' or k=='a':
	    # the user requested saving the image
	    local_name=str(os.popen('uuidgen').readlines()[0].strip())+'.jpg';
	    print local_name
	    out_file_dir='images/'
	    if not os.path.exists(out_file_dir):
		os.makedirs(out_file_dir);
	    
	    highgui.cvSaveImage (out_file_dir+local_name,frame);
	    fLog=open(out_file_dir+'files.log','a+');
	    print >>fLog,'%s\t%s' %(local_name,time.localtime());
	    fLog.close();

	    if k == 'a':
		# the user requested saving and annotation of the image
		os.system('python2.5 ./submit_img.py %s' % (out_file_dir+local_name));


        if k == '\x1b':
            # user has press the ESC key, so exit
            break
예제 #49
0
 def SaveImage(self, filename):
     """Invert the drawing buffer in place, then write it to *filename*."""
     canvas = self.drawimg
     # Invert colors in place before saving the annotated image.
     cv.cvNot(canvas, canvas)
     highgui.cvSaveImage(filename, canvas)