def write(self, bgrimg):
    fullpath = self.output_folder + os.sep + '%.3d.png' % (self.id_,)
    cv.SaveImage(fullpath, bgrimg)
    self.id_ += 1
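# The method above assumes `os` and `cv` are imported and that it lives on a
# class holding `output_folder` and an `id_` counter. A minimal host-class
# sketch (FrameWriter and its constructor are assumptions, not original code):
import os
import cv

class FrameWriter(object):
    def __init__(self, output_folder):
        self.output_folder = output_folder
        self.id_ = 0  # next frame index; write() formats it as '%.3d.png'

    write = write  # bind the module-level write() above as a method

# usage sketch: save three consecutive camera frames as 000.png, 001.png, 002.png
capture = cv.CaptureFromCAM(0)
writer = FrameWriter('frames')
for _ in range(3):
    writer.write(cv.QueryFrame(capture))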
centroid = cvblob.Centroid(bigblobs[bigblob])
print "centroid of blob " + str(bigblob) + " is " + str(centroid)
print "angle of blob " + str(bigblob) + " is " + str(cvblob.Angle(blobs[bigblob]))

cvblob.SaveImageBlob("blob_" + str(bigblob) + ".png", img, blobs[bigblob])

print "point x: 250 y: 100 is blob " + str(cvblob.GetLabel(labelImg, 250, 100))

#cvblob.RenderBlob(labelImg, blobs[bigblob], img, rendered, cvblob.CV_BLOB_RENDER_COLOR|cvblob.CV_BLOB_RENDER_CENTROID|cvblob.CV_BLOB_RENDER_BOUNDING_BOX|cvblob.CV_BLOB_RENDER_ANGLE, cv.RGB(0,0,255), 0.9)
#cvblob.RenderBlob(labelImg, blobs[1], img, rendered, cvblob.CV_BLOB_RENDER_COLOR|cvblob.CV_BLOB_RENDER_CENTROID|cvblob.CV_BLOB_RENDER_BOUNDING_BOX|cvblob.CV_BLOB_RENDER_ANGLE, cv.RGB(255,0,255))
#cvblob.RenderBlob(labelImg, blobs[bigblob], img, rendered, cvblob.CV_BLOB_RENDER_COLOR|cvblob.CV_BLOB_RENDER_CENTROID|cvblob.CV_BLOB_RENDER_BOUNDING_BOX|cvblob.CV_BLOB_RENDER_ANGLE)
#cvblob.RenderBlob(labelImg, blobs[bigblob], img, rendered)

imgOut = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
cv.Zero(imgOut)
cvblob.RenderBlobs(labelImg, blobs, img, imgOut,
                   cvblob.CV_BLOB_RENDER_COLOR | cvblob.CV_BLOB_RENDER_CENTROID |
                   cvblob.CV_BLOB_RENDER_BOUNDING_BOX | cvblob.CV_BLOB_RENDER_ANGLE,
                   1.0)
#cvblob.SetImageROItoBlob(imgOut, blobs[bigblob]);

print "mean color for blob " + str(bigblob) + " is " + str(cvblob.BlobMeanColor(blobs[bigblob], labelImg, img))

cv.SaveImage("test_filtered.png", filtered)
cv.SaveImage("test_rendered.png", imgOut)

#draw the rendered blobs on an outline
#cvblob.RenderBlobs(labelImg, blobs, img, imgOut)
# -*- coding: utf-8 -*-
import cv
# import cv2.cv as cv

if __name__ == '__main__':
    # Load the image
    im = cv.LoadImage("test.jpg")

    # Prepare the image buffers
    gray = cv.CreateImage(cv.GetSize(im), 8, 1)
    eq = cv.CreateImage(cv.GetSize(gray), 8, 1)

    # Histogram equalization
    cv.CvtColor(im, gray, cv.CV_BGR2GRAY)
    cv.EqualizeHist(gray, eq)

    # Create a window
    cv.NamedWindow("Show Image")

    # Display the image
    cv.ShowImage("Show Image", eq)

    # Wait for a key press
    cv.WaitKey(0)

    # Destroy the window
    cv.DestroyAllWindows()

    # Save the image
    cv.SaveImage("eq.jpg", eq)
import cv
import urllib2
import time
import math

# insert your hipchat auth token!

#grab an image from the camera and save it
capture = cv.CaptureFromCAM(-1)  #-1 will select the first camera available, usually /dev/video0 on linux
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
im = cv.QueryFrame(capture)
cv.SaveImage("/home/pi/dish/capture/sink-latest.jpg", im)

#convert the image to grayscale
edges = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1)
cv.CvtColor(im, edges, cv.CV_BGR2GRAY)

#edge detect it, then smooth the edges
thresh = 120
cv.Canny(edges, edges, thresh, thresh / 2, 3)
cv.Smooth(edges, edges, cv.CV_GAUSSIAN, 3, 3)
cv.SaveImage("/home/pi/dish/capture/sink-latest-edges.jpg", edges)

#find the circles
storage = cv.CreateMat(640, 1, cv.CV_32FC3)
cv.HoughCircles(edges, storage, cv.CV_HOUGH_GRADIENT, 2, edges.width / 10,
                thresh, 160, 0, 0)
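# Hedged sketch of reading the detected circles back out of `storage`: with
# the legacy C API, a CV_32FC3 result matrix holds one (x, y, radius) triple
# per row, and the matrix header is truncated to the number of circles found
# (behavior assumed from the cvHoughCircles convention).
for i in range(storage.rows):
    cx, cy, radius = storage[i, 0]
    cv.Circle(im, (int(cx), int(cy)), int(radius), cv.RGB(0, 255, 0), 2)
cv.SaveImage("/home/pi/dish/capture/sink-latest-circles.jpg", im)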
cv.SetZero(avg)
cv.Add(im_downsmpl, sum, sum)

if len(word_frame_list) > 1:
    for frame in word_frame_list[1:]:
        im = cv.LoadImageM(images_path + capture_date + "/" + frame)
        im = cv.GetSubRect(im, (205, 145, 225, 95))
        cv.ConvertScale(im, im, -1, 255)
        cv.Threshold(im, im, 165, 0, cv.CV_THRESH_TOZERO)
        im_downsmpl = cv.CreateMat(im.rows / downsmpl_factor,
                                   im.cols / downsmpl_factor, cv.CV_8UC3)
        cv.Resize(im, im_downsmpl)
        cv.Add(im_downsmpl, sum, sum)

cv.ConvertScale(sum, avg, 1.0)
#cv.ConvertScale(sum, avg, 1.0/len(word_frame_list))

print word_list.index(word),
for i in range(avg.rows):
    for j in range(avg.cols):
        print str(i * avg.cols + j + 1) + ":" + str(avg[i, j][0]),
print '\n',

filename = "%s_%02d.jpg" % (word, word_timeline.index(word_instance))
cv.SaveImage(output_path + output_dirname_level_1 + "/" +
             output_dirname_level_2 + "/dataset_images/" + filename, avg)
if vc.isOpened():  # try to get the first frame
    rval, frame = vc.read()
else:
    rval = False

print "\n\n\n\n\npress space to take picture; press ESC to exit"

while rval:
    cv2.imshow("preview", frame)
    rval, frame = vc.read()
    key = cv2.waitKey(40)
    if key == 27:  # exit on ESC
        break
    if key == 32:  # press space to save images
        cv.SaveImage("webcam.jpg", cv.fromarray(frame))
        img = cv.LoadImage("webcam.jpg")  # input image
        mouth = m.findmouth(img)
        # show(mouth)
        if mouth != 2:  # did not return error
            mouthimg = crop(mouth)
            cv.SaveImage("webcam-m.jpg", mouthimg)
            # predict the captured emotion
            result = lr.predict(vectorize('webcam-m.jpg'))
            if result == 1:
                print "you are smiling! :-) "
            else:
                print "you are not smiling :-| "
        else:
            print "failed to detect mouth. Try to hold your head straight and make sure there is only one face."
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cv
from datetime import datetime

if __name__ == '__main__':
    capture = cv.CaptureFromCAM(0)
    cv.NamedWindow('Webcam')

    while True:
        frame = cv.QueryFrame(capture)
        cv.ShowImage('Webcam', frame)
        c = cv.WaitKey(10) % 256
        if c == 27:
            # ESC pressed. Finish the program
            break
        elif c == 10:
            # ENTER pressed. Store image to disk
            cv.SaveImage("foto" + '.bmp', frame)

    capture = None
    cv.DestroyAllWindows()
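# The datetime import above is otherwise unused, and every ENTER press
# overwrites foto.bmp. A small sketch of a timestamped save instead
# (the function name is an assumption):
def save_timestamped(frame):
    filename = "foto_%s.bmp" % datetime.now().strftime("%Y%m%d_%H%M%S")
    cv.SaveImage(filename, frame)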
def ndvi_to_file(band3, band4, out_img='tmp/ndvi.tiff'):
    cv.SaveImage(out_img, ndvi(band3, band4))
import sys
import cv
from math import degrees

def threshhold(img):
    # single-channel working buffer (assumed; created here so the function
    # is self-contained -- the original snippet starts mid-function)
    bwdst = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(img, bwdst, cv.CV_BGR2GRAY)
    # legacy signature: AdaptiveThreshold(src, dst, maxValue, adaptive_method,
    # thresholdType, blockSize); the method and type arguments were swapped
    cv.AdaptiveThreshold(bwdst, bwdst, 255.0, cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY, 11)
    cv.Dilate(bwdst, bwdst)
    cv.Erode(bwdst, bwdst)
    return bwdst

def wait():
    while True:
        k = cv.WaitKey(0) % 0x100
        if k == 27:
            break

fname = sys.argv[1]
img = cv.LoadImage(fname)
thresh = threshhold(img)
lines = findLines(thresh)
ang = degrees(avgAngle(lines)) - 90.0
fix_rotation = rotate(thresh, ang)
#we need to convert back to greyscale because ocrfeeder doesn't expect a plain B&W image
cv.ShowImage("rotation fixed", fix_rotation)
color_dst = cv.CreateImage(cv.GetSize(fix_rotation), 8, 3)
cv.CvtColor(fix_rotation, color_dst, cv.CV_GRAY2BGR)
cv.SaveImage("skew_fix.jpg", color_dst)
wait()
cv.CalcOpticalFlowHS(inputImageFirst, inputImageSecond, False, velx, vely,
                     100.0, (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 64, 0.01))

for i in range(0, cols, FLOWSKIP):
    for j in range(0, rows, FLOWSKIP):
        dx = int(cv.GetReal2D(velx, j, i))
        dy = int(cv.GetReal2D(vely, j, i))
        cv.Line(desImageHS, (i, j), (i + dx, j + dy), (255, 0, 0), 1, cv.CV_AA, 0)

cv.SetZero(velx)
cv.SetZero(vely)

cv.CalcOpticalFlowLK(inputImageFirst, inputImageSecond, (15, 15), velx, vely)

for i in range(0, cols, FLOWSKIP):
    for j in range(0, rows, FLOWSKIP):
        dx = int(cv.GetReal2D(velx, j, i))
        dy = int(cv.GetReal2D(vely, j, i))
        cv.Line(desImageLK, (i, j), (i + dx, j + dy), (255, 0, 0), 1, cv.CV_AA, 0)

cv.SaveImage("resultHS.png", desImageHS)
cv.SaveImage("resultLK.png", desImageLK)

cv.NamedWindow("Optical flow HS")
cv.ShowImage("Optical flow HS", desImageHS)
cv.NamedWindow("Optical flow LK")
cv.ShowImage("Optical flow LK", desImageLK)

cv.WaitKey(0)
cv.DestroyAllWindows()
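# Sketch of the setup the snippet above assumes (all names are taken from the
# snippet, the buffer types are assumptions): 8-bit grayscale inputs, 32-bit
# float velocity buffers of the same size, and color images to draw onto.
rows, cols = inputImageFirst.height, inputImageFirst.width
velx = cv.CreateMat(rows, cols, cv.CV_32FC1)
vely = cv.CreateMat(rows, cols, cv.CV_32FC1)
desImageHS = cv.CreateImage((cols, rows), cv.IPL_DEPTH_8U, 3)
desImageLK = cv.CreateImage((cols, rows), cv.IPL_DEPTH_8U, 3)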
def ndvi_to_file(band3_img, band4_img, out_img):
    band3 = cv.LoadImage(band3_img, 0)
    band4 = cv.LoadImage(band4_img, 0)
    for i in [band3, band4]:
        assert i.depth == cv.IPL_DEPTH_8U
    cv.SaveImage(out_img, ndvi(band3, band4))
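# The ndvi() helper is not defined in either snippet that calls it. A hedged
# sketch of what it might compute: NDVI = (NIR - red) / (NIR + red), with
# band 4 as NIR and band 3 as red (the Landsat TM convention), rescaled to
# 8 bits so SaveImage can write it. Not the author's implementation.
def ndvi(band3, band4):
    size = cv.GetSize(band3)
    red = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    nir = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    cv.Convert(band3, red)
    cv.Convert(band4, nir)
    num = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    den = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    cv.Sub(nir, red, num)
    cv.Add(nir, red, den)
    cv.AddS(den, (1e-6, 0, 0, 0), den)  # avoid division by zero on empty pixels
    out = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    cv.Div(num, den, out)
    # map [-1, 1] to [0, 255] for an 8-bit output image
    out8 = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.ConvertScale(out, out8, 127.5, 127.5)
    return out8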
bag_file_name = arguments[0]
if arguments_size > 1:
    topic_name = arguments[1]
if arguments_size > 2:
    directory = arguments[2]

# Open the bag file:
bag = rosbag.Bag(bag_file_name)
bridge = CvBridge()

# Sweep through messages that match *topic_name*:
index = 0
previous_time = None
for topic, msg, t in bag.read_messages(topic_name):
    # Extract *cv_image* in RGB8 mode:
    index += 1
    try:
        cv_image = bridge.imgmsg_to_cv(msg, desired_encoding="rgb8")
    except CvBridgeError, e:
        print e
        continue  # skip frames that fail to convert

    # Save the image
    cv.SaveImage("{0}/Image{1:03d}.pnm".format(directory, index), cv_image)

    # Print out time changes:
    if previous_time is None:
        previous_time = t
    dt = t - previous_time
    previous_time = t
    print("[{0}]:{1}".format(index, dt))
def main():
    import argparse
    import logging
    import os
    import yaml
    import cv

    global processes
    global forest0, svmmodels, training_bosts, hist0

    parser = argparse.ArgumentParser()
    parser.add_argument('classifier')
    parser.add_argument('cores', type=int, help='Number of processes of parallelism')
    parser.add_argument('--postprocess', action="store_true",
                        help='Run postprocessing, close blobs and remove noise')
    args = parser.parse_args()

    logging.basicConfig(level=logging.WARNING, format="%(asctime)s - %(message)s")

    classifier = zipfile.ZipFile(args.classifier)
    forest0, hist0, forest1, hist1, training_bosts, svmmodels, prior = \
        load_from_classifier(classifier)
    classifier.close()

    processes = args.cores
    pool = Pool(processes=processes)

    KEY_FRAME_PERIOD = 2  # in seconds
    q = Manager().Queue()
    total_frame = 0

    new_flag = True
    while True:
        if not new_flag:
            print "wait..."
            time.sleep(1)
        stream_list = get_list(CLOUDLET_RESOURCE, STREAM_RESOURCE)
        new_flag = False
        prev_stream = None
        for stream in stream_list:
            if stream.get("stream_description").find("denatured") == -1 or \
                    stream.get("stream_description").find("video") == -1 or \
                    stream.get("stream_description").find("pstf") != -1:
                prev_stream = stream
                continue

            ILP_max = []
            for i in xrange(len(CLASSES)):
                ILP_max.append(0)
            ILP_list = []
            for i in xrange(len(CLASSES)):
                ILP_list.append([])

            path, name = stream.get("path").replace("mnt", "cloudletstore").rsplit('/', 1)
            print os.path.join(path, name)
            path_p, name_p = prev_stream.get("path").replace("mnt", "cloudletstore").rsplit('/', 1)
            print os.path.join(path_p, name_p)

            statinfo = os.stat(os.path.join(path_p, name_p))
            prev_stream = stream
            if statinfo.st_size == 0:
                continue
            new_flag = True

            frame_rate = 30
            capture = cv.CaptureFromFile(os.path.join(path, name))
            frame_rate = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
            total_frames = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)
            frame = cv.QueryFrame(capture)
            print frame_rate, total_frames
            print capture

            start_time = time.time()
            key_frame_counter_base = 0
            while frame:
                process_num = 0
                while frame:
                    cv.SaveImage("indexing" + "%d.png" % process_num, frame)
                    for i in xrange(int(KEY_FRAME_PERIOD * frame_rate)):
                        frame = cv.QueryFrame(capture)
                    process_num += 1
                    if process_num == processes:
                        break
                pool.map(calculate_class,
                         [(q, x) for x in xrange(key_frame_counter_base,
                                                 key_frame_counter_base + process_num)])
                while not q.empty():
                    q_entry = q.get()
                    key_frame_counter = q_entry[0]
                    ILP = q_entry[1]
                    for class_index, score in enumerate(ILP):
                        if score > SCORE_THRESHOLD:
                            ILP_list[class_index].append(
                                (key_frame_counter * int(KEY_FRAME_PERIOD * frame_rate) + 1,
                                 score))
                            print (CLASSES[class_index], "%.02f" % score),
                            if score > ILP_max[class_index]:
                                ILP_max[class_index] = score
                    print
                key_frame_counter_base += process_num

            for class_index, frame_list in enumerate(ILP_list):
                if not frame_list:
                    continue
                frame_list_split = split_frame_list(frame_list,
                                                    int(KEY_FRAME_PERIOD * frame_rate) * 2)
                for frame_list, local_max_score in frame_list_split:
                    tag_entry = {}
                    tag_entry["tag"] = CLASSES[class_index] + ":%d" % (ILP_max[class_index] * 100)
                    tag_entry["tag_value"] = local_max_score
                    tag_entry["offset"] = frame_list[0] / frame_rate
                    tag_entry["duration"] = (frame_list[-1] - frame_list[0]) / frame_rate
                    tag_entry["segment"] = stream.get("segment")
                    print tag_entry
                    ret_dict = post(CLOUDLET_RESOURCE, TAG_RESOURCE, tag_entry)

            if stream.get("stream_description").find("pstf") == -1:
                stream_entry = {
                    "stream_description": stream.get("stream_description") + "pstf;"
                }
                ret_dict = put(CLOUDLET_RESOURCE, stream.get("resource_uri"), stream_entry)

            elapse_time = time.time() - start_time
            print "max score:"
            print [(CLASSES[class_index], "%.02f" % score)
                   for class_index, score in enumerate(ILP_max)]
            print "total time: %.2f, key frames: %d, frame per sec: %.2f" \
                % (elapse_time, key_frame_counter_base,
                   key_frame_counter_base / elapse_time)
            print
def store_harddisk(self, filename, image):
    cv.SaveImage(filename, image)
def Run(self):
    print "start"
    seed()
    maxVal = 0.04
    file_path = "./"
    listener = tf.TransformListener()
    nr_images = 14

    # move components to initial position
    self.sss.move("head", "back")
    self.sss.move("arm", "calib")
    self.sss.move("torso", "home")
    self.sss.move("sdh", "home")
    self.sss.wait_for_input()
    self.sss.move("sdh", "calib")
    self.sss.wait_for_input()

    # start calibration routine
    for i in range(1, nr_images):
        if i == 1:
            r1 = maxVal
            r2 = maxVal
        elif i == 2:
            r1 = -maxVal
            r2 = maxVal
        elif i == 3:
            r1 = maxVal
            r2 = -maxVal
        elif i == 4:
            r1 = -maxVal
            r2 = -maxVal
        else:
            r1 = (random() - 0.5) * 2 * maxVal
            r2 = (random() - 0.5) * 2 * maxVal
        self.sss.move("torso", [[r1, r2, r1, r2]])
        self.sss.sleep(1)
        try:
            (trans, rot) = listener.lookupTransform('/base_link',
                                                    '/head_color_camera_r_link',
                                                    rospy.Time(0))
            rpy = euler_from_quaternion(rot)
            cyaw = cos(rpy[2])
            syaw = sin(rpy[2])
            cpitch = cos(rpy[1])
            spitch = sin(rpy[1])
            croll = cos(rpy[0])
            sroll = sin(rpy[0])
            # build the rotation matrix from roll/pitch/yaw
            R11 = cyaw * cpitch
            R12 = cyaw * spitch * sroll - syaw * croll
            R13 = cyaw * spitch * croll + syaw * sroll
            R21 = syaw * cpitch
            R22 = syaw * spitch * sroll + cyaw * croll
            R23 = syaw * spitch * croll - cyaw * sroll
            R31 = -spitch
            R32 = cpitch * sroll
            R33 = cpitch * croll
            fout = open(file_path + 'calpic' + str(i) + '.coords', 'w')
            fout.write(str(R11) + ' ' + str(R12) + ' ' + str(R13) + ' ' + str(trans[0] * 1000) + '\n' +
                       str(R21) + ' ' + str(R22) + ' ' + str(R23) + ' ' + str(trans[1] * 1000) + '\n' +
                       str(R31) + ' ' + str(R32) + ' ' + str(R33) + ' ' + str(trans[2] * 1000))
            fout.close()
        except (tf.LookupException, tf.ConnectivityException):
            print "tf exception"

        self.sss.sleep(1)
        cv.SaveImage(file_path + 'calpic' + str(i) + '.png', self.cv_image)
        self.sss.sleep(1)

    self.sss.move("torso", "home")
    print "finished"
def savePicture(self):
    filename = strftime("%m-%d-%Y_") + "%.3f" % time()
    cv.SaveImage("pictures/%s.png" % filename, self.currentFrame)
it = model.solve()
del p
print 'Updated fitting in %i iterations' % it

# Now plot the estimated distribution against the actual distribution...
img = numpy.ones((height, width, 3))
draw = model.sampleMixture()

for px in xrange(width):
    x = float(px) / float(width) * (high - low) + low

    y_gt = gt.prob([x])
    y_gu = model.prob([x])
    y_gd = 0.0
    for ind, gauss in enumerate(draw[1]):
        y_gd += draw[0][ind] * gauss.prob([x])

    py_gt = int((1.0 - y_gt / scale) * height)
    py_gu = int((1.0 - y_gu / scale) * height)
    py_gd = numpy.clip(int((1.0 - y_gd / scale) * height), 0, height - 1)

    img[py_gt, px, :] = [0.0, 1.0, 0.0]
    img[py_gu, px, :] = [1.0, 0.0, 0.0]
    img[py_gd, px, :] = [0.0, 0.0, 1.0]

# Save plot out...
img = cvarray.array2cv(img * 255.0)
cv.SaveImage('%s/plot_%i.png' % (out_dir, i + 1), img)
print
corner_strengths = sorted(corner_strengths, key=itemgetter(0))
best_corners = []
for loopVar2 in range(len(corner_strengths) - 1,
                      len(corner_strengths) - 1 - N, -1):
    try:
        cv.Circle(orig_img_1,
                  (corner_strengths[loopVar2][1][1], corner_strengths[loopVar2][1][0]),
                  2, (255, 0, 0), -1, lineType=8, shift=0)
        best_corners.append([corner_strengths[loopVar2][1][0],
                             corner_strengths[loopVar2][1][1]])
        #print corner_strengths[loopVar2]
    except IndexError:
        pass

#print best_corners, (orig_img_1.height)/2, (orig_img_1.width)/2
feature_vectors.append(find_feature_vector(best_corners,
                                           (orig_img_1.height) / 2,
                                           (orig_img_1.width) / 2))
print feature_vectors[loopVar1], sum(feature_vectors[loopVar1])
cv.SaveImage(filename2 + str(loopVar1 + 1) + ".jpg", orig_img_1)  #Save the result

save_features(filename3, feature_vectors)
read = readfeatures(filename3)
print read, len(read)
cv2.waitKey(0)  #Wait for key-press
# import cv2.cv as cv
import cv

orig = cv.LoadImage('./demo1.jpg', cv.CV_LOAD_IMAGE_COLOR)
im = cv.CreateImage(cv.GetSize(orig), 8, 1)
cv.CvtColor(orig, im, cv.CV_BGR2GRAY)
#Keep the original in colour to draw contours in the end
cv.Threshold(im, im, 128, 255, cv.CV_THRESH_BINARY)
cv.ShowImage("Threshold 1", im)
cv.SaveImage("threshold1.jpg", im)

element = cv.CreateStructuringElementEx(5*2+1, 5*2+1, 5, 5, cv.CV_SHAPE_RECT)
cv.MorphologyEx(im, im, None, element, cv.CV_MOP_OPEN)  #Open and close to make contours appear
cv.MorphologyEx(im, im, None, element, cv.CV_MOP_CLOSE)
cv.Threshold(im, im, 128, 255, cv.CV_THRESH_BINARY_INV)
cv.ShowImage("After MorphologyEx", im)
cv.SaveImage("after.jpg", im)
# --------------------------------

vals = cv.CloneImage(im)  #Make a clone because FindContours can modify the image
contours = cv.FindContours(vals, cv.CreateMemStorage(0), cv.CV_RETR_LIST,
                           cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))

_red = (0, 0, 255)    #Red for external contours
_green = (0, 255, 0)  #Green for internal contours
levels = 2  #1 contours drawn, 2 internal contours as well, 3 ...
# DrawContours draws in place and returns None, so its result is not assigned
cv.DrawContours(orig, contours, _red, _green, levels, 2, cv.CV_FILLED)  #Draw contours on the colour image

cv.SaveImage("save.jpg", orig)
def main():
    os.chdir(sys.argv[1])

    #print "DELETE ALL FILES FIRST!"

    #tree = et.parse("project.xml")
    #movie = tree.getroot()
    #start_frame = int( movie.attrib["start_frame"] )
    #end_frame = int( movie.attrib["end_frame"] )

    f_shots = open("shots.txt")
    shots = [(int(start), int(end))
             for start, end in [line.split("\t")[0:2] for line in f_shots if line]]
    f_shots.close()

    f_chapters = open("chapters.txt")
    chapters = [int(line) for line in f_chapters if line]
    f_chapters.close()

    '''# fix first and add last frame
    chapters[0] = start_frame
    chapters.append(end_frame)'''

    os.chdir("shot_colors")
    try:
        os.mkdir(OUTPUT_DIR_NAME)
    except:
        pass

    filenames = glob.glob("shot_colors_*.png")
    last_shot_nr = 0
    ch = 1
    for i, shot in enumerate(shots):
        start_frame, end_frame = shot
        if ch == len(chapters):  # will this ever happen, freder?
            print "den rest noch"  # German: "the rest still (to do)"
            #print " ".join(filenames[last_shot_nr:])
            os.system("convert %s -append chapters\\chapter_%02d.png"
                      % (" ".join(filenames[last_shot_nr:]), ch))
            break
        elif end_frame >= chapters[ch]:
            #if end_frame >= chapters[ch]:
            print ch, ":", last_shot_nr, "->", i - 1
            print " ".join(filenames[last_shot_nr:i])
            os.system("convert %s -append chapters\\chapter_%02d.png"
                      % (" ".join(filenames[last_shot_nr:i]), ch))
            last_shot_nr = i
            ch += 1

    os.chdir(OUTPUT_DIR_NAME)
    for file_nr, file in enumerate(os.listdir(os.getcwd())):
        if os.path.isdir(file):
            continue

        img_orig = cv.LoadImageM(file)
        w, h = img_orig.cols, img_orig.rows
        img_hls = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(img_orig, img_hls, cv.CV_BGR2HLS)

        output_img = cv.CreateImage((PIXELS_PER_COLOR * NUM_CLUSTERS, h),
                                    cv.IPL_DEPTH_8U, 3)

        # convert to numpy array
        a = numpy.asarray(cv.GetMat(img_hls))
        a = a.reshape(a.shape[0] * a.shape[1], a.shape[2])  # make it 1-dimensional

        # set initial centroids
        init_cluster = []
        step = w / NUM_CLUSTERS
        #for x, y in [(0*step, h*0.1), (1*step, h*0.3), (2*step, h*0.5), (3*step, h*0.7), (4*step, h*0.9)]:
        for x, y in [(0 * step, h * 0.1), (1 * step, h * 0.1),
                     (2 * step, h * 0.3), (3 * step, h * 0.3),
                     (4 * step, h * 0.5), (5 * step, h * 0.5),
                     (6 * step, h * 0.7), (7 * step, h * 0.7),
                     (8 * step, h * 0.9), (9 * step, h * 0.9)]:
            x = int(x)
            y = int(y)
            init_cluster.append(a[y * w + x])

        centroids, labels = scipy.cluster.vq.kmeans2(a, numpy.array(init_cluster))
        vecs, dist = scipy.cluster.vq.vq(a, centroids)  # assign codes
        counts, bins = scipy.histogram(vecs, len(centroids))  # count occurrences

        centroid_count = []
        for i, count in enumerate(counts):
            if count > 0:
                centroid_count.append((centroids[i].tolist(), count))
        centroid_count.sort(hls_sort2)

        px_count = w * h
        x = 0
        for item in centroid_count:
            count = item[1] * (PIXELS_PER_COLOR * NUM_CLUSTERS)
            count = int(math.ceil(count / float(px_count)))
            centroid = item[0]
            for l in range(count):
                if x + l >= PIXELS_PER_COLOR * NUM_CLUSTERS:
                    break
                for y in range(h):
                    cv.Set2D(output_img, y, x + l,
                             (centroid[0], centroid[1], centroid[2]))
            x += count

        output_img_rgb = cv.CreateImage(cv.GetSize(output_img), cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(output_img, output_img_rgb, cv.CV_HLS2BGR)
        cv.SaveImage(file, output_img_rgb)

        # save to text-file
        if file_nr == 0:
            f_out = open("..\\..\\chapter_colors.txt", "w")
            f_out.write("")  # reset
            f_out.close()

        f_out = open("..\\..\\chapter_colors.txt", "a")
        row = cv.GetRow(output_img_rgb, 0)
        WIDTH = row.cols
        #print WIDTH

        data_items = []
        counter = 0
        last_px = cv.Get1D(row, 0)
        for i in range(WIDTH):
            px = cv.Get1D(row, i)
            if px == last_px:
                counter += 1
                if i == WIDTH - 1:
                    #f_out.write("%d, %d, %d, %d _ " % (int(last_px[2]), int(last_px[1]), int(last_px[0]), counter))
                    data_items.append("%d, %d, %d, %d" % (int(last_px[2]), int(last_px[1]),
                                                          int(last_px[0]), counter))
                continue
            else:
                #f_out.write("%d, %d, %d, %d _ " % (int(last_px[2]), int(last_px[1]), int(last_px[0]), counter))
                data_items.append("%d, %d, %d, %d" % (int(last_px[2]), int(last_px[1]),
                                                      int(last_px[0]), counter))
                counter = 1
                last_px = px

        print NUM_CLUSTERS - len(data_items), "colors missing"
        for j in range(NUM_CLUSTERS - len(data_items)):  # sometimes there are fewer colors
            data_items.append("0, 0, 0, 0")

        f_out.write(" _ ".join(data_items))
        f_out.write("\n")
        f_out.close()

    os.system("convert chapter_*.png -append _CHAPTERS.png")
    return
def main():
    args = sys.argv[1:]
    arg0 = args[0]
    do_display_output = True if 'display' in args else False
    if do_display_output:
        outdir = args[-1]
    else:
        outdir = None
    do_profile = True if 'profile' in args else False

    if os.path.isdir(arg0):
        imgpaths = []
        for dirpath, dirnames, filenames in os.walk(arg0):
            for imgname in [f for f in filenames if isimgext(f)]:
                imgpaths.append(os.path.join(dirpath, imgname))
    else:
        imgpaths = [arg0]

    template_zero_path = "sequoia_template_zero_skinny.png"
    template_one_path = "sequoia_template_one_skinny.png"
    sidesymbol_path = "sequoia_side_symbol.png"

    Izero = cv.LoadImage(template_zero_path, cv.CV_LOAD_IMAGE_GRAYSCALE)
    Ione = cv.LoadImage(template_one_path, cv.CV_LOAD_IMAGE_GRAYSCALE)
    Isidesym = cv.LoadImage(sidesymbol_path, cv.CV_LOAD_IMAGE_GRAYSCALE)
    IsymA = cv.LoadImage(SYMA_IMGPATH, cv.CV_LOAD_IMAGE_GRAYSCALE)
    IsymB = cv.LoadImage(SYMB_IMGPATH, cv.CV_LOAD_IMAGE_GRAYSCALE)
    IsymC = cv.LoadImage(SYMC_IMGPATH, cv.CV_LOAD_IMAGE_GRAYSCALE)
    IsymD = cv.LoadImage(SYMD_IMGPATH, cv.CV_LOAD_IMAGE_GRAYSCALE)
    IsymE = cv.LoadImage(SYME_IMGPATH, cv.CV_LOAD_IMAGE_GRAYSCALE)

    # Rescale IZERO/IONE/ISIDESYM to match this dataset's image dimensions
    exmpl_imgsize = cv.GetSize(cv.LoadImage(imgpaths[0]))
    if exmpl_imgsize != (ORIG_IMG_W, ORIG_IMG_H):
        print "...rescaling images..."
        Izero = rescale_img(Izero, ORIG_IMG_W, ORIG_IMG_H,
                            exmpl_imgsize[0], exmpl_imgsize[1])
        Ione = rescale_img(Ione, ORIG_IMG_W, ORIG_IMG_H,
                           exmpl_imgsize[0], exmpl_imgsize[1])
        Isidesym = rescale_img(Isidesym, ORIG_IMG_W, ORIG_IMG_H,
                               exmpl_imgsize[0], exmpl_imgsize[1])
        IsymA = rescale_img(IsymA, ORIG_IMG_W, ORIG_IMG_H,
                            exmpl_imgsize[0], exmpl_imgsize[1])
        IsymB = rescale_img(IsymB, ORIG_IMG_W, ORIG_IMG_H,
                            exmpl_imgsize[0], exmpl_imgsize[1])
        IsymC = rescale_img(IsymC, ORIG_IMG_W, ORIG_IMG_H,
                            exmpl_imgsize[0], exmpl_imgsize[1])
        IsymD = rescale_img(IsymD, ORIG_IMG_W, ORIG_IMG_H,
                            exmpl_imgsize[0], exmpl_imgsize[1])
        IsymE = rescale_img(IsymE, ORIG_IMG_W, ORIG_IMG_H,
                            exmpl_imgsize[0], exmpl_imgsize[1])

    Izero = tempmatch.smooth(Izero, 3, 3, bordertype='const', val=255.0)
    Ione = tempmatch.smooth(Ione, 3, 3, bordertype='const', val=255.0)
    Isidesym = tempmatch.smooth(Isidesym, 3, 3, bordertype='const', val=255.0)
    IsymA = tempmatch.smooth(IsymA, 3, 3, bordertype='const', val=255.0)
    IsymB = tempmatch.smooth(IsymB, 3, 3, bordertype='const', val=255.0)
    IsymC = tempmatch.smooth(IsymC, 3, 3, bordertype='const', val=255.0)
    IsymD = tempmatch.smooth(IsymD, 3, 3, bordertype='const', val=255.0)
    IsymE = tempmatch.smooth(IsymE, 3, 3, bordertype='const', val=255.0)

    t = time.time()
    err_imgpaths = []
    for imgpath in imgpaths:
        I = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_GRAYSCALE)
        print "For imgpath {0}:".format(imgpath)
        side, isflip = get_side(I, IsymA, IsymB, IsymC, IsymD, IsymE)
        if side is None:
            print "    ERROR GET_SIDE"
            err_imgpaths.append(imgpath)
            continue
        elif side == 1:
            print "    Detected Backside, isflip={0}".format(isflip)
            continue
        cv.ResetImageROI(I)
        if isflip:
            cv.Flip(I, I, flipMode=-1)
        decodings, marklocs = decode(I, Izero, Ione, _imgpath=imgpath)
        if decodings is None:
            print "    ERROR DECODE"
            err_imgpaths.append(imgpath)
            continue
        else:
            print "    {0} isflip={1}".format(decodings, isflip)
        if not do_display_output:
            continue

        # Output colorful image with interpretation displayed nicely
        Icolor = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_COLOR)
        if isflip:
            cv.Flip(Icolor, Icolor, flipMode=-1)
        for marktype, tups in marklocs.iteritems():
            if marktype == MARK_ON:
                color = cv.CV_RGB(0, 0, 255)
            else:
                color = cv.CV_RGB(255, 0, 0)
            for (imgpath, (x1, y1, x2, y2), userdata) in tups:
                cv.Rectangle(Icolor, (x1, y1), (x2, y2), color, thickness=2)
        imgname = os.path.split(imgpath)[1]
        outrootdir = os.path.join(outdir, imgname)
        try:
            os.makedirs(outrootdir)
        except:
            pass
        outpath = os.path.join(outrootdir,
                               "{0}_bbs.png".format(os.path.splitext(imgname)[0]))
        cv.SaveImage(outpath, Icolor)

    dur = time.time() - t
    print "...Finished Decoding {0} images ({1} s).".format(len(imgpaths), dur)
    print "    Avg. Time per Image: {0} s".format(dur / float(len(imgpaths)))
    print "    Number of Errors: {0}".format(len(err_imgpaths))
    print "Done."
#resize the image
if keepAspectRatio:
    resizeFactor = 1.0
    if w > h:
        resizeFactor = float(maxSize) / float(w)
    else:
        resizeFactor = float(maxSize) / float(h)
    newWidth = int(w * resizeFactor + 0.5)   # cols = width
    newHeight = int(h * resizeFactor + 0.5)  # rows = height
else:
    newWidth = int(finalSizeX)   # cols = width
    newHeight = int(finalSizeY)  # rows = height

imageResized = cv.CreateMat(newHeight, newWidth, image.type)
cv.Resize(imageCropped, imageResized, interpolation=interpolation_FLAG)
cv.SaveImage(outputPath + imageName, imageResized)

partLocsList.close()
imageList.close()
noRotationList.close()
unclearRotationList.close()
smallBBList.close()
finalBBList.close()
finalPartLocsList.close()
partsCountList.close()
def main():
    import argparse
    import logging
    import os
    import yaml

    parser = argparse.ArgumentParser()
    parser.add_argument('classifier')
    parser.add_argument('--postprocess', action="store_true",
                        help='Run postprocessing, close blobs and remove noise')
    parser.add_argument('videolist', help='A file listing all the videos to be indexed')
    parser.add_argument('cores', type=int, help='Number of processes of parallelism')
    args = parser.parse_args()

    logging.basicConfig(level=logging.WARNING, format="%(asctime)s - %(message)s")

    classifier = zipfile.ZipFile(args.classifier)
    global forest0, svmmodels, training_bosts, hist0
    forest0, hist0, forest1, hist1, training_bosts, svmmodels, prior = \
        load_from_classifier(classifier)
    classifier.close()

    KEY_FRAME_PERIOD = 2  # in seconds

    #queue = Queue.Queue()
    #data_queue = Queue.Queue()
    queue = Manager().Queue()
    data_queue = Manager().Queue()

    for processes in [2]:
        pool = Pool(processes=processes)
        video_list = open(args.videolist, 'r')
        log_file = open('statistics%d.txt' % processes, 'a')
        fps = 0
        fps_count = 0
        for video_file in video_list:
            video_file = video_file.strip()
            name = os.path.splitext(video_file)[0]
            file_path = os.path.join(VIDEO_RESOURCE, video_file)
            log_file.write(file_path + "\n")

            capture = cv.CaptureFromFile(file_path)
            frame_rate = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
            total_frames = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)
            log_file.write("frame rate: %.3f, total frames: %d\n"
                           % (frame_rate, total_frames))

            start_time0 = time.time()
            key_frame_counter = 0
            frame = cv.QueryFrame(capture)
            os.makedirs("tmp")
            while frame:
                cv.SaveImage("tmp/" + name + "%d.png" % key_frame_counter, frame)
                for i in xrange(int(KEY_FRAME_PERIOD * frame_rate)):
                    frame = cv.QueryFrame(capture)
                key_frame_counter += 1

            for i in xrange(key_frame_counter):
                data_queue.put(i)

            start_time = time.time()
            pool.map(calculate_class,
                     [(name, queue, i) for i in xrange(key_frame_counter)])
            elapse_time = time.time() - start_time

            accuracy_file = open('fact.txt', 'w')
            while not queue.empty():
                q_entry = queue.get()
                frame_counter = q_entry[0]
                ILP = q_entry[1]
                accuracy_file.write('%d' % frame_counter)
                for class_index, score in enumerate(ILP):
                    accuracy_file.write(',%.02f' % score)
                accuracy_file.write('\n')
            accuracy_file.close()

            os.system("rm -rf tmp")

            log_file.write("decoding time: %.2f, total time: %.2f, key frames: %d, frame per sec: %.3f\n"
                           % (start_time - start_time0, elapse_time,
                              key_frame_counter, key_frame_counter / elapse_time))
            fps += key_frame_counter / elapse_time
            fps_count += 1
            #time.sleep(10)

        video_list.close()
        log_file.write("average fps: %.3f\n" % (fps / fps_count))
        log_file.close()
def publish_image(self, task_id, image, postfix=''):
    self.image_pubs[task_id].publish(image)
    ffull = pt.join(task_id,
                    time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + postfix + '.jpg')
    cv.SaveImage(ffull, image)
def main():
    os.chdir(sys.argv[1])
    output_dir = os.path.join(OUTPUT_DIR_NAME, OUTPUT_DIR_NAME)

    try:
        os.mkdir(output_dir)
    except:
        pass

    os.chdir(OUTPUT_DIR_NAME)
    for file in os.listdir(os.getcwd()):
        if os.path.isdir(file):
            continue

        img_orig = cv.LoadImageM(file)
        w, h = img_orig.cols, img_orig.rows
        img_hls = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(img_orig, img_hls, cv.CV_BGR2HLS)

        output_img = cv.CreateImage((PIXELS_PER_COLOR * NUM_CLUSTERS, h),
                                    cv.IPL_DEPTH_8U, 3)

        # convert to numpy array
        a = numpy.asarray(cv.GetMat(img_hls))
        a = a.reshape(a.shape[0] * a.shape[1], a.shape[2])  # make it 1-dimensional

        # set initial centroids
        init_cluster = []
        step = w / NUM_CLUSTERS
        for x, y in [(0 * step, h * 0.1), (1 * step, h * 0.3),
                     (2 * step, h * 0.5), (3 * step, h * 0.7),
                     (4 * step, h * 0.9)]:
            x = int(x)
            y = int(y)
            init_cluster.append(a[y * w + x])

        centroids, labels = scipy.cluster.vq.kmeans2(a, numpy.array(init_cluster))
        vecs, dist = scipy.cluster.vq.vq(a, centroids)  # assign codes
        counts, bins = scipy.histogram(vecs, len(centroids))  # count occurrences

        centroid_count = []
        for i, count in enumerate(counts):
            if count > 0:
                centroid_count.append((centroids[i].tolist(), count))

        #centroids = centroids.tolist()
        #centroids.sort(hls_sort)
        centroid_count.sort(hls_sort2)

        px_count = w * h
        x = 0
        for item in centroid_count:
            count = item[1] * (PIXELS_PER_COLOR * NUM_CLUSTERS)
            count = int(math.ceil(count / float(px_count)))
            centroid = item[0]
            for l in range(count):
                if x + l >= PIXELS_PER_COLOR * NUM_CLUSTERS:
                    break
                for y in range(h):
                    cv.Set2D(output_img, y, x + l,
                             (centroid[0], centroid[1], centroid[2]))
            x += count

        #for centroid_nr, centroid in enumerate(centroids):
        #    for j in range(PIXELS_PER_COLOR):
        #        x = centroid_nr*PIXELS_PER_COLOR + j
        #        for y in range(h):
        #            cv.Set2D(output_img, y, x, (centroid[0], centroid[1], centroid[2]))

        output_img_rgb = cv.CreateImage(cv.GetSize(output_img), cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(output_img, output_img_rgb, cv.CV_HLS2BGR)
        cv.SaveImage(os.path.join(OUTPUT_DIR_NAME, file), output_img_rgb)

    print "appending..."
    os.chdir(OUTPUT_DIR_NAME)
    os.system("convert shot_colors_*.png -append result.png")
    #raw_input("- done -")
    return
def getcamera():
    capture = cv.CaptureFromCAM(0)
    img = cv.QueryFrame(capture)
    cv.SaveImage("ca1.jpg", img)
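# On some drivers the very first QueryFrame returns a dark or stale buffer;
# a common workaround is to discard a few warm-up frames first. A hedged
# variant (function name and warmup count are assumptions):
def getcamera_warmed(warmup=5):
    capture = cv.CaptureFromCAM(0)
    img = None
    for _ in range(warmup):
        img = cv.QueryFrame(capture)
    cv.SaveImage("ca1.jpg", img)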
def cvmat_iterator(cvmat):
    for i in range(cvmat.rows):
        for j in range(cvmat.cols):
            yield cvmat[i, j]

cam = Camera(3.0)
rend = Renderer(640, 480, 2)
cv.NamedWindow("snap")

#images = [rend.frame(i) for i in range(0, 2000, 400)]
images = [rend.frame(i) for i in [1200]]

if 0:
    for i, img in enumerate(images):
        cv.SaveImage("final/%06d.png" % i, img)

size = cv.GetSize(images[0])
corners = [get_corners(i) for i in images]

goodcorners = [co for (im, (ok, co)) in zip(images, corners) if ok]

def checkerboard_error(xformed):
    def pt2line(a, b, c):
        # perpendicular distance from point a to the line through b and c
        x0, y0 = a
        x1, y1 = b
        x2, y2 = c
        return (abs((x2 - x1) * (y1 - y0) - (x1 - x0) * (y2 - y1))
                / math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))
blockCentreY = OPTICAL_FLOW_BLOCK_HEIGHT / 2
for y in range(opticalFlowX.shape[0]):
    blockCentreX = OPTICAL_FLOW_BLOCK_WIDTH / 2
    for x in range(opticalFlowX.shape[1]):
        endX = blockCentreX + cv.Get2D(opticalFlowX, y, x)[0]
        endY = blockCentreY + cv.Get2D(opticalFlowY, y, x)[0]
        cv.Line(curImage, (int(blockCentreX), int(blockCentreY)),
                (int(endX), int(endY)), lineColor)
        blockCentreX += OPTICAL_FLOW_BLOCK_WIDTH
    blockCentreY += OPTICAL_FLOW_BLOCK_HEIGHT

# Save the image
cv.CvtColor(curImage, curImage, cv.CV_RGB2BGR)
imageFilename = "{0}{1:08d}.{2}".format(options.prefix, imageIdx, options.imageFormat)
cv.SaveImage(imageFilename, curImage)
imageIdx += 1

sys.stdout.write("\n")
sys.stdout.flush()
#image = Image.open(imagefile)
#image.show()
for (x, y, w, h) in correct_faces:
    #cropfile = imagefile + str(nface) + '.jpg'
    #cv.SaveImage(cropfile, frame[y:y+h, x:x+w])
    #crop = Image.open(cropfile)
    #crop.show()
    if nface == numrows * maxcropwindows:
        xcropnext = xcrop
        numrows += 1
        ycrop += int(h / 4)
    xcropnext = xcropnext - int(2 * w / 3)
    cv.ShowImage('crop' + str(nface), frame[y:y + h, x:x + w])
    cv.SaveImage(imagefile + str(nface) + '.jpg', frame[y:y + h, x:x + w])
    cv.SetMouseCallback("crop" + str(nface), on_mouse, param=nface)
    cv.MoveWindow("crop" + str(nface), xcropnext, ycrop)
    #xcropnext = xcropnext + w + 4
    nface += 1
    # sleep(1)
    #face_size_correct = False
#else:
    #print str(c)

clear_tmpfiles()
capture = None
cv.DestroyAllWindows()
# Setup the mean shift object...
ms = MeanShift()
ms.set_data(data, 'df')
ms.set_spatial('kd_tree')
ms.set_scale(numpy.array([1.5, 1.5]))

# Do some visualisation...
dim = 512
image = numpy.zeros((dim, dim, 3), dtype=numpy.float32)

for r in xrange(data.shape[0]):
    loc = data[r, :]
    loc = (loc + 5.0) / 10.0
    loc *= dim
    image[int(loc[1] + 0.5), int(loc[0] + 0.5), :] = 64.0

print 'Projecting samples to line...'
to_render = 4 * 1024
line = ms.manifolds(data[:to_render], 1, True)
print 'Done'

for r in xrange(line.shape[0]):
    loc = line[r, :]
    loc = (loc + 5.0) / 10.0
    loc *= dim
    image[int(loc[1] + 0.5), int(loc[0] + 0.5), :] = 255.0

image = array2cv(image)
cv.SaveImage('manifold2.png', image)